1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/userfaultfd.h>
10 #include <setjmp.h>
11 #include <signal.h>
12 #include <stdbool.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/ioctl.h>
17 #include <sys/mman.h>
18 #include <sys/syscall.h>
19 #include <sys/uio.h>
20 #include <unistd.h>
21 
22 /*
23  * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
24  *
25  * "If the signal occurs other than as the result of calling the abort or raise
26  *  function, the behavior is undefined if the signal handler refers to any
27  *  object with static storage duration other than by assigning a value to an
28  *  object declared as volatile sig_atomic_t"
29  */
30 static volatile sig_atomic_t signal_jump_set;
31 static sigjmp_buf signal_jmp_buf;
32 
33 /*
34  * Ignore the checkpatch warning: we must read from x but don't want to do
35  * anything with it in order to trigger a read page fault. We therefore must use
36  * volatile to stop the compiler from optimising this away.
37  */
38 #define FORCE_READ(x) (*(volatile typeof(x) *)x)
39 
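/*
 * Thin wrapper - libc may not provide a userfaultfd() helper, so invoke the
 * syscall directly.
 */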
40 static int userfaultfd(int flags)
41 {
42 	return syscall(SYS_userfaultfd, flags);
43 }
44 
45 static void handle_fatal(int c)
46 {
47 	if (!signal_jump_set)
48 		return;
49 
50 	siglongjmp(signal_jmp_buf, c);
51 }
52 
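/*
 * As above, pidfd_open() may not be exposed by libc either, so invoke the
 * syscall directly.
 */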
53 static int pidfd_open(pid_t pid, unsigned int flags)
54 {
55 	return syscall(SYS_pidfd_open, pid, flags);
56 }
57 
58 /*
59  * Enable our signal catcher and try to read/write the specified buffer. The
60  * return value indicates whether the read/write succeeds without a fatal
61  * signal.
62  */
63 static bool try_access_buf(char *ptr, bool write)
64 {
65 	bool failed;
66 
67 	/* Tell signal handler to jump back here on fatal signal. */
68 	signal_jump_set = true;
69 	/* If a fatal signal arose, we will jump back here and failed is set. */
70 	failed = sigsetjmp(signal_jmp_buf, 0) != 0;
71 
72 	if (!failed) {
73 		if (write)
74 			*ptr = 'x';
75 		else
76 			FORCE_READ(ptr);
77 	}
78 
79 	signal_jump_set = false;
80 	return !failed;
81 }
82 
83 /* Try and read from a buffer, return true if no fatal signal. */
84 static bool try_read_buf(char *ptr)
85 {
86 	return try_access_buf(ptr, false);
87 }
88 
89 /* Try and write to a buffer, return true if no fatal signal. */
90 static bool try_write_buf(char *ptr)
91 {
92 	return try_access_buf(ptr, true);
93 }
94 
95 /*
96  * Try and BOTH read from AND write to a buffer, return true if BOTH operations
97  * succeed.
98  */
99 static bool try_read_write_buf(char *ptr)
100 {
101 	return try_read_buf(ptr) && try_write_buf(ptr);
102 }
103 
104 FIXTURE(guard_pages)
105 {
106 	unsigned long page_size;
107 };
108 
109 FIXTURE_SETUP(guard_pages)
110 {
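	/*
	 * SA_NODEFER matters here: the handler exits via siglongjmp(), and
	 * sigsetjmp() in try_access_buf() does not save the signal mask, so
	 * without it SIGSEGV would remain blocked after the first caught fault.
	 */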
111 	struct sigaction act = {
112 		.sa_handler = &handle_fatal,
113 		.sa_flags = SA_NODEFER,
114 	};
115 
116 	sigemptyset(&act.sa_mask);
117 	if (sigaction(SIGSEGV, &act, NULL))
118 		ksft_exit_fail_perror("sigaction");
119 
120 	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
121 };
122 
123 FIXTURE_TEARDOWN(guard_pages)
124 {
125 	struct sigaction act = {
126 		.sa_handler = SIG_DFL,
127 		.sa_flags = SA_NODEFER,
128 	};
129 
130 	sigemptyset(&act.sa_mask);
131 	sigaction(SIGSEGV, &act, NULL);
132 }
133 
134 TEST_F(guard_pages, basic)
135 {
136 	const unsigned long NUM_PAGES = 10;
137 	const unsigned long page_size = self->page_size;
138 	char *ptr;
139 	int i;
140 
141 	ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE,
142 		   MAP_PRIVATE | MAP_ANON, -1, 0);
143 	ASSERT_NE(ptr, MAP_FAILED);
144 
145 	/* Trivially assert we can touch the first page. */
146 	ASSERT_TRUE(try_read_write_buf(ptr));
147 
148 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
149 
150 	/* Establish that 1st page SIGSEGV's. */
151 	ASSERT_FALSE(try_read_write_buf(ptr));
152 
153 	/* Ensure we can touch everything else. */
154 	for (i = 1; i < NUM_PAGES; i++) {
155 		char *curr = &ptr[i * page_size];
156 
157 		ASSERT_TRUE(try_read_write_buf(curr));
158 	}
159 
160 	/* Establish a guard page at the end of the mapping. */
161 	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
162 			  MADV_GUARD_INSTALL), 0);
163 
164 	/* Check that both guard pages result in SIGSEGV. */
165 	ASSERT_FALSE(try_read_write_buf(ptr));
166 	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
167 
168 	/* Remove the first guard page. */
169 	ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
170 
171 	/* Make sure we can touch it. */
172 	ASSERT_TRUE(try_read_write_buf(ptr));
173 
174 	/* Remove the last guard page. */
175 	ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
176 			     MADV_GUARD_REMOVE));
177 
178 	/* Make sure we can touch it. */
179 	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
180 
181 	/*
182 	 * Test setting a _range_ of pages, namely the first 3. The first of
183 	 * these will have been faulted in, so this also tests that we can
184 	 * install guard pages over backed pages.
185 	 */
186 	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
187 
188 	/* Make sure they are all guard pages. */
189 	for (i = 0; i < 3; i++) {
190 		char *curr = &ptr[i * page_size];
191 
192 		ASSERT_FALSE(try_read_write_buf(curr));
193 	}
194 
195 	/* Make sure the rest are not. */
196 	for (i = 3; i < NUM_PAGES; i++) {
197 		char *curr = &ptr[i * page_size];
198 
199 		ASSERT_TRUE(try_read_write_buf(curr));
200 	}
201 
202 	/* Remove guard pages. */
203 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
204 
205 	/* Now make sure we can touch everything. */
206 	for (i = 0; i < NUM_PAGES; i++) {
207 		char *curr = &ptr[i * page_size];
208 
209 		ASSERT_TRUE(try_read_write_buf(curr));
210 	}
211 
212 	/*
213 	 * Now remove the guard markers again (none remain) and make sure this
214 	 * does not remove the existing, populated entries.
215 	 */
216 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
217 
218 	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
219 		char chr = ptr[i];
220 
221 		ASSERT_EQ(chr, 'x');
222 	}
223 
224 	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
225 }
226 
227 /* Assert that operations applied across multiple VMAs work as expected. */
228 TEST_F(guard_pages, multi_vma)
229 {
230 	const unsigned long page_size = self->page_size;
231 	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
232 	int i;
233 
234 	/* Reserve a 100 page region over which we can install VMAs. */
235 	ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
236 			  MAP_ANON | MAP_PRIVATE, -1, 0);
237 	ASSERT_NE(ptr_region, MAP_FAILED);
238 
239 	/* Place a 10 page VMA at the start of the region. */
240 	ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE,
241 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
242 	ASSERT_NE(ptr1, MAP_FAILED);
243 
244 	/* Place a 5 page VMA 50 pages into the region. */
245 	ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
246 		    PROT_READ | PROT_WRITE,
247 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
248 	ASSERT_NE(ptr2, MAP_FAILED);
249 
250 	/* Place a 20 page VMA at the end of the region. */
251 	ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size,
252 		    PROT_READ | PROT_WRITE,
253 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
254 	ASSERT_NE(ptr3, MAP_FAILED);
255 
256 	/* Unmap gaps. */
257 	ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
258 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
259 
260 	/*
261 	 * We end up with VMAs like this:
262 	 *
263 	 * 0    10 .. 50   55 .. 80   100
264 	 * [---]      [---]      [---]
265 	 */
266 
267 	/*
268 	 * Now install guard markers across the whole range and make sure all
269 	 * VMAs are affected.
270 	 */
271 
272 	/*
273 	 * madvise() is certifiable and lets you perform operations over gaps:
274 	 * everything works, but -1 is returned and errno is set to ENOMEM.
275 	 * ENOMEM is also set if anything genuinely runs out of memory, so you
276 	 * are left to guess which is which.
277 	 */
278 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
279 	ASSERT_EQ(errno, ENOMEM);
280 
281 	for (i = 0; i < 10; i++) {
282 		char *curr = &ptr1[i * page_size];
283 
284 		ASSERT_FALSE(try_read_write_buf(curr));
285 	}
286 
287 	for (i = 0; i < 5; i++) {
288 		char *curr = &ptr2[i * page_size];
289 
290 		ASSERT_FALSE(try_read_write_buf(curr));
291 	}
292 
293 	for (i = 0; i < 20; i++) {
294 		char *curr = &ptr3[i * page_size];
295 
296 		ASSERT_FALSE(try_read_write_buf(curr));
297 	}
298 
299 	/* Now remove guard pages over the range and assert the opposite. */
300 
301 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
302 	ASSERT_EQ(errno, ENOMEM);
303 
304 	for (i = 0; i < 10; i++) {
305 		char *curr = &ptr1[i * page_size];
306 
307 		ASSERT_TRUE(try_read_write_buf(curr));
308 	}
309 
310 	for (i = 0; i < 5; i++) {
311 		char *curr = &ptr2[i * page_size];
312 
313 		ASSERT_TRUE(try_read_write_buf(curr));
314 	}
315 
316 	for (i = 0; i < 20; i++) {
317 		char *curr = &ptr3[i * page_size];
318 
319 		ASSERT_TRUE(try_read_write_buf(curr));
320 	}
321 
322 	/* Now map incompatible VMAs in the gaps. */
323 	ptr = mmap(&ptr_region[10 * page_size], 40 * page_size,
324 		   PROT_READ | PROT_WRITE | PROT_EXEC,
325 		   MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
326 	ASSERT_NE(ptr, MAP_FAILED);
327 	ptr = mmap(&ptr_region[55 * page_size], 25 * page_size,
328 		   PROT_READ | PROT_WRITE | PROT_EXEC,
329 		   MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
330 	ASSERT_NE(ptr, MAP_FAILED);
331 
332 	/*
333 	 * We end up with VMAs like this:
334 	 *
335 	 * 0    10 .. 50   55 .. 80   100
336 	 * [---][xxxx][---][xxxx][---]
337 	 *
338 	 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
339 	 * them.
340 	 */
341 
342 	/* Multiple VMAs adjacent to one another should result in no error. */
343 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
344 	for (i = 0; i < 100; i++) {
345 		char *curr = &ptr_region[i * page_size];
346 
347 		ASSERT_FALSE(try_read_write_buf(curr));
348 	}
349 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
350 	for (i = 0; i < 100; i++) {
351 		char *curr = &ptr_region[i * page_size];
352 
353 		ASSERT_TRUE(try_read_write_buf(curr));
354 	}
355 
356 	/* Cleanup. */
357 	ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
358 }
359 
360 /*
361  * Assert that batched operations performed using process_madvise() work as
362  * expected.
363  */
364 TEST_F(guard_pages, process_madvise)
365 {
366 	const unsigned long page_size = self->page_size;
367 	pid_t pid = getpid();
368 	int pidfd = pidfd_open(pid, 0);
369 	char *ptr_region, *ptr1, *ptr2, *ptr3;
370 	ssize_t count;
371 	struct iovec vec[6];
372 
373 	ASSERT_NE(pidfd, -1);
374 
375 	/* Reserve region to map over. */
376 	ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
377 			  MAP_ANON | MAP_PRIVATE, -1, 0);
378 	ASSERT_NE(ptr_region, MAP_FAILED);
379 
380 	/*
381 	 * 10 pages offset 1 page into reserve region. We MAP_POPULATE so the
382 	 * pages are faulted in, which also tests installing guard markers over
383 	 * existing, populated entries.
384 	 */
385 	ptr1 = mmap(&ptr_region[page_size], 10 * page_size,
386 		    PROT_READ | PROT_WRITE,
387 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0);
388 	ASSERT_NE(ptr1, MAP_FAILED);
389 	/* We want guard markers at start/end of each VMA. */
390 	vec[0].iov_base = ptr1;
391 	vec[0].iov_len = page_size;
392 	vec[1].iov_base = &ptr1[9 * page_size];
393 	vec[1].iov_len = page_size;
394 
395 	/* 5 pages offset 50 pages into reserve region. */
396 	ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
397 		    PROT_READ | PROT_WRITE,
398 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
399 	ASSERT_NE(ptr2, MAP_FAILED);
400 	vec[2].iov_base = ptr2;
401 	vec[2].iov_len = page_size;
402 	vec[3].iov_base = &ptr2[4 * page_size];
403 	vec[3].iov_len = page_size;
404 
405 	/* 20 pages offset 79 pages into reserve region. */
406 	ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size,
407 		    PROT_READ | PROT_WRITE,
408 		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
409 	ASSERT_NE(ptr3, MAP_FAILED);
410 	vec[4].iov_base = ptr3;
411 	vec[4].iov_len = page_size;
412 	vec[5].iov_base = &ptr3[19 * page_size];
413 	vec[5].iov_len = page_size;
414 
415 	/* Free surrounding VMAs. */
416 	ASSERT_EQ(munmap(ptr_region, page_size), 0);
417 	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
418 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
419 	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
420 
421 	/* Now guard in one step. */
422 	count = process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);
423 
424 	/* OK we don't have permission to do this, skip. */
425 	if (count == -1 && errno == EPERM)
426 		ksft_exit_skip("No process_madvise() permissions, try running as root.\n");
427 
428 	/* Returns the number of bytes advised. */
429 	ASSERT_EQ(count, 6 * page_size);
430 
431 	/* Now make sure the guarding was applied. */
432 
433 	ASSERT_FALSE(try_read_write_buf(ptr1));
434 	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
435 
436 	ASSERT_FALSE(try_read_write_buf(ptr2));
437 	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
438 
439 	ASSERT_FALSE(try_read_write_buf(ptr3));
440 	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
441 
442 	/* Now do the same with unguard... */
443 	count = process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
444 
445 	/* ...and everything should now succeed. */
446 
447 	ASSERT_TRUE(try_read_write_buf(ptr1));
448 	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
449 
450 	ASSERT_TRUE(try_read_write_buf(ptr2));
451 	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
452 
453 	ASSERT_TRUE(try_read_write_buf(ptr3));
454 	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
455 
456 	/* Cleanup. */
457 	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
458 	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
459 	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
460 	close(pidfd);
461 }
462 
463 /* Assert that unmapping ranges does not leave guard markers behind. */
464 TEST_F(guard_pages, munmap)
465 {
466 	const unsigned long page_size = self->page_size;
467 	char *ptr, *ptr_new1, *ptr_new2;
468 
469 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
470 		   MAP_ANON | MAP_PRIVATE, -1, 0);
471 	ASSERT_NE(ptr, MAP_FAILED);
472 
473 	/* Guard first and last pages. */
474 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
475 	ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
476 
477 	/* Assert that they are guarded. */
478 	ASSERT_FALSE(try_read_write_buf(ptr));
479 	ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
480 
481 	/* Unmap them. */
482 	ASSERT_EQ(munmap(ptr, page_size), 0);
483 	ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
484 
485 	/* Map over them. */
486 	ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
487 			MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
488 	ASSERT_NE(ptr_new1, MAP_FAILED);
489 	ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE,
490 			MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
491 	ASSERT_NE(ptr_new2, MAP_FAILED);
492 
493 	/* Assert that they are now not guarded. */
494 	ASSERT_TRUE(try_read_write_buf(ptr_new1));
495 	ASSERT_TRUE(try_read_write_buf(ptr_new2));
496 
497 	/* Cleanup. */
498 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
499 }
500 
501 /* Assert that mprotect() operations have no bearing on guard markers. */
502 TEST_F(guard_pages, mprotect)
503 {
504 	const unsigned long page_size = self->page_size;
505 	char *ptr;
506 	int i;
507 
508 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
509 		   MAP_ANON | MAP_PRIVATE, -1, 0);
510 	ASSERT_NE(ptr, MAP_FAILED);
511 
512 	/* Guard the middle of the range. */
513 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
514 			  MADV_GUARD_INSTALL), 0);
515 
516 	/* Assert that it is indeed guarded. */
517 	ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
518 	ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
519 
520 	/* Now make these pages read-only. */
521 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
522 
523 	/* Make sure the range is still guarded. */
524 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
525 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
526 
527 	/* Make sure we can guard again without issue. */
528 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
529 			  MADV_GUARD_INSTALL), 0);
530 
531 	/* Make sure the range is, yet again, still guarded. */
532 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
533 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
534 
535 	/* Now unguard the whole range. */
536 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
537 
538 	/* Make sure the whole range is readable. */
539 	for (i = 0; i < 10; i++) {
540 		char *curr = &ptr[i * page_size];
541 
542 		ASSERT_TRUE(try_read_buf(curr));
543 	}
544 
545 	/* Cleanup. */
546 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
547 }
548 
549 /* Split and merge VMAs and make sure guard pages still behave. */
550 TEST_F(guard_pages, split_merge)
551 {
552 	const unsigned long page_size = self->page_size;
553 	char *ptr, *ptr_new;
554 	int i;
555 
556 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
557 		   MAP_ANON | MAP_PRIVATE, -1, 0);
558 	ASSERT_NE(ptr, MAP_FAILED);
559 
560 	/* Guard the whole range. */
561 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
562 
563 	/* Make sure the whole range is guarded. */
564 	for (i = 0; i < 10; i++) {
565 		char *curr = &ptr[i * page_size];
566 
567 		ASSERT_FALSE(try_read_write_buf(curr));
568 	}
569 
570 	/* Now unmap some pages in the range so we split. */
571 	ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
572 	ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
573 	ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
574 
575 	/* Make sure the remaining ranges are guarded post-split. */
576 	for (i = 0; i < 2; i++) {
577 		char *curr = &ptr[i * page_size];
578 
579 		ASSERT_FALSE(try_read_write_buf(curr));
580 	}
581 	for (i = 2; i < 5; i++) {
582 		char *curr = &ptr[i * page_size];
583 
584 		ASSERT_FALSE(try_read_write_buf(curr));
585 	}
586 	for (i = 6; i < 8; i++) {
587 		char *curr = &ptr[i * page_size];
588 
589 		ASSERT_FALSE(try_read_write_buf(curr));
590 	}
591 	for (i = 9; i < 10; i++) {
592 		char *curr = &ptr[i * page_size];
593 
594 		ASSERT_FALSE(try_read_write_buf(curr));
595 	}
596 
597 	/* Now map them again - the unmap will have cleared the guards. */
598 	ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE,
599 		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
600 	ASSERT_NE(ptr_new, MAP_FAILED);
601 	ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
602 		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
603 	ASSERT_NE(ptr_new, MAP_FAILED);
604 	ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE,
605 		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
606 	ASSERT_NE(ptr_new, MAP_FAILED);
607 
608 	/* Now make sure guard pages are established. */
609 	for (i = 0; i < 10; i++) {
610 		char *curr = &ptr[i * page_size];
611 		bool result = try_read_write_buf(curr);
612 		bool expect_true = i == 2 || i == 5 || i == 8;
613 
614 		ASSERT_TRUE(expect_true ? result : !result);
615 	}
616 
617 	/* Now guard everything again. */
618 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
619 
620 	/* Make sure the whole range is guarded. */
621 	for (i = 0; i < 10; i++) {
622 		char *curr = &ptr[i * page_size];
623 
624 		ASSERT_FALSE(try_read_write_buf(curr));
625 	}
626 
627 	/* Now split the range into three. */
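	/*
	 * Guard markers live in the page tables rather than in the VMA, so
	 * splitting the VMA via mprotect() should leave them undisturbed.
	 */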
628 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
629 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
630 
631 	/* Make sure the whole range is guarded for read. */
632 	for (i = 0; i < 10; i++) {
633 		char *curr = &ptr[i * page_size];
634 
635 		ASSERT_FALSE(try_read_buf(curr));
636 	}
637 
638 	/* Now reset protection bits so we merge the whole thing. */
639 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
640 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
641 			   PROT_READ | PROT_WRITE), 0);
642 
643 	/* Make sure the whole range is still guarded. */
644 	for (i = 0; i < 10; i++) {
645 		char *curr = &ptr[i * page_size];
646 
647 		ASSERT_FALSE(try_read_write_buf(curr));
648 	}
649 
650 	/* Split range into 3 again... */
651 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
652 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
653 
654 	/* ...and unguard the whole range. */
655 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
656 
657 	/* Make sure the whole range is remedied for read. */
658 	for (i = 0; i < 10; i++) {
659 		char *curr = &ptr[i * page_size];
660 
661 		ASSERT_TRUE(try_read_buf(curr));
662 	}
663 
664 	/* Merge them again. */
665 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
666 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
667 			   PROT_READ | PROT_WRITE), 0);
668 
669 	/* Now ensure the merged range is remedied for read/write. */
670 	for (i = 0; i < 10; i++) {
671 		char *curr = &ptr[i * page_size];
672 
673 		ASSERT_TRUE(try_read_write_buf(curr));
674 	}
675 
676 	/* Cleanup. */
677 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
678 }
679 
680 /* Assert that MADV_DONTNEED does not remove guard markers. */
681 TEST_F(guard_pages, dontneed)
682 {
683 	const unsigned long page_size = self->page_size;
684 	char *ptr;
685 	int i;
686 
687 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
688 		   MAP_ANON | MAP_PRIVATE, -1, 0);
689 	ASSERT_NE(ptr, MAP_FAILED);
690 
691 	/* Back the whole range. */
692 	for (i = 0; i < 10; i++) {
693 		char *curr = &ptr[i * page_size];
694 
695 		*curr = 'y';
696 	}
697 
698 	/* Guard every other page. */
699 	for (i = 0; i < 10; i += 2) {
700 		char *curr = &ptr[i * page_size];
701 		int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
702 
703 		ASSERT_EQ(res, 0);
704 	}
705 
706 	/* Indicate that we don't need any of the range. */
707 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
708 
709 	/* Check to ensure guard markers are still in place. */
710 	for (i = 0; i < 10; i++) {
711 		char *curr = &ptr[i * page_size];
712 		bool result = try_read_buf(curr);
713 
714 		if (i % 2 == 0) {
715 			ASSERT_FALSE(result);
716 		} else {
717 			ASSERT_TRUE(result);
718 			/* Make sure we really did get reset to zero page. */
719 			ASSERT_EQ(*curr, '\0');
720 		}
721 
722 		/* Now write... */
723 		result = try_write_buf(&ptr[i * page_size]);
724 
725 		/* ...and make sure same result. */
726 		ASSERT_TRUE(i % 2 != 0 ? result : !result);
727 	}
728 
729 	/* Cleanup. */
730 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
731 }
732 
733 /* Assert that mlock()'ed pages work correctly with guard markers. */
734 TEST_F(guard_pages, mlock)
735 {
736 	const unsigned long page_size = self->page_size;
737 	char *ptr;
738 	int i;
739 
740 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
741 		   MAP_ANON | MAP_PRIVATE, -1, 0);
742 	ASSERT_NE(ptr, MAP_FAILED);
743 
744 	/* Populate. */
745 	for (i = 0; i < 10; i++) {
746 		char *curr = &ptr[i * page_size];
747 
748 		*curr = 'y';
749 	}
750 
751 	/* Lock. */
752 	ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
753 
754 	/* Now try to guard, should fail with EINVAL. */
755 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
756 	ASSERT_EQ(errno, EINVAL);
757 
758 	/* OK unlock. */
759 	ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
760 
761 	/* Guard first half of range, should now succeed. */
762 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
763 
764 	/* Make sure guard works. */
765 	for (i = 0; i < 10; i++) {
766 		char *curr = &ptr[i * page_size];
767 		bool result = try_read_write_buf(curr);
768 
769 		if (i < 5) {
770 			ASSERT_FALSE(result);
771 		} else {
772 			ASSERT_TRUE(result);
773 			ASSERT_EQ(*curr, 'x');
774 		}
775 	}
776 
777 	/*
778 	 * Now lock the latter part of the range. We can't lock the guard pages,
779 	 * as this would result in the pages being populated and the guarding
780 	 * would cause this to error out.
781 	 */
782 	ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
783 
784 	/*
785 	 * Now remove guard pages, we permit mlock()'d ranges to have guard
786 	 * pages removed as it is a non-destructive operation.
787 	 */
788 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
789 
790 	/* Now check that no guard pages remain. */
791 	for (i = 0; i < 10; i++) {
792 		char *curr = &ptr[i * page_size];
793 
794 		ASSERT_TRUE(try_read_write_buf(curr));
795 	}
796 
797 	/* Cleanup. */
798 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
799 }
800 
801 /*
802  * Assert that moving, extending and shrinking memory via mremap() retains
803  * guard markers where possible.
804  *
805  * - Moving a mapping alone should retain markers as they are.
806  */
807 TEST_F(guard_pages, mremap_move)
808 {
809 	const unsigned long page_size = self->page_size;
810 	char *ptr, *ptr_new;
811 
812 	/* Map 5 pages. */
813 	ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
814 		   MAP_ANON | MAP_PRIVATE, -1, 0);
815 	ASSERT_NE(ptr, MAP_FAILED);
816 
817 	/* Place guard markers at both ends of the 5 page span. */
818 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
819 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
820 
821 	/* Make sure the guard pages are in effect. */
822 	ASSERT_FALSE(try_read_write_buf(ptr));
823 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
824 
825 	/* Map a new region we will move this range into. Doing this ensures
826 	 * that we have reserved a range to map into.
827 	 */
828 	ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE,
829 		       -1, 0);
830 	ASSERT_NE(ptr_new, MAP_FAILED);
831 
832 	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
833 			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
834 
835 	/* Make sure the guard markers are retained. */
836 	ASSERT_FALSE(try_read_write_buf(ptr_new));
837 	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
838 
839 	/*
840 	 * Clean up - we only need to reference the new pointer as we overwrote
841 	 * the PROT_NONE range and moved the existing one.
842 	 */
843 	munmap(ptr_new, 5 * page_size);
844 }
845 
846 /*
847  * Assert that moving, extending and shrinking memory via mremap() retains
848  * guard markers where possible.
849  *
850  * Expanding should retain guard pages, though they now sit at a different
851  * position within the expanded mapping. The user will have to remove them
852  * manually to fix up (they'd have to do the same with a PROT_NONE mapping).
853  */
854 TEST_F(guard_pages, mremap_expand)
855 {
856 	const unsigned long page_size = self->page_size;
857 	char *ptr, *ptr_new;
858 
859 	/* Map 10 pages... */
860 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
861 		   MAP_ANON | MAP_PRIVATE, -1, 0);
862 	ASSERT_NE(ptr, MAP_FAILED);
863 	/* ...But unmap the last 5 so we can later expand into them. */
864 	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
865 
866 	/* Place guard markers at both ends of the 5 page span. */
867 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
868 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
869 
870 	/* Make sure the guarding is in effect. */
871 	ASSERT_FALSE(try_read_write_buf(ptr));
872 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
873 
874 	/* Now expand to 10 pages. */
875 	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
876 	ASSERT_NE(ptr, MAP_FAILED);
877 
878 	/*
879 	 * Make sure the guard markers are retained in their original positions.
880 	 */
881 	ASSERT_FALSE(try_read_write_buf(ptr));
882 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
883 
884 	/* Reserve a region which we can move to and expand into. */
885 	ptr_new = mmap(NULL, 20 * page_size, PROT_NONE,
886 		       MAP_ANON | MAP_PRIVATE, -1, 0);
887 	ASSERT_NE(ptr_new, MAP_FAILED);
888 
889 	/* Now move and expand into it. */
890 	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
891 		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
892 	ASSERT_EQ(ptr, ptr_new);
893 
894 	/*
895 	 * Again, make sure the guard markers are retained in their original positions.
896 	 */
897 	ASSERT_FALSE(try_read_write_buf(ptr));
898 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
899 
900 	/*
901 	 * A real user would have to remove guard markers, but would reasonably
902 	 * expect all characteristics of the mapping to be retained, including
903 	 * guard markers.
904 	 */
905 
906 	/* Cleanup. */
907 	munmap(ptr, 20 * page_size);
908 }
909 /*
910  * Assert that moving, extending and shrinking memory via mremap() retains
911  * guard markers where possible.
912  *
913  * Shrinking removes any markers that lie within the range shrunk away. Again,
914  * if the user were using a PROT_NONE mapping they'd have to manually fix this
915  * up too, so this is OK.
916  */
917 TEST_F(guard_pages, mremap_shrink)
918 {
919 	const unsigned long page_size = self->page_size;
920 	char *ptr;
921 	int i;
922 
923 	/* Map 5 pages. */
924 	ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
925 		   MAP_ANON | MAP_PRIVATE, -1, 0);
926 	ASSERT_NE(ptr, MAP_FAILED);
927 
928 	/* Place guard markers at both ends of the 5 page span. */
929 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
930 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
931 
932 	/* Make sure the guarding is in effect. */
933 	ASSERT_FALSE(try_read_write_buf(ptr));
934 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
935 
936 	/* Now shrink to 3 pages. */
937 	ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
938 	ASSERT_NE(ptr, MAP_FAILED);
939 
940 	/* We expect the guard marker at the start to be retained... */
941 	ASSERT_FALSE(try_read_write_buf(ptr));
942 
943 	/* ...But remaining pages will not have guard markers. */
944 	for (i = 1; i < 3; i++) {
945 		char *curr = &ptr[i * page_size];
946 
947 		ASSERT_TRUE(try_read_write_buf(curr));
948 	}
949 
950 	/*
951 	 * As with expansion, a real user would have to remove guard pages and
952 	 * fixup. But you'd have to do similar manual things with PROT_NONE
953 	 * mappings too.
954 	 */
955 
956 	/*
957 	 * If we expand back to the original size, the end marker will, of
958 	 * course, no longer be present.
959 	 */
960 	ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
961 	ASSERT_NE(ptr, MAP_FAILED);
962 
963 	/* Again, we expect the guard marker at the start to be retained... */
964 	ASSERT_FALSE(try_read_write_buf(ptr));
965 
966 	/* ...But remaining pages will not have guard markers. */
967 	for (i = 1; i < 5; i++) {
968 		char *curr = &ptr[i * page_size];
969 
970 		ASSERT_TRUE(try_read_write_buf(curr));
971 	}
972 
973 	/* Cleanup. */
974 	munmap(ptr, 5 * page_size);
975 }
976 
977 /*
978  * Assert that, on fork, VMAs that do not have VM_WIPEONFORK set retain their
979  * guard pages.
980  */
981 TEST_F(guard_pages, fork)
982 {
983 	const unsigned long page_size = self->page_size;
984 	char *ptr;
985 	pid_t pid;
986 	int i;
987 
988 	/* Map 10 pages. */
989 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
990 		   MAP_ANON | MAP_PRIVATE, -1, 0);
991 	ASSERT_NE(ptr, MAP_FAILED);
992 
993 	/* Establish guard pages in the first 5 pages. */
994 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
995 
996 	pid = fork();
997 	ASSERT_NE(pid, -1);
998 	if (!pid) {
999 		/* This is the child process now. */
1000 
1001 		/* Assert that the guarding is in effect. */
1002 		for (i = 0; i < 10; i++) {
1003 			char *curr = &ptr[i * page_size];
1004 			bool result = try_read_write_buf(curr);
1005 
1006 			ASSERT_TRUE(i >= 5 ? result : !result);
1007 		}
1008 
1009 		/* Now unguard the range. */
1010 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1011 
1012 		exit(0);
1013 	}
1014 
1015 	/* Parent process. */
1016 
1017 	/* Parent simply waits on child. */
1018 	waitpid(pid, NULL, 0);
1019 
1020 	/* Child unguard does not impact parent page table state. */
1021 	for (i = 0; i < 10; i++) {
1022 		char *curr = &ptr[i * page_size];
1023 		bool result = try_read_write_buf(curr);
1024 
1025 		ASSERT_TRUE(i >= 5 ? result : !result);
1026 	}
1027 
1028 	/* Cleanup. */
1029 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1030 }
1031 
1032 /*
1033  * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1034  * behaves as expected.
1035  */
1036 TEST_F(guard_pages, fork_wipeonfork)
1037 {
1038 	const unsigned long page_size = self->page_size;
1039 	char *ptr;
1040 	pid_t pid;
1041 	int i;
1042 
1043 	/* Map 10 pages. */
1044 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
1045 		   MAP_ANON | MAP_PRIVATE, -1, 0);
1046 	ASSERT_NE(ptr, MAP_FAILED);
1047 
1048 	/* Mark wipe on fork. */
1049 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1050 
1051 	/* Guard the first 5 pages. */
1052 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1053 
1054 	pid = fork();
1055 	ASSERT_NE(pid, -1);
1056 	if (!pid) {
1057 		/* This is the child process now. */
1058 
1059 		/* Guard will have been wiped. */
1060 		for (i = 0; i < 10; i++) {
1061 			char *curr = &ptr[i * page_size];
1062 
1063 			ASSERT_TRUE(try_read_write_buf(curr));
1064 		}
1065 
1066 		exit(0);
1067 	}
1068 
1069 	/* Parent process. */
1070 
1071 	waitpid(pid, NULL, 0);
1072 
1073 	/* Guard markers should be in effect. */
1074 	for (i = 0; i < 10; i++) {
1075 		char *curr = &ptr[i * page_size];
1076 		bool result = try_read_write_buf(curr);
1077 
1078 		ASSERT_TRUE(i >= 5 ? result : !result);
1079 	}
1080 
1081 	/* Cleanup. */
1082 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1083 }
1084 
1085 /* Ensure that MADV_FREE retains guard entries as expected. */
1086 TEST_F(guard_pages, lazyfree)
1087 {
1088 	const unsigned long page_size = self->page_size;
1089 	char *ptr;
1090 	int i;
1091 
1092 	/* Map 10 pages. */
1093 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
1094 		   MAP_ANON | MAP_PRIVATE, -1, 0);
1095 	ASSERT_NE(ptr, MAP_FAILED);
1096 
1097 	/* Guard range. */
1098 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1099 
1100 	/* Ensure guarded. */
1101 	for (i = 0; i < 10; i++) {
1102 		char *curr = &ptr[i * page_size];
1103 
1104 		ASSERT_FALSE(try_read_write_buf(curr));
1105 	}
1106 
1107 	/* Lazyfree range. */
1108 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1109 
1110 	/* This should leave the guard markers in place. */
1111 	for (i = 0; i < 10; i++) {
1112 		char *curr = &ptr[i * page_size];
1113 
1114 		ASSERT_FALSE(try_read_write_buf(curr));
1115 	}
1116 
1117 	/* Cleanup. */
1118 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1119 }
1120 
1121 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1122 TEST_F(guard_pages, populate)
1123 {
1124 	const unsigned long page_size = self->page_size;
1125 	char *ptr;
1126 
1127 	/* Map 10 pages. */
1128 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
1129 		   MAP_ANON | MAP_PRIVATE, -1, 0);
1130 	ASSERT_NE(ptr, MAP_FAILED);
1131 
1132 	/* Guard range. */
1133 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1134 
1135 	/* Populate read should error out... */
1136 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1137 	ASSERT_EQ(errno, EFAULT);
1138 
1139 	/* ...as should populate write. */
1140 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1141 	ASSERT_EQ(errno, EFAULT);
1142 
1143 	/* Cleanup. */
1144 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1145 }
1146 
1147 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1148 TEST_F(guard_pages, cold_pageout)
1149 {
1150 	const unsigned long page_size = self->page_size;
1151 	char *ptr;
1152 	int i;
1153 
1154 	/* Map 10 pages. */
1155 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
1156 		   MAP_ANON | MAP_PRIVATE, -1, 0);
1157 	ASSERT_NE(ptr, MAP_FAILED);
1158 
1159 	/* Guard range. */
1160 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1161 
1162 	/* Ensure guarded. */
1163 	for (i = 0; i < 10; i++) {
1164 		char *curr = &ptr[i * page_size];
1165 
1166 		ASSERT_FALSE(try_read_write_buf(curr));
1167 	}
1168 
1169 	/* Now mark cold. This should have no impact on guard markers. */
1170 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1171 
1172 	/* Should remain guarded. */
1173 	for (i = 0; i < 10; i++) {
1174 		char *curr = &ptr[i * page_size];
1175 
1176 		ASSERT_FALSE(try_read_write_buf(curr));
1177 	}
1178 
1179 	/* OK, now page out. This should equally have no effect on markers. */
1180 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1181 
1182 	/* Should remain guarded. */
1183 	for (i = 0; i < 10; i++) {
1184 		char *curr = &ptr[i * page_size];
1185 
1186 		ASSERT_FALSE(try_read_write_buf(curr));
1187 	}
1188 
1189 	/* Cleanup. */
1190 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1191 }
1192 
1193 /* Ensure that guard pages do not break userfaultfd. */
1194 TEST_F(guard_pages, uffd)
1195 {
1196 	const unsigned long page_size = self->page_size;
1197 	int uffd;
1198 	char *ptr;
1199 	int i;
1200 	struct uffdio_api api = {
1201 		.api = UFFD_API,
1202 		.features = 0,
1203 	};
1204 	struct uffdio_register reg;
1205 	struct uffdio_range range;
1206 
1207 	/* Set up uffd. */
1208 	uffd = userfaultfd(0);
1209 	if (uffd == -1 && errno == EPERM)
1210 		ksft_exit_skip("No userfaultfd permissions, try running as root.\n");
1211 	ASSERT_NE(uffd, -1);
1212 
1213 	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1214 
1215 	/* Map 10 pages. */
1216 	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
1217 		   MAP_ANON | MAP_PRIVATE, -1, 0);
1218 	ASSERT_NE(ptr, MAP_FAILED);
1219 
1220 	/* Register the range with uffd. */
1221 	range.start = (unsigned long)ptr;
1222 	range.len = 10 * page_size;
1223 	reg.range = range;
1224 	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1225 	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1226 
1227 	/* Guard the range. This should not trigger the uffd. */
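	/*
	 * Installing markers does not fault anything in, so no uffd MISSING
	 * events should be generated by this operation.
	 */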
1228 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1229 
1230 	/* The guarding should behave as usual with no uffd intervention. */
1231 	for (i = 0; i < 10; i++) {
1232 		char *curr = &ptr[i * page_size];
1233 
1234 		ASSERT_FALSE(try_read_write_buf(curr));
1235 	}
1236 
1237 	/* Cleanup. */
1238 	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1239 	close(uffd);
1240 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1241 }
1242 
1243 TEST_HARNESS_MAIN
1244