xref: /linux/tools/testing/selftests/mm/guard-regions.c (revision 509d3f45847627f4c5cdce004c3ec79262b5239c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #define _GNU_SOURCE
4 #include "kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/limits.h>
10 #include <linux/userfaultfd.h>
11 #include <linux/fs.h>
12 #include <setjmp.h>
13 #include <signal.h>
14 #include <stdbool.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/ioctl.h>
19 #include <sys/mman.h>
20 #include <sys/syscall.h>
21 #include <sys/uio.h>
22 #include <unistd.h>
23 #include "vm_util.h"
24 
25 #include "../pidfd/pidfd.h"
26 
27 /*
28  * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
29  *
30  * "If the signal occurs other than as the result of calling the abort or raise
31  *  function, the behavior is undefined if the signal handler refers to any
32  *  object with static storage duration other than by assigning a value to an
33  *  object declared as volatile sig_atomic_t"
34  */
35 static volatile sig_atomic_t signal_jump_set;
36 static sigjmp_buf signal_jmp_buf;
37 
38 /*
39  * How is the test backing the mapping being tested?
40  */
41 enum backing_type {
42 	ANON_BACKED,
43 	SHMEM_BACKED,
44 	LOCAL_FILE_BACKED,
45 };
46 
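/* Per-test fixture state: the page size plus the backing file, if any. */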
47 FIXTURE(guard_regions)
48 {
49 	unsigned long page_size;
50 	char path[PATH_MAX];
51 	int fd;
52 };
53 
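/* Test variants differ only in how the mapping under test is backed. */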
54 FIXTURE_VARIANT(guard_regions)
55 {
56 	enum backing_type backing;
57 };
58 
59 FIXTURE_VARIANT_ADD(guard_regions, anon)
60 {
61 	.backing = ANON_BACKED,
62 };
63 
64 FIXTURE_VARIANT_ADD(guard_regions, shmem)
65 {
66 	.backing = SHMEM_BACKED,
67 };
68 
69 FIXTURE_VARIANT_ADD(guard_regions, file)
70 {
71 	.backing = LOCAL_FILE_BACKED,
72 };
73 
74 static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
75 {
76 	switch (variant->backing) {
77 	case ANON_BACKED:
78 	case SHMEM_BACKED:
79 		return true;
80 	default:
81 		return false;
82 	}
83 }
84 
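/*
 * Wrapper around mmap() which picks flags, fd and offset to suit the backing
 * type under test: anonymous mappings use MAP_PRIVATE | MAP_ANON, while shmem
 * and local file mappings are MAP_SHARED against self->fd.
 */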
85 static void *mmap_(FIXTURE_DATA(guard_regions) * self,
86 		   const FIXTURE_VARIANT(guard_regions) * variant,
87 		   void *addr, size_t length, int prot, int extra_flags,
88 		   off_t offset)
89 {
90 	int fd;
91 	int flags = extra_flags;
92 
93 	switch (variant->backing) {
94 	case ANON_BACKED:
95 		flags |= MAP_PRIVATE | MAP_ANON;
96 		fd = -1;
97 		offset = 0;
98 		break;
99 	case SHMEM_BACKED:
100 	case LOCAL_FILE_BACKED:
101 		flags |= MAP_SHARED;
102 		fd = self->fd;
103 		break;
104 	default:
105 		ksft_exit_fail();
106 		break;
107 	}
108 
109 	return mmap(addr, length, prot, flags, fd, offset);
110 }
111 
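/* Raw syscall wrapper, as libc may not provide a userfaultfd() function. */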
112 static int userfaultfd(int flags)
113 {
114 	return syscall(SYS_userfaultfd, flags);
115 }
116 
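/*
 * SIGSEGV handler: if a test access is in progress, jump back to the
 * sigsetjmp() point in try_access_buf() rather than terminating the test.
 */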
117 static void handle_fatal(int c)
118 {
119 	if (!signal_jump_set)
120 		return;
121 
122 	siglongjmp(signal_jmp_buf, c);
123 }
124 
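/* Raw syscall wrapper for process_madvise(). */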
125 static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
126 				   size_t n, int advice, unsigned int flags)
127 {
128 	return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
129 }
130 
131 /*
132  * Enable our signal catcher and try to read/write the specified buffer. The
133  * return value indicates whether the read/write succeeds without a fatal
134  * signal.
135  */
136 static bool try_access_buf(char *ptr, bool write)
137 {
138 	bool failed;
139 
140 	/* Tell signal handler to jump back here on fatal signal. */
141 	signal_jump_set = true;
142 	/* If a fatal signal arose, we will jump back here and failed is set. */
143 	failed = sigsetjmp(signal_jmp_buf, 0) != 0;
144 
145 	if (!failed) {
146 		if (write)
147 			*ptr = 'x';
148 		else
149 			FORCE_READ(*ptr);
150 	}
151 
152 	signal_jump_set = false;
153 	return !failed;
154 }
155 
156 /* Try and read from a buffer, return true if no fatal signal. */
157 static bool try_read_buf(char *ptr)
158 {
159 	return try_access_buf(ptr, false);
160 }
161 
162 /* Try and write to a buffer, return true if no fatal signal. */
163 static bool try_write_buf(char *ptr)
164 {
165 	return try_access_buf(ptr, true);
166 }
167 
168 /*
169  * Try and BOTH read from AND write to a buffer, return true if BOTH operations
170  * succeed.
171  */
172 static bool try_read_write_buf(char *ptr)
173 {
174 	return try_read_buf(ptr) && try_write_buf(ptr);
175 }
176 
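/* Install the SIGSEGV handler used to detect faults on guard regions. */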
177 static void setup_sighandler(void)
178 {
179 	struct sigaction act = {
180 		.sa_handler = &handle_fatal,
181 		.sa_flags = SA_NODEFER,
182 	};
183 
184 	sigemptyset(&act.sa_mask);
185 	if (sigaction(SIGSEGV, &act, NULL))
186 		ksft_exit_fail_perror("sigaction");
187 }
188 
189 static void teardown_sighandler(void)
190 {
191 	struct sigaction act = {
192 		.sa_handler = SIG_DFL,
193 		.sa_flags = SA_NODEFER,
194 	};
195 
196 	sigemptyset(&act.sa_mask);
197 	sigaction(SIGSEGV, &act, NULL);
198 }
199 
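/*
 * Create a temporary test file under the given path prefix; the resulting
 * path is written to 'path' and the open file descriptor is returned.
 */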
200 static int open_file(const char *prefix, char *path)
201 {
202 	int fd;
203 
204 	snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
205 	fd = mkstemp(path);
206 	if (fd < 0)
207 		ksft_exit_fail_perror("mkstemp");
208 
209 	return fd;
210 }
211 
212 /* Establish a varying pattern in a buffer. */
213 static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
214 {
215 	size_t i;
216 
217 	for (i = 0; i < num_pages; i++) {
218 		char *ptr2 = &ptr[i * page_size];
219 
220 		memset(ptr2, 'a' + (i % 26), page_size);
221 	}
222 }
223 
224 /*
225  * Check that a buffer contains the pattern set by set_pattern(), starting at a
226  * page offset of pgoff within the buffer.
227  */
228 static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
229 				 size_t pgoff)
230 {
231 	size_t i;
232 
233 	for (i = 0; i < num_pages * page_size; i++) {
234 		size_t offset = pgoff * page_size + i;
235 		char actual = ptr[offset];
236 		char expected = 'a' + ((offset / page_size) % 26);
237 
238 		if (actual != expected)
239 			return false;
240 	}
241 
242 	return true;
243 }
244 
245 /* Check that a buffer contains the pattern set by set_pattern(). */
246 static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
247 {
248 	return check_pattern_offset(ptr, num_pages, page_size, 0);
249 }
250 
251 /* Determine if a buffer contains only repetitions of a specified char. */
252 static bool is_buf_eq(char *buf, size_t size, char chr)
253 {
254 	size_t i;
255 
256 	for (i = 0; i < size; i++) {
257 		if (buf[i] != chr)
258 			return false;
259 	}
260 
261 	return true;
262 }
263 
264 /*
265  * Some file systems have issues with merging due to changing merge-sensitive
266  * parameters in the .mmap callback, and prior to .mmap_prepare being
267  * implemented everywhere this results in an unexpected failure to merge
268  * (e.g. overlayfs).
269  *
270  * Perform a simple test to see if the local file system suffers from this; if
271  * it does, we can skip test logic that assumes local file system merging is
272  * sane.
273  */
274 static bool local_fs_has_sane_mmap(FIXTURE_DATA(guard_regions) * self,
275 				   const FIXTURE_VARIANT(guard_regions) * variant)
276 {
277 	const unsigned long page_size = self->page_size;
278 	char *ptr, *ptr2;
279 	struct procmap_fd procmap;
280 
281 	if (variant->backing != LOCAL_FILE_BACKED)
282 		return true;
283 
284 	/* Map 10 pages. */
285 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
286 	if (ptr == MAP_FAILED)
287 		return false;
288 	/* Unmap the middle. */
289 	munmap(&ptr[5 * page_size], page_size);
290 
291 	/* Map again. */
292 	ptr2 = mmap_(self, variant, &ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
293 		     MAP_FIXED, 5 * page_size);
294 
295 	if (ptr2 == MAP_FAILED)
296 		return false;
297 
298 	/* Now make sure they all merged. */
299 	if (open_self_procmap(&procmap) != 0)
300 		return false;
301 	if (!find_vma_procmap(&procmap, ptr))
302 		return false;
303 	if (procmap.query.vma_start != (unsigned long)ptr)
304 		return false;
305 	if (procmap.query.vma_end != (unsigned long)ptr + 10 * page_size)
306 		return false;
307 	close_procmap(&procmap);
308 
309 	return true;
310 }
311 
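/*
 * Per-test setup: record the page size, install the signal handler and, for
 * the shmem/file variants, create a backing object truncated to 100 pages.
 */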
312 FIXTURE_SETUP(guard_regions)
313 {
314 	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
315 	setup_sighandler();
316 
317 	switch (variant->backing) {
318 	case ANON_BACKED:
319 		return;
320 	case LOCAL_FILE_BACKED:
321 		self->fd = open_file("", self->path);
322 		break;
323 	case SHMEM_BACKED:
324 		self->fd = memfd_create(self->path, 0);
325 		break;
326 	}
327 
328 	/* We truncate the file to at least 100 pages; tests can modify this as needed. */
329 	ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
330 };
331 
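/*
 * Per-test teardown: restore default SIGSEGV handling and clean up any
 * backing file descriptor and path.
 */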
332 FIXTURE_TEARDOWN_PARENT(guard_regions)
333 {
334 	teardown_sighandler();
335 
336 	if (variant->backing == ANON_BACKED)
337 		return;
338 
339 	if (self->fd >= 0)
340 		close(self->fd);
341 
342 	if (self->path[0] != '\0')
343 		unlink(self->path);
344 }
345 
346 TEST_F(guard_regions, basic)
347 {
348 	const unsigned long NUM_PAGES = 10;
349 	const unsigned long page_size = self->page_size;
350 	char *ptr;
351 	int i;
352 
353 	ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
354 		    PROT_READ | PROT_WRITE, 0, 0);
355 	ASSERT_NE(ptr, MAP_FAILED);
356 
357 	/* Trivially assert we can touch the first page. */
358 	ASSERT_TRUE(try_read_write_buf(ptr));
359 
360 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
361 
362 	/* Establish that 1st page SIGSEGV's. */
363 	ASSERT_FALSE(try_read_write_buf(ptr));
364 
365 	/* Ensure we can touch everything else. */
366 	for (i = 1; i < NUM_PAGES; i++) {
367 		char *curr = &ptr[i * page_size];
368 
369 		ASSERT_TRUE(try_read_write_buf(curr));
370 	}
371 
372 	/* Establish a guard page at the end of the mapping. */
373 	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
374 			  MADV_GUARD_INSTALL), 0);
375 
376 	/* Check that both guard pages result in SIGSEGV. */
377 	ASSERT_FALSE(try_read_write_buf(ptr));
378 	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
379 
380 	/* Remove the first guard page. */
381 	ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
382 
383 	/* Make sure we can touch it. */
384 	ASSERT_TRUE(try_read_write_buf(ptr));
385 
386 	/* Remove the last guard page. */
387 	ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
388 			     MADV_GUARD_REMOVE));
389 
390 	/* Make sure we can touch it. */
391 	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
392 
393 	/*
394 	 * Test setting a _range_ of pages, namely the first 3. The first of
395 	 * these will have been faulted in, so this also tests that we can
396 	 * install guard pages over backed pages.
397 	 */
398 	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
399 
400 	/* Make sure they are all guard pages. */
401 	for (i = 0; i < 3; i++) {
402 		char *curr = &ptr[i * page_size];
403 
404 		ASSERT_FALSE(try_read_write_buf(curr));
405 	}
406 
407 	/* Make sure the rest are not. */
408 	for (i = 3; i < NUM_PAGES; i++) {
409 		char *curr = &ptr[i * page_size];
410 
411 		ASSERT_TRUE(try_read_write_buf(curr));
412 	}
413 
414 	/* Remove guard pages. */
415 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
416 
417 	/* Now make sure we can touch everything. */
418 	for (i = 0; i < NUM_PAGES; i++) {
419 		char *curr = &ptr[i * page_size];
420 
421 		ASSERT_TRUE(try_read_write_buf(curr));
422 	}
423 
424 	/*
425 	 * Now remove all guard pages, make sure we don't remove existing
426 	 * entries.
427 	 */
428 	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
429 
430 	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
431 		char chr = ptr[i];
432 
433 		ASSERT_EQ(chr, 'x');
434 	}
435 
436 	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
437 }
438 
439 /* Assert that operations applied across multiple VMAs work as expected. */
440 TEST_F(guard_regions, multi_vma)
441 {
442 	const unsigned long page_size = self->page_size;
443 	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
444 	int i;
445 
446 	/* Reserve a 100 page region over which we can install VMAs. */
447 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
448 			   PROT_NONE, 0, 0);
449 	ASSERT_NE(ptr_region, MAP_FAILED);
450 
451 	/* Place a VMA of 10 pages size at the start of the region. */
452 	ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
453 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
454 	ASSERT_NE(ptr1, MAP_FAILED);
455 
456 	/* Place a VMA of 5 pages size 50 pages into the region. */
457 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
458 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
459 	ASSERT_NE(ptr2, MAP_FAILED);
460 
461 	/* Place a VMA of 20 pages size at the end of the region. */
462 	ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
463 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
464 	ASSERT_NE(ptr3, MAP_FAILED);
465 
466 	/* Unmap gaps. */
467 	ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
468 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
469 
470 	/*
471 	 * We end up with VMAs like this:
472 	 *
473 	 * 0    10 .. 50   55 .. 80   100
474 	 * [---]      [---]      [---]
475 	 */
476 
477 	/*
478 	 * Now mark the whole range as guard pages and make sure all VMAs are as
479 	 * such.
480 	 */
481 
482 	/*
483 	 * madvise() is certifiable and lets you perform operations over gaps:
484 	 * everything works, but it indicates an error by returning -1 with
485 	 * errno set to ENOMEM. If anything genuinely runs out of memory, errno
486 	 * is also set to ENOMEM. You are meant to guess which is which.
487 	 */
488 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
489 	ASSERT_EQ(errno, ENOMEM);
490 
491 	for (i = 0; i < 10; i++) {
492 		char *curr = &ptr1[i * page_size];
493 
494 		ASSERT_FALSE(try_read_write_buf(curr));
495 	}
496 
497 	for (i = 0; i < 5; i++) {
498 		char *curr = &ptr2[i * page_size];
499 
500 		ASSERT_FALSE(try_read_write_buf(curr));
501 	}
502 
503 	for (i = 0; i < 20; i++) {
504 		char *curr = &ptr3[i * page_size];
505 
506 		ASSERT_FALSE(try_read_write_buf(curr));
507 	}
508 
509 	/* Now remove guard pages over the range and assert the opposite. */
510 
511 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
512 	ASSERT_EQ(errno, ENOMEM);
513 
514 	for (i = 0; i < 10; i++) {
515 		char *curr = &ptr1[i * page_size];
516 
517 		ASSERT_TRUE(try_read_write_buf(curr));
518 	}
519 
520 	for (i = 0; i < 5; i++) {
521 		char *curr = &ptr2[i * page_size];
522 
523 		ASSERT_TRUE(try_read_write_buf(curr));
524 	}
525 
526 	for (i = 0; i < 20; i++) {
527 		char *curr = &ptr3[i * page_size];
528 
529 		ASSERT_TRUE(try_read_write_buf(curr));
530 	}
531 
532 	/* Now map incompatible VMAs in the gaps. */
533 	ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
534 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
535 	ASSERT_NE(ptr, MAP_FAILED);
536 	ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
537 		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
538 	ASSERT_NE(ptr, MAP_FAILED);
539 
540 	/*
541 	 * We end up with VMAs like this:
542 	 *
543 	 * 0    10 .. 50   55 .. 80   100
544 	 * [---][xxxx][---][xxxx][---]
545 	 *
546 	 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
547 	 * them.
548 	 */
549 
550 	/* Multiple VMAs adjacent to one another should result in no error. */
551 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
552 	for (i = 0; i < 100; i++) {
553 		char *curr = &ptr_region[i * page_size];
554 
555 		ASSERT_FALSE(try_read_write_buf(curr));
556 	}
557 	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
558 	for (i = 0; i < 100; i++) {
559 		char *curr = &ptr_region[i * page_size];
560 
561 		ASSERT_TRUE(try_read_write_buf(curr));
562 	}
563 
564 	/* Cleanup. */
565 	ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
566 }
567 
568 /*
569  * Assert that batched operations performed using process_madvise() work as
570  * expected.
571  */
572 TEST_F(guard_regions, process_madvise)
573 {
574 	const unsigned long page_size = self->page_size;
575 	char *ptr_region, *ptr1, *ptr2, *ptr3;
576 	ssize_t count;
577 	struct iovec vec[6];
578 
579 	/* Reserve region to map over. */
580 	ptr_region = mmap_(self, variant, NULL, 100 * page_size,
581 			   PROT_NONE, 0, 0);
582 	ASSERT_NE(ptr_region, MAP_FAILED);
583 
584 	/*
585 	 * 10 pages offset 1 page into the reserve region. We MAP_POPULATE so
586 	 * that page table entries are established up front, testing that guard
587 	 * installation correctly overwrites existing entries.
588 	 */
589 	ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
590 		     PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
591 	ASSERT_NE(ptr1, MAP_FAILED);
592 	/* We want guard markers at start/end of each VMA. */
593 	vec[0].iov_base = ptr1;
594 	vec[0].iov_len = page_size;
595 	vec[1].iov_base = &ptr1[9 * page_size];
596 	vec[1].iov_len = page_size;
597 
598 	/* 5 pages offset 50 pages into reserve region. */
599 	ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
600 		     PROT_READ | PROT_WRITE, MAP_FIXED, 0);
601 	ASSERT_NE(ptr2, MAP_FAILED);
602 	vec[2].iov_base = ptr2;
603 	vec[2].iov_len = page_size;
604 	vec[3].iov_base = &ptr2[4 * page_size];
605 	vec[3].iov_len = page_size;
606 
607 	/* 20 pages offset 79 pages into reserve region. */
608 	ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
609 		    PROT_READ | PROT_WRITE, MAP_FIXED, 0);
610 	ASSERT_NE(ptr3, MAP_FAILED);
611 	vec[4].iov_base = ptr3;
612 	vec[4].iov_len = page_size;
613 	vec[5].iov_base = &ptr3[19 * page_size];
614 	vec[5].iov_len = page_size;
615 
616 	/* Free surrounding VMAs. */
617 	ASSERT_EQ(munmap(ptr_region, page_size), 0);
618 	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
619 	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
620 	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
621 
622 	/* Now guard in one step. */
623 	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
624 
625 	/* OK we don't have permission to do this, skip. */
626 	if (count == -1 && errno == EPERM)
627 		SKIP(return, "No process_madvise() permissions, try running as root.\n");
628 
629 	/* Returns the number of bytes advised. */
630 	ASSERT_EQ(count, 6 * page_size);
631 
632 	/* Now make sure the guarding was applied. */
633 
634 	ASSERT_FALSE(try_read_write_buf(ptr1));
635 	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
636 
637 	ASSERT_FALSE(try_read_write_buf(ptr2));
638 	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
639 
640 	ASSERT_FALSE(try_read_write_buf(ptr3));
641 	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
642 
643 	/* Now do the same with unguard... */
644 	count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
645 
646 	/* ...and everything should now succeed. */
647 
648 	ASSERT_TRUE(try_read_write_buf(ptr1));
649 	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
650 
651 	ASSERT_TRUE(try_read_write_buf(ptr2));
652 	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
653 
654 	ASSERT_TRUE(try_read_write_buf(ptr3));
655 	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
656 
657 	/* Cleanup. */
658 	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
659 	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
660 	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
661 }
662 
663 /* Assert that unmapping ranges does not leave guard markers behind. */
664 TEST_F(guard_regions, munmap)
665 {
666 	const unsigned long page_size = self->page_size;
667 	char *ptr, *ptr_new1, *ptr_new2;
668 
669 	ptr = mmap_(self, variant, NULL, 10 * page_size,
670 		    PROT_READ | PROT_WRITE, 0, 0);
671 	ASSERT_NE(ptr, MAP_FAILED);
672 
673 	/* Guard first and last pages. */
674 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
675 	ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
676 
677 	/* Assert that they are guarded. */
678 	ASSERT_FALSE(try_read_write_buf(ptr));
679 	ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
680 
681 	/* Unmap them. */
682 	ASSERT_EQ(munmap(ptr, page_size), 0);
683 	ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
684 
685 	/* Map over them. */
686 	ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
687 			 MAP_FIXED, 0);
688 	ASSERT_NE(ptr_new1, MAP_FAILED);
689 	ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
690 			 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
691 	ASSERT_NE(ptr_new2, MAP_FAILED);
692 
693 	/* Assert that they are now not guarded. */
694 	ASSERT_TRUE(try_read_write_buf(ptr_new1));
695 	ASSERT_TRUE(try_read_write_buf(ptr_new2));
696 
697 	/* Cleanup. */
698 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
699 }
700 
701 /* Assert that mprotect() operations have no bearing on guard markers. */
702 TEST_F(guard_regions, mprotect)
703 {
704 	const unsigned long page_size = self->page_size;
705 	char *ptr;
706 	int i;
707 
708 	ptr = mmap_(self, variant, NULL, 10 * page_size,
709 		    PROT_READ | PROT_WRITE, 0, 0);
710 	ASSERT_NE(ptr, MAP_FAILED);
711 
712 	/* Guard the middle of the range. */
713 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
714 			  MADV_GUARD_INSTALL), 0);
715 
716 	/* Assert that it is indeed guarded. */
717 	ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
718 	ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
719 
720 	/* Now make these pages read-only. */
721 	ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
722 
723 	/* Make sure the range is still guarded. */
724 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
725 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
726 
727 	/* Make sure we can guard again without issue. */
728 	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
729 			  MADV_GUARD_INSTALL), 0);
730 
731 	/* Make sure the range is, yet again, still guarded. */
732 	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
733 	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
734 
735 	/* Now unguard the whole range. */
736 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
737 
738 	/* Make sure the whole range is readable. */
739 	for (i = 0; i < 10; i++) {
740 		char *curr = &ptr[i * page_size];
741 
742 		ASSERT_TRUE(try_read_buf(curr));
743 	}
744 
745 	/* Cleanup. */
746 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
747 }
748 
749 /* Split and merge VMAs and make sure guard pages still behave. */
750 TEST_F(guard_regions, split_merge)
751 {
752 	const unsigned long page_size = self->page_size;
753 	char *ptr, *ptr_new;
754 	int i;
755 
756 	ptr = mmap_(self, variant, NULL, 10 * page_size,
757 		    PROT_READ | PROT_WRITE, 0, 0);
758 	ASSERT_NE(ptr, MAP_FAILED);
759 
760 	/* Guard the whole range. */
761 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
762 
763 	/* Make sure the whole range is guarded. */
764 	for (i = 0; i < 10; i++) {
765 		char *curr = &ptr[i * page_size];
766 
767 		ASSERT_FALSE(try_read_write_buf(curr));
768 	}
769 
770 	/* Now unmap some pages in the range so we split. */
771 	ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
772 	ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
773 	ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
774 
775 	/* Make sure the remaining ranges are guarded post-split. */
776 	for (i = 0; i < 2; i++) {
777 		char *curr = &ptr[i * page_size];
778 
779 		ASSERT_FALSE(try_read_write_buf(curr));
780 	}
781 	for (i = 2; i < 5; i++) {
782 		char *curr = &ptr[i * page_size];
783 
784 		ASSERT_FALSE(try_read_write_buf(curr));
785 	}
786 	for (i = 6; i < 8; i++) {
787 		char *curr = &ptr[i * page_size];
788 
789 		ASSERT_FALSE(try_read_write_buf(curr));
790 	}
791 	for (i = 9; i < 10; i++) {
792 		char *curr = &ptr[i * page_size];
793 
794 		ASSERT_FALSE(try_read_write_buf(curr));
795 	}
796 
797 	/* Now map them again - the unmap will have cleared the guards. */
798 	ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
799 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
800 	ASSERT_NE(ptr_new, MAP_FAILED);
801 	ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
802 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
803 	ASSERT_NE(ptr_new, MAP_FAILED);
804 	ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
805 			PROT_READ | PROT_WRITE, MAP_FIXED, 0);
806 	ASSERT_NE(ptr_new, MAP_FAILED);
807 
808 	/* Now make sure guard pages are established. */
809 	for (i = 0; i < 10; i++) {
810 		char *curr = &ptr[i * page_size];
811 		bool result = try_read_write_buf(curr);
812 		bool expect_true = i == 2 || i == 5 || i == 8;
813 
814 		ASSERT_TRUE(expect_true ? result : !result);
815 	}
816 
817 	/* Now guard everything again. */
818 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
819 
820 	/* Make sure the whole range is guarded. */
821 	for (i = 0; i < 10; i++) {
822 		char *curr = &ptr[i * page_size];
823 
824 		ASSERT_FALSE(try_read_write_buf(curr));
825 	}
826 
827 	/* Now split the range into three. */
828 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
829 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
830 
831 	/* Make sure the whole range is guarded for read. */
832 	for (i = 0; i < 10; i++) {
833 		char *curr = &ptr[i * page_size];
834 
835 		ASSERT_FALSE(try_read_buf(curr));
836 	}
837 
838 	/* Now reset protection bits so we merge the whole thing. */
839 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
840 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
841 			   PROT_READ | PROT_WRITE), 0);
842 
843 	/* Make sure the whole range is still guarded. */
844 	for (i = 0; i < 10; i++) {
845 		char *curr = &ptr[i * page_size];
846 
847 		ASSERT_FALSE(try_read_write_buf(curr));
848 	}
849 
850 	/* Split range into 3 again... */
851 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
852 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
853 
854 	/* ...and unguard the whole range. */
855 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
856 
857 	/* Make sure the whole range is remedied for read. */
858 	for (i = 0; i < 10; i++) {
859 		char *curr = &ptr[i * page_size];
860 
861 		ASSERT_TRUE(try_read_buf(curr));
862 	}
863 
864 	/* Merge them again. */
865 	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
866 	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
867 			   PROT_READ | PROT_WRITE), 0);
868 
869 	/* Now ensure the merged range is remedied for read/write. */
870 	for (i = 0; i < 10; i++) {
871 		char *curr = &ptr[i * page_size];
872 
873 		ASSERT_TRUE(try_read_write_buf(curr));
874 	}
875 
876 	/* Cleanup. */
877 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
878 }
879 
880 /* Assert that MADV_DONTNEED does not remove guard markers. */
881 TEST_F(guard_regions, dontneed)
882 {
883 	const unsigned long page_size = self->page_size;
884 	char *ptr;
885 	int i;
886 
887 	ptr = mmap_(self, variant, NULL, 10 * page_size,
888 		    PROT_READ | PROT_WRITE, 0, 0);
889 	ASSERT_NE(ptr, MAP_FAILED);
890 
891 	/* Back the whole range. */
892 	for (i = 0; i < 10; i++) {
893 		char *curr = &ptr[i * page_size];
894 
895 		*curr = 'y';
896 	}
897 
898 	/* Guard every other page. */
899 	for (i = 0; i < 10; i += 2) {
900 		char *curr = &ptr[i * page_size];
901 		int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
902 
903 		ASSERT_EQ(res, 0);
904 	}
905 
906 	/* Indicate that we don't need any of the range. */
907 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
908 
909 	/* Check to ensure guard markers are still in place. */
910 	for (i = 0; i < 10; i++) {
911 		char *curr = &ptr[i * page_size];
912 		bool result = try_read_buf(curr);
913 
914 		if (i % 2 == 0) {
915 			ASSERT_FALSE(result);
916 		} else {
917 			ASSERT_TRUE(result);
918 			switch (variant->backing) {
919 			case ANON_BACKED:
920 				/* If anon, then we get a zero page. */
921 				ASSERT_EQ(*curr, '\0');
922 				break;
923 			default:
924 				/* Otherwise, we get the file data. */
925 				ASSERT_EQ(*curr, 'y');
926 				break;
927 			}
928 		}
929 
930 		/* Now write... */
931 		result = try_write_buf(&ptr[i * page_size]);
932 
933 		/* ...and make sure same result. */
934 		ASSERT_TRUE(i % 2 != 0 ? result : !result);
935 	}
936 
937 	/* Cleanup. */
938 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
939 }
940 
941 /* Assert that mlock()'ed pages work correctly with guard markers. */
942 TEST_F(guard_regions, mlock)
943 {
944 	const unsigned long page_size = self->page_size;
945 	char *ptr;
946 	int i;
947 
948 	ptr = mmap_(self, variant, NULL, 10 * page_size,
949 		    PROT_READ | PROT_WRITE, 0, 0);
950 	ASSERT_NE(ptr, MAP_FAILED);
951 
952 	/* Populate. */
953 	for (i = 0; i < 10; i++) {
954 		char *curr = &ptr[i * page_size];
955 
956 		*curr = 'y';
957 	}
958 
959 	/* Lock. */
960 	ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
961 
962 	/* Now try to guard, should fail with EINVAL. */
963 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
964 	ASSERT_EQ(errno, EINVAL);
965 
966 	/* OK unlock. */
967 	ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
968 
969 	/* Guard first half of range, should now succeed. */
970 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
971 
972 	/* Make sure guard works. */
973 	for (i = 0; i < 10; i++) {
974 		char *curr = &ptr[i * page_size];
975 		bool result = try_read_write_buf(curr);
976 
977 		if (i < 5) {
978 			ASSERT_FALSE(result);
979 		} else {
980 			ASSERT_TRUE(result);
981 			ASSERT_EQ(*curr, 'x');
982 		}
983 	}
984 
985 	/*
986 	 * Now lock the latter part of the range. We can't lock the guard pages,
987 	 * as this would result in the pages being populated and the guarding
988 	 * would cause this to error out.
989 	 */
990 	ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
991 
992 	/*
993 	 * Now remove guard pages, we permit mlock()'d ranges to have guard
994 	 * pages removed as it is a non-destructive operation.
995 	 */
996 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
997 
998 	/* Now check that no guard pages remain. */
999 	for (i = 0; i < 10; i++) {
1000 		char *curr = &ptr[i * page_size];
1001 
1002 		ASSERT_TRUE(try_read_write_buf(curr));
1003 	}
1004 
1005 	/* Cleanup. */
1006 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1007 }
1008 
1009 /*
1010  * Assert that moving, extending and shrinking memory via mremap() retains
1011  * guard markers where possible.
1012  *
1013  * - Moving a mapping alone should retain markers as they are.
1014  */
1015 TEST_F(guard_regions, mremap_move)
1016 {
1017 	const unsigned long page_size = self->page_size;
1018 	char *ptr, *ptr_new;
1019 
1020 	/* Map 5 pages. */
1021 	ptr = mmap_(self, variant, NULL, 5 * page_size,
1022 		    PROT_READ | PROT_WRITE, 0, 0);
1023 	ASSERT_NE(ptr, MAP_FAILED);
1024 
1025 	/* Place guard markers at both ends of the 5 page span. */
1026 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1027 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1028 
1029 	/* Make sure the guard pages are in effect. */
1030 	ASSERT_FALSE(try_read_write_buf(ptr));
1031 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1032 
1033 	/* Map a new region we will move this range into. Doing this ensures
1034 	 * that we have reserved a range to map into.
1035 	 */
1036 	ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
1037 	ASSERT_NE(ptr_new, MAP_FAILED);
1038 
1039 	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
1040 			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
1041 
1042 	/* Make sure the guard markers are retained. */
1043 	ASSERT_FALSE(try_read_write_buf(ptr_new));
1044 	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
1045 
1046 	/*
1047 	 * Clean up - we only need reference the new pointer as we overwrote the
1048 	 * PROT_NONE range and moved the existing one.
1049 	 */
1050 	munmap(ptr_new, 5 * page_size);
1051 }
1052 
1053 /*
1054  * Assert that moving, extending and shrinking memory via mremap() retains
1055  * guard markers where possible.
1056  *
1057  * Expanding should retain guard pages, only now in a different position. The user
1058  * will have to remove guard pages manually to fix up (they'd have to do the
1059  * same if it were a PROT_NONE mapping).
1060  */
1061 TEST_F(guard_regions, mremap_expand)
1062 {
1063 	const unsigned long page_size = self->page_size;
1064 	char *ptr, *ptr_new;
1065 
1066 	/* Map 10 pages... */
1067 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1068 		    PROT_READ | PROT_WRITE, 0, 0);
1069 	ASSERT_NE(ptr, MAP_FAILED);
1070 	/* ...But unmap the last 5 so we can ensure we can expand into them. */
1071 	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
1072 
1073 	/* Place guard markers at both ends of the 5 page span. */
1074 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1075 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1076 
1077 	/* Make sure the guarding is in effect. */
1078 	ASSERT_FALSE(try_read_write_buf(ptr));
1079 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1080 
1081 	/* Now expand to 10 pages. */
1082 	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
1083 	ASSERT_NE(ptr, MAP_FAILED);
1084 
1085 	/*
1086 	 * Make sure the guard markers are retained in their original positions.
1087 	 */
1088 	ASSERT_FALSE(try_read_write_buf(ptr));
1089 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1090 
1091 	/* Reserve a region which we can move to and expand into. */
1092 	ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
1093 	ASSERT_NE(ptr_new, MAP_FAILED);
1094 
1095 	/* Now move and expand into it. */
1096 	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
1097 		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
1098 	ASSERT_EQ(ptr, ptr_new);
1099 
1100 	/*
1101 	 * Again, make sure the guard markers are retained in their original positions.
1102 	 */
1103 	ASSERT_FALSE(try_read_write_buf(ptr));
1104 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1105 
1106 	/*
1107 	 * A real user would have to remove guard markers, but would reasonably
1108 	 * expect all characteristics of the mapping to be retained, including
1109 	 * guard markers.
1110 	 */
1111 
1112 	/* Cleanup. */
1113 	munmap(ptr, 20 * page_size);
1114 }
1115 /*
1116  * Assert that moving, extending and shrinking memory via mremap() retains
1117  * guard markers where possible.
1118  *
1119  * Shrinking will result in markers that are shrunk over being removed. Again,
1120  * if the user were using a PROT_NONE mapping they'd have to manually fix this
1121  * up also so this is OK.
1122  */
1123 TEST_F(guard_regions, mremap_shrink)
1124 {
1125 	const unsigned long page_size = self->page_size;
1126 	char *ptr;
1127 	int i;
1128 
1129 	/* Map 5 pages. */
1130 	ptr = mmap_(self, variant, NULL, 5 * page_size,
1131 		    PROT_READ | PROT_WRITE, 0, 0);
1132 	ASSERT_NE(ptr, MAP_FAILED);
1133 
1134 	/* Place guard markers at both ends of the 5 page span. */
1135 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1136 	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1137 
1138 	/* Make sure the guarding is in effect. */
1139 	ASSERT_FALSE(try_read_write_buf(ptr));
1140 	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1141 
1142 	/* Now shrink to 3 pages. */
1143 	ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
1144 	ASSERT_NE(ptr, MAP_FAILED);
1145 
1146 	/* We expect the guard marker at the start to be retained... */
1147 	ASSERT_FALSE(try_read_write_buf(ptr));
1148 
1149 	/* ...But remaining pages will not have guard markers. */
1150 	for (i = 1; i < 3; i++) {
1151 		char *curr = &ptr[i * page_size];
1152 
1153 		ASSERT_TRUE(try_read_write_buf(curr));
1154 	}
1155 
1156 	/*
1157 	 * As with expansion, a real user would have to remove guard pages and
1158 	 * fixup. But you'd have to do similar manual things with PROT_NONE
1159 	 * mappings too.
1160 	 */
1161 
1162 	/*
1163 	 * If we expand back to the original size, the end marker will, of
1164 	 * course, no longer be present.
1165 	 */
1166 	ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
1167 	ASSERT_NE(ptr, MAP_FAILED);
1168 
1169 	/* Again, we expect the guard marker at the start to be retained... */
1170 	ASSERT_FALSE(try_read_write_buf(ptr));
1171 
1172 	/* ...But remaining pages will not have guard markers. */
1173 	for (i = 1; i < 5; i++) {
1174 		char *curr = &ptr[i * page_size];
1175 
1176 		ASSERT_TRUE(try_read_write_buf(curr));
1177 	}
1178 
1179 	/* Cleanup. */
1180 	munmap(ptr, 5 * page_size);
1181 }
1182 
1183 /*
1184  * Assert that forking a process whose VMAs do not have VM_WIPEONFORK set
1185  * retains guard pages in the child.
1186  */
1187 TEST_F(guard_regions, fork)
1188 {
1189 	const unsigned long page_size = self->page_size;
1190 	char *ptr;
1191 	pid_t pid;
1192 	int i;
1193 
1194 	/* Map 10 pages. */
1195 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1196 		    PROT_READ | PROT_WRITE, 0, 0);
1197 	ASSERT_NE(ptr, MAP_FAILED);
1198 
1199 	/* Establish guard pages in the first 5 pages. */
1200 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1201 
1202 	pid = fork();
1203 	ASSERT_NE(pid, -1);
1204 	if (!pid) {
1205 		/* This is the child process now. */
1206 
1207 		/* Assert that the guarding is in effect. */
1208 		for (i = 0; i < 10; i++) {
1209 			char *curr = &ptr[i * page_size];
1210 			bool result = try_read_write_buf(curr);
1211 
1212 			ASSERT_TRUE(i >= 5 ? result : !result);
1213 		}
1214 
1215 		/* Now unguard the range. */
1216 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1217 
1218 		exit(0);
1219 	}
1220 
1221 	/* Parent process. */
1222 
1223 	/* Parent simply waits on child. */
1224 	waitpid(pid, NULL, 0);
1225 
1226 	/* Child unguard does not impact parent page table state. */
1227 	for (i = 0; i < 10; i++) {
1228 		char *curr = &ptr[i * page_size];
1229 		bool result = try_read_write_buf(curr);
1230 
1231 		ASSERT_TRUE(i >= 5 ? result : !result);
1232 	}
1233 
1234 	/* Cleanup. */
1235 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1236 }
1237 
1238 /*
1239  * Assert expected behaviour after we fork populated ranges of anonymous memory
1240  * and then guard and unguard the range.
1241  */
1242 TEST_F(guard_regions, fork_cow)
1243 {
1244 	const unsigned long page_size = self->page_size;
1245 	char *ptr;
1246 	pid_t pid;
1247 	int i;
1248 
1249 	if (variant->backing != ANON_BACKED)
1250 		SKIP(return, "CoW only supported on anon mappings");
1251 
1252 	/* Map 10 pages. */
1253 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1254 		    PROT_READ | PROT_WRITE, 0, 0);
1255 	ASSERT_NE(ptr, MAP_FAILED);
1256 
1257 	/* Populate range. */
1258 	for (i = 0; i < 10 * page_size; i++) {
1259 		char chr = 'a' + (i % 26);
1260 
1261 		ptr[i] = chr;
1262 	}
1263 
1264 	pid = fork();
1265 	ASSERT_NE(pid, -1);
1266 	if (!pid) {
1267 		/* This is the child process now. */
1268 
1269 		/* Ensure the range is as expected. */
1270 		for (i = 0; i < 10 * page_size; i++) {
1271 			char expected = 'a' + (i % 26);
1272 			char actual = ptr[i];
1273 
1274 			ASSERT_EQ(actual, expected);
1275 		}
1276 
1277 		/* Establish guard pages across the whole range. */
1278 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1279 		/* Remove it. */
1280 		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1281 
1282 		/*
1283 		 * By removing the guard pages, the page tables will be
1284 		 * cleared. Assert that we are looking at the zero page now.
1285 		 */
1286 		for (i = 0; i < 10 * page_size; i++) {
1287 			char actual = ptr[i];
1288 
1289 			ASSERT_EQ(actual, '\0');
1290 		}
1291 
1292 		exit(0);
1293 	}
1294 
1295 	/* Parent process. */
1296 
1297 	/* Parent simply waits on child. */
1298 	waitpid(pid, NULL, 0);
1299 
1300 	/* Ensure the range is unchanged in parent anon range. */
1301 	for (i = 0; i < 10 * page_size; i++) {
1302 		char expected = 'a' + (i % 26);
1303 		char actual = ptr[i];
1304 
1305 		ASSERT_EQ(actual, expected);
1306 	}
1307 
1308 	/* Cleanup. */
1309 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1310 }
1311 
1312 /*
1313  * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1314  * behave as expected.
1315  */
1316 TEST_F(guard_regions, fork_wipeonfork)
1317 {
1318 	const unsigned long page_size = self->page_size;
1319 	char *ptr;
1320 	pid_t pid;
1321 	int i;
1322 
1323 	if (variant->backing != ANON_BACKED)
1324 		SKIP(return, "Wipe on fork only supported on anon mappings");
1325 
1326 	/* Map 10 pages. */
1327 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1328 		    PROT_READ | PROT_WRITE, 0, 0);
1329 	ASSERT_NE(ptr, MAP_FAILED);
1330 
1331 	/* Mark wipe on fork. */
1332 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1333 
1334 	/* Guard the first 5 pages. */
1335 	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1336 
1337 	pid = fork();
1338 	ASSERT_NE(pid, -1);
1339 	if (!pid) {
1340 		/* This is the child process now. */
1341 
1342 		/* Guard will have been wiped. */
1343 		for (i = 0; i < 10; i++) {
1344 			char *curr = &ptr[i * page_size];
1345 
1346 			ASSERT_TRUE(try_read_write_buf(curr));
1347 		}
1348 
1349 		exit(0);
1350 	}
1351 
1352 	/* Parent process. */
1353 
1354 	waitpid(pid, NULL, 0);
1355 
1356 	/* Guard markers should be in effect. */
1357 	for (i = 0; i < 10; i++) {
1358 		char *curr = &ptr[i * page_size];
1359 		bool result = try_read_write_buf(curr);
1360 
1361 		ASSERT_TRUE(i >= 5 ? result : !result);
1362 	}
1363 
1364 	/* Cleanup. */
1365 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1366 }
1367 
1368 /* Ensure that MADV_FREE retains guard entries as expected. */
1369 TEST_F(guard_regions, lazyfree)
1370 {
1371 	const unsigned long page_size = self->page_size;
1372 	char *ptr;
1373 	int i;
1374 
1375 	if (variant->backing != ANON_BACKED)
1376 		SKIP(return, "MADV_FREE only supported on anon mappings");
1377 
1378 	/* Map 10 pages. */
1379 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1380 		    PROT_READ | PROT_WRITE, 0, 0);
1381 	ASSERT_NE(ptr, MAP_FAILED);
1382 
1383 	/* Guard range. */
1384 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1385 
1386 	/* Ensure guarded. */
1387 	for (i = 0; i < 10; i++) {
1388 		char *curr = &ptr[i * page_size];
1389 
1390 		ASSERT_FALSE(try_read_write_buf(curr));
1391 	}
1392 
1393 	/* Lazyfree range. */
1394 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1395 
1396 	/* This should leave the guard markers in place. */
1397 	for (i = 0; i < 10; i++) {
1398 		char *curr = &ptr[i * page_size];
1399 
1400 		ASSERT_FALSE(try_read_write_buf(curr));
1401 	}
1402 
1403 	/* Cleanup. */
1404 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1405 }
1406 
1407 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1408 TEST_F(guard_regions, populate)
1409 {
1410 	const unsigned long page_size = self->page_size;
1411 	char *ptr;
1412 
1413 	/* Map 10 pages. */
1414 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1415 		    PROT_READ | PROT_WRITE, 0, 0);
1416 	ASSERT_NE(ptr, MAP_FAILED);
1417 
1418 	/* Guard range. */
1419 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1420 
1421 	/* Populate read should error out... */
1422 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1423 	ASSERT_EQ(errno, EFAULT);
1424 
1425 	/* ...as should populate write. */
1426 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1427 	ASSERT_EQ(errno, EFAULT);
1428 
1429 	/* Cleanup. */
1430 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1431 }
1432 
1433 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1434 TEST_F(guard_regions, cold_pageout)
1435 {
1436 	const unsigned long page_size = self->page_size;
1437 	char *ptr;
1438 	int i;
1439 
1440 	/* Map 10 pages. */
1441 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1442 		    PROT_READ | PROT_WRITE, 0, 0);
1443 	ASSERT_NE(ptr, MAP_FAILED);
1444 
1445 	/* Guard range. */
1446 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1447 
1448 	/* Ensure guarded. */
1449 	for (i = 0; i < 10; i++) {
1450 		char *curr = &ptr[i * page_size];
1451 
1452 		ASSERT_FALSE(try_read_write_buf(curr));
1453 	}
1454 
1455 	/* Now mark cold. This should have no impact on guard markers. */
1456 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1457 
1458 	/* Should remain guarded. */
1459 	for (i = 0; i < 10; i++) {
1460 		char *curr = &ptr[i * page_size];
1461 
1462 		ASSERT_FALSE(try_read_write_buf(curr));
1463 	}
1464 
1465 	/* OK, now page out. This should, equally, have no effect on markers. */
1466 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1467 
1468 	/* Should remain guarded. */
1469 	for (i = 0; i < 10; i++) {
1470 		char *curr = &ptr[i * page_size];
1471 
1472 		ASSERT_FALSE(try_read_write_buf(curr));
1473 	}
1474 
1475 	/* Cleanup. */
1476 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1477 }
1478 
1479 /* Ensure that guard pages do not break userfaultfd. */
1480 TEST_F(guard_regions, uffd)
1481 {
1482 	const unsigned long page_size = self->page_size;
1483 	int uffd;
1484 	char *ptr;
1485 	int i;
1486 	struct uffdio_api api = {
1487 		.api = UFFD_API,
1488 		.features = 0,
1489 	};
1490 	struct uffdio_register reg;
1491 	struct uffdio_range range;
1492 
1493 	if (!is_anon_backed(variant))
1494 		SKIP(return, "uffd only works on anon backing");
1495 
1496 	/* Set up uffd. */
1497 	uffd = userfaultfd(0);
1498 	if (uffd == -1) {
1499 		switch (errno) {
1500 		case EPERM:
1501 			SKIP(return, "No userfaultfd permissions, try running as root.");
1502 			break;
1503 		case ENOSYS:
1504 			SKIP(return, "userfaultfd is not supported/not enabled.");
1505 			break;
1506 		default:
1507 			ksft_exit_fail_msg("userfaultfd failed with %s\n",
1508 					   strerror(errno));
1509 			break;
1510 		}
1511 	}
1512 
1513 	ASSERT_NE(uffd, -1);
1514 
1515 	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1516 
1517 	/* Map 10 pages. */
1518 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1519 		    PROT_READ | PROT_WRITE, 0, 0);
1520 	ASSERT_NE(ptr, MAP_FAILED);
1521 
1522 	/* Register the range with uffd. */
1523 	range.start = (unsigned long)ptr;
1524 	range.len = 10 * page_size;
1525 	reg.range = range;
1526 	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1527 	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1528 
1529 	/* Guard the range. This should not trigger the uffd. */
1530 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1531 
1532 	/* The guarding should behave as usual with no uffd intervention. */
1533 	for (i = 0; i < 10; i++) {
1534 		char *curr = &ptr[i * page_size];
1535 
1536 		ASSERT_FALSE(try_read_write_buf(curr));
1537 	}
1538 
1539 	/* Cleanup. */
1540 	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1541 	close(uffd);
1542 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1543 }
1544 
1545 /*
1546  * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1547  * aggressively read-ahead, then install guard regions and assert that it
1548  * behaves correctly.
1549  *
1550  * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1551  * cache folios, meaning we maximise the possibility of some broken readahead.
1552  */
1553 TEST_F(guard_regions, madvise_sequential)
1554 {
1555 	char *ptr;
1556 	int i;
1557 	const unsigned long page_size = self->page_size;
1558 
1559 	if (variant->backing == ANON_BACKED)
1560 		SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
1561 
1562 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1563 		    PROT_READ | PROT_WRITE, 0, 0);
1564 	ASSERT_NE(ptr, MAP_FAILED);
1565 
1566 	/* Establish a pattern of data in the file. */
1567 	set_pattern(ptr, 10, page_size);
1568 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1569 
1570 	/* Mark it as being accessed sequentially. */
1571 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
1572 
1573 	/* Mark every other page a guard page. */
1574 	for (i = 0; i < 10; i += 2) {
1575 		char *ptr2 = &ptr[i * page_size];
1576 
1577 		ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
1578 	}
1579 
1580 	/* Now page it out. */
1581 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1582 
1583 	/* Now make sure pages are as expected. */
1584 	for (i = 0; i < 10; i++) {
1585 		char *chrp = &ptr[i * page_size];
1586 
1587 		if (i % 2 == 0) {
1588 			bool result = try_read_write_buf(chrp);
1589 
1590 			ASSERT_FALSE(result);
1591 		} else {
1592 			ASSERT_EQ(*chrp, 'a' + i);
1593 		}
1594 	}
1595 
1596 	/* Now remove guard pages. */
1597 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1598 
1599 	/* Now make sure all data is as expected. */
1600 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1602 
1603 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1604 }
1605 
1606 /*
1607  * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1608  * correctly.
1609  */
1610 TEST_F(guard_regions, map_private)
1611 {
1612 	const unsigned long page_size = self->page_size;
1613 	char *ptr_shared, *ptr_private;
1614 	int i;
1615 
1616 	if (variant->backing == ANON_BACKED)
1617 		SKIP(return, "MAP_PRIVATE test specific to file-backed");
1618 
1619 	ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1620 	ASSERT_NE(ptr_shared, MAP_FAILED);
1621 
1622 	/* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
1623 	ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
1624 	ASSERT_NE(ptr_private, MAP_FAILED);
1625 
1626 	/* Set pattern in shared mapping. */
1627 	set_pattern(ptr_shared, 10, page_size);
1628 
1629 	/* Install guard regions in every other page in the shared mapping. */
1630 	for (i = 0; i < 10; i += 2) {
1631 		char *ptr = &ptr_shared[i * page_size];
1632 
1633 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1634 	}
1635 
1636 	for (i = 0; i < 10; i++) {
1637 		/* Every even shared page should be guarded. */
1638 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1639 		/* Private mappings should always be readable. */
1640 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1641 	}
1642 
1643 	/* Install guard regions in every other page in the private mapping. */
1644 	for (i = 0; i < 10; i += 2) {
1645 		char *ptr = &ptr_private[i * page_size];
1646 
1647 		ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1648 	}
1649 
1650 	for (i = 0; i < 10; i++) {
1651 		/* Every even shared page should be guarded. */
1652 		ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1653 		/* Every even private page should be guarded. */
1654 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1655 	}
1656 
1657 	/* Remove guard regions from shared mapping. */
1658 	ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
1659 
1660 	for (i = 0; i < 10; i++) {
1661 		/* Shared mappings should always be readable. */
1662 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1663 		/* Every even private page should be guarded. */
1664 		ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1665 	}
1666 
1667 	/* Remove guard regions from private mapping. */
1668 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1669 
1670 	for (i = 0; i < 10; i++) {
1671 		/* Shared mappings should always be readable. */
1672 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1673 		/* Private mappings should always be readable. */
1674 		ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1675 	}
1676 
1677 	/* Ensure patterns are intact. */
1678 	ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
1679 	ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
1680 
1681 	/* Now write out every other page to MAP_PRIVATE. */
1682 	for (i = 0; i < 10; i += 2) {
1683 		char *ptr = &ptr_private[i * page_size];
1684 
1685 		memset(ptr, 'a' + i, page_size);
1686 	}
1687 
1688 	/*
1689 	 * At this point the mapping is:
1690 	 *
1691 	 * 0123456789
1692 	 * SPSPSPSPSP
1693 	 *
1694 	 * Where S = shared, P = private mappings.
1695 	 */
1696 
1697 	/* Now mark the beginning of the mapping guarded. */
1698 	ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
1699 
1700 	/*
1701 	 * This renders the mapping:
1702 	 *
1703 	 * 0123456789
1704 	 * xxxxxPSPSP
1705 	 */
1706 
1707 	for (i = 0; i < 10; i++) {
1708 		char *ptr = &ptr_private[i * page_size];
1709 
1710 		/* Ensure guard regions as expected. */
1711 		ASSERT_EQ(try_read_buf(ptr), i >= 5);
1712 		/* The shared mapping should always succeed. */
1713 		ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1714 	}
1715 
1716 	/* Remove the guard regions altogether. */
1717 	ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1718 
1719 	/*
1720 	 *
1721 	 * We now expect the mapping to be:
1722 	 *
1723 	 * 0123456789
1724 	 * SSSSSPSPSP
1725 	 *
1726 	 * As we removed guard regions, the private pages from the first 5 will
1727 	 * have been zapped, so on fault will reestablish the shared mapping.
1728 	 */
1729 
1730 	for (i = 0; i < 10; i++) {
1731 		char *ptr = &ptr_private[i * page_size];
1732 
1733 		/*
1734 		 * Assert that shared mappings in the MAP_PRIVATE mapping match
1735 		 * the shared mapping.
1736 		 */
1737 		if (i < 5 || i % 2 == 0) {
1738 			char *ptr_s = &ptr_shared[i * page_size];
1739 
1740 			ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
1741 			continue;
1742 		}
1743 
1744 		/* Everything else is a private mapping. */
1745 		ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
1746 	}
1747 
1748 	ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
1749 	ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
1750 }
1751 
1752 /* Test that guard regions established over a read-only mapping function correctly. */
1753 TEST_F(guard_regions, readonly_file)
1754 {
1755 	const unsigned long page_size = self->page_size;
1756 	char *ptr;
1757 	int i;
1758 
1759 	if (variant->backing != LOCAL_FILE_BACKED)
1760 		SKIP(return, "Read-only test specific to file-backed");
1761 
1762 	/* Map shared so we can populate the file with a pattern, then unmap. */
1763 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1764 		    PROT_READ | PROT_WRITE, 0, 0);
1765 	ASSERT_NE(ptr, MAP_FAILED);
1766 	set_pattern(ptr, 10, page_size);
1767 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1768 	/* Close the fd so we can re-open read-only. */
1769 	ASSERT_EQ(close(self->fd), 0);
1770 
1771 	/* Re-open read-only. */
1772 	self->fd = open(self->path, O_RDONLY);
1773 	ASSERT_NE(self->fd, -1);
1774 	/* Re-map read-only. */
1775 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1776 	ASSERT_NE(ptr, MAP_FAILED);
1777 
1778 	/* Mark every other page guarded. */
1779 	for (i = 0; i < 10; i += 2) {
1780 		char *ptr_pg = &ptr[i * page_size];
1781 
1782 		ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
1783 	}
1784 
1785 	/* Assert that the guard regions are in place. */
1786 	for (i = 0; i < 10; i++) {
1787 		char *ptr_pg = &ptr[i * page_size];
1788 
1789 		ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
1790 	}
1791 
1792 	/* Remove guard regions. */
1793 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1794 
1795 	/* Ensure the data is as expected. */
1796 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1797 
1798 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1799 }
1800 
1801 TEST_F(guard_regions, fault_around)
1802 {
1803 	const unsigned long page_size = self->page_size;
1804 	char *ptr;
1805 	int i;
1806 
1807 	if (variant->backing == ANON_BACKED)
1808 		SKIP(return, "Fault-around test specific to file-backed");
1809 
1810 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1811 		    PROT_READ | PROT_WRITE, 0, 0);
1812 	ASSERT_NE(ptr, MAP_FAILED);
1813 
1814 	/* Establish a pattern in the backing file. */
1815 	set_pattern(ptr, 10, page_size);
1816 
1817 	/*
1818 	 * Now drop it from the page cache so we get major faults when next we
1819 	 * map it.
1820 	 */
1821 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1822 
1823 	/* Unmap and remap 'to be sure'. */
1824 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1825 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1826 		    PROT_READ | PROT_WRITE, 0, 0);
1827 	ASSERT_NE(ptr, MAP_FAILED);
1828 
1829 	/* Now make every even page guarded. */
1830 	for (i = 0; i < 10; i += 2) {
1831 		char *ptr_p = &ptr[i * page_size];
1832 
1833 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1834 	}
1835 
1836 	/* Now fault in every odd page. This should trigger fault-around. */
1837 	for (i = 1; i < 10; i += 2) {
1838 		char *ptr_p = &ptr[i * page_size];
1839 
1840 		ASSERT_TRUE(try_read_buf(ptr_p));
1841 	}
1842 
1843 	/* Finally, ensure that guard regions are intact as expected. */
1844 	for (i = 0; i < 10; i++) {
1845 		char *ptr_p = &ptr[i * page_size];
1846 
1847 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1848 	}
1849 
1850 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1851 }
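/*
 * The fault-around test above relies on MADV_PAGEOUT actually evicting the
 * pages so that the later reads take major faults. MADV_PAGEOUT is only
 * best-effort, so a cautious caller might verify residency first; a minimal
 * sketch using mincore(2). The name range_resident_pages() is illustrative
 * and not a helper the tests use.
 */
static long range_resident_pages(void *addr, size_t len)
{
	const unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	size_t nr_pages = (len + page_size - 1) / page_size;
	unsigned char *vec = malloc(nr_pages);
	long resident = 0;
	size_t i;

	if (!vec)
		return -1;

	/* mincore() reports, per page, whether the page is resident in RAM. */
	if (mincore(addr, len, vec)) {
		free(vec);
		return -1;
	}

	for (i = 0; i < nr_pages; i++)
		if (vec[i] & 1)
			resident++;

	free(vec);
	return resident;
}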
1852 
1853 TEST_F(guard_regions, truncation)
1854 {
1855 	const unsigned long page_size = self->page_size;
1856 	char *ptr;
1857 	int i;
1858 
1859 	if (variant->backing == ANON_BACKED)
1860 		SKIP(return, "Truncation test specific to file-backed");
1861 
1862 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1863 		    PROT_READ | PROT_WRITE, 0, 0);
1864 	ASSERT_NE(ptr, MAP_FAILED);
1865 
1866 	/*
1867 	 * Establish a pattern in the backing file, just so there is data
1868 	 * there.
1869 	 */
1870 	set_pattern(ptr, 10, page_size);
1871 
1872 	/* Now make every even page guarded. */
1873 	for (i = 0; i < 10; i += 2) {
1874 		char *ptr_p = &ptr[i * page_size];
1875 
1876 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1877 	}
1878 
1879 	/* Now assert things are as expected. */
1880 	for (i = 0; i < 10; i++) {
1881 		char *ptr_p = &ptr[i * page_size];
1882 
1883 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1884 	}
1885 
1886 	/* Now truncate to the size actually used (the file was initialised to 100 pages). */
1887 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1888 
1889 	/* Here the guard regions will remain intact. */
1890 	for (i = 0; i < 10; i++) {
1891 		char *ptr_p = &ptr[i * page_size];
1892 
1893 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1894 	}
1895 
1896 	/* Now truncate to half the size, then truncate again to the full size. */
1897 	ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
1898 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1899 
1900 	/* Again, guard pages will remain intact. */
1901 	for (i = 0; i < 10; i++) {
1902 		char *ptr_p = &ptr[i * page_size];
1903 
1904 		ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1905 	}
1906 
1907 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1908 }
1909 
1910 TEST_F(guard_regions, hole_punch)
1911 {
1912 	const unsigned long page_size = self->page_size;
1913 	char *ptr;
1914 	int i;
1915 
1916 	if (variant->backing == ANON_BACKED)
1917 		SKIP(return, "Hole punch test specific to file-backed");
1918 
1919 	/* Establish pattern in mapping. */
1920 	ptr = mmap_(self, variant, NULL, 10 * page_size,
1921 		    PROT_READ | PROT_WRITE, 0, 0);
1922 	ASSERT_NE(ptr, MAP_FAILED);
1923 	set_pattern(ptr, 10, page_size);
1924 
1925 	/* Install a guard region in the middle of the mapping. */
1926 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1927 			  MADV_GUARD_INSTALL), 0);
1928 
1929 	/*
1930 	 * The buffer will now be:
1931 	 *
1932 	 * 0123456789
1933 	 * ***xxxx***
1934 	 *
1935 	 * Where * is data and x is the guard region.
1936 	 */
1937 
1938 	/* Ensure established. */
1939 	for (i = 0; i < 10; i++) {
1940 		char *ptr_p = &ptr[i * page_size];
1941 
1942 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1943 	}
1944 
1945 	/* Now hole punch the guarded region. */
1946 	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
1947 			  MADV_REMOVE), 0);
1948 
1949 	/* Ensure guard regions remain. */
1950 	for (i = 0; i < 10; i++) {
1951 		char *ptr_p = &ptr[i * page_size];
1952 
1953 		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
1954 	}
1955 
1956 	/* Now remove guard regions throughout. */
1957 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1958 
1959 	/* Check that the pattern exists in non-hole punched region. */
1960 	ASSERT_TRUE(check_pattern(ptr, 3, page_size));
1961 	/* Check that hole punched region is zeroed. */
1962 	ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
1963 	/* Check that the pattern exists in the remainder of the file. */
1964 	ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
1965 
1966 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1967 }
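/*
 * For file- and shmem-backed mappings, the MADV_REMOVE used above frees the
 * backing store for the range, which is roughly equivalent to punching a hole
 * in the file directly. A minimal sketch of the direct form, assuming the
 * filesystem supports FALLOC_FL_PUNCH_HOLE (the FALLOC_FL_* flags come via
 * <fcntl.h> with _GNU_SOURCE). punch_hole() is an illustrative name, not a
 * helper the tests use.
 */
static int punch_hole(int fd, off_t offset, off_t len)
{
	/* KEEP_SIZE is mandatory with PUNCH_HOLE; the file length is unchanged. */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
/*
 * E.g. punch_hole(fd, 3 * page_size, 4 * page_size) on the backing file would
 * zero the same range as the madvise() call above and, as with MADV_REMOVE,
 * would be expected to leave the guard regions in the mapping intact.
 */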
1968 
1969 /*
1970  * Ensure that a memfd works correctly with guard regions: we can write-seal
1971  * it, map it read-only, still establish guard regions within the mapping,
1972  * then remove those guard regions and have everything behave correctly.
1973  */
1974 TEST_F(guard_regions, memfd_write_seal)
1975 {
1976 	const unsigned long page_size = self->page_size;
1977 	char *ptr;
1978 	int i;
1979 
1980 	if (variant->backing != SHMEM_BACKED)
1981 		SKIP(return, "memfd write seal test specific to shmem");
1982 
1983 	/* OK, we need a memfd, so close existing one. */
1984 	/* OK, we need a memfd, so close the existing one. */
1985 
1986 	/* Create and truncate memfd. */
1987 	self->fd = memfd_create("guard_regions_memfd_seals_test",
1988 				MFD_ALLOW_SEALING);
1989 	ASSERT_NE(self->fd, -1);
1990 	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1991 
1992 	/* Map, set pattern, unmap. */
1993 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1994 	ASSERT_NE(ptr, MAP_FAILED);
1995 	set_pattern(ptr, 10, page_size);
1996 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1997 
1998 	/* Write-seal the memfd. */
1999 	ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
2000 
2001 	/* Now map the memfd readonly. */
2002 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
2003 	ASSERT_NE(ptr, MAP_FAILED);
2004 
2005 	/* Ensure pattern is as expected. */
2006 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
2007 
2008 	/* Now make every even page guarded. */
2009 	for (i = 0; i < 10; i += 2) {
2010 		char *ptr_p = &ptr[i * page_size];
2011 
2012 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2013 	}
2014 
2015 	/* Now assert things are as expected. */
2016 	for (i = 0; i < 10; i++) {
2017 		char *ptr_p = &ptr[i * page_size];
2018 
2019 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
2020 	}
2021 
2022 	/* Now remove guard regions. */
2023 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
2024 
2025 	/* Ensure pattern is as expected. */
2026 	ASSERT_TRUE(check_pattern(ptr, 10, page_size));
2027 
2028 	/* Ensure write seal intact. */
2029 	for (i = 0; i < 10; i++) {
2030 		char *ptr_p = &ptr[i * page_size];
2031 
2032 		ASSERT_FALSE(try_write_buf(ptr_p));
2033 	}
2034 
2035 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2036 }
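/*
 * A condensed sketch of the sealing flow exercised above, highlighting the
 * property the test relies on: once F_SEAL_WRITE is applied, new writable
 * MAP_SHARED mappings are refused (EPERM), while MADV_GUARD_INSTALL on a
 * read-only mapping is not treated as a write. Error handling is trimmed and
 * memfd_seal_sketch() is an illustrative name, not something the tests call.
 */
static void memfd_seal_sketch(unsigned long page_size)
{
	int fd = memfd_create("sketch", MFD_ALLOW_SEALING);
	char *ptr;

	ftruncate(fd, 10 * page_size);
	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);

	/* A writable shared mapping is now refused... */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	/* ...expected: ptr == MAP_FAILED with errno == EPERM. */

	/* ...but a read-only mapping still permits guard regions. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ, MAP_SHARED, fd, 0);
	madvise(ptr, page_size, MADV_GUARD_INSTALL);

	munmap(ptr, 10 * page_size);
	close(fd);
}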
2037 
2038 
2039 /*
2040  * Since we are now permitted to establish guard regions in read-only anonymous
2041  * mappings, test that guard regions function with a mapping to the anonymous
2042  * zero page. This probably has no practical use, but is included for the sake
2043  * of thoroughness.
2044  */
2045 TEST_F(guard_regions, anon_zeropage)
2046 {
2047 	const unsigned long page_size = self->page_size;
2048 	char *ptr;
2049 	int i;
2050 
2051 	if (!is_anon_backed(variant))
2052 		SKIP(return, "anon zero page test specific to anon/shmem");
2053 
2054 	/* Obtain a read-only i.e. anon zero page mapping. */
2055 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
2056 	ASSERT_NE(ptr, MAP_FAILED);
2057 
2058 	/* Now make every even page guarded. */
2059 	for (i = 0; i < 10; i += 2) {
2060 		char *ptr_p = &ptr[i * page_size];
2061 
2062 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2063 	}
2064 
2065 	/* Now assert things are as expected. */
2066 	for (i = 0; i < 10; i++) {
2067 		char *ptr_p = &ptr[i * page_size];
2068 
2069 		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
2070 	}
2071 
2072 	/* Now remove all guard regions. */
2073 	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
2074 
2075 	/* Now assert things are as expected. */
2076 	for (i = 0; i < 10; i++) {
2077 		char *ptr_p = &ptr[i * page_size];
2078 
2079 		ASSERT_TRUE(try_read_buf(ptr_p));
2080 	}
2081 
2082 	/* Ensure the zero page is still in place, i.e. everything reads as zero. */
2083 	ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
2084 
2085 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2086 }
2087 
2088 /*
2089  * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2090  */
2091 TEST_F(guard_regions, pagemap)
2092 {
2093 	const unsigned long page_size = self->page_size;
2094 	int proc_fd;
2095 	char *ptr;
2096 	int i;
2097 
2098 	proc_fd = open("/proc/self/pagemap", O_RDONLY);
2099 	ASSERT_NE(proc_fd, -1);
2100 
2101 	ptr = mmap_(self, variant, NULL, 10 * page_size,
2102 		    PROT_READ | PROT_WRITE, 0, 0);
2103 	ASSERT_NE(ptr, MAP_FAILED);
2104 
2105 	/* Read from pagemap, and assert no guard regions are detected. */
2106 	for (i = 0; i < 10; i++) {
2107 		char *ptr_p = &ptr[i * page_size];
2108 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2109 		unsigned long masked = entry & PM_GUARD_REGION;
2110 
2111 		ASSERT_EQ(masked, 0);
2112 	}
2113 
2114 	/* Install a guard region in every other page. */
2115 	for (i = 0; i < 10; i += 2) {
2116 		char *ptr_p = &ptr[i * page_size];
2117 
2118 		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2119 	}
2120 
2121 	/* Re-read from pagemap, and assert guard regions are detected. */
2122 	for (i = 0; i < 10; i++) {
2123 		char *ptr_p = &ptr[i * page_size];
2124 		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
2125 		unsigned long masked = entry & PM_GUARD_REGION;
2126 
2127 		ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
2128 	}
2129 
2130 	ASSERT_EQ(close(proc_fd), 0);
2131 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2132 }
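/*
 * pagemap_get_entry() above comes from vm_util.h. A minimal sketch of what
 * such a read presumably looks like, assuming the standard /proc/<pid>/pagemap
 * layout of one 64-bit entry per virtual page; PM_GUARD_REGION itself is taken
 * from the shared test headers and is not restated here. The helper name is
 * illustrative only.
 */
static unsigned long long read_pagemap_entry(int pagemap_fd, const void *vaddr)
{
	const unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long long entry = 0;
	off_t offset = (off_t)((unsigned long)vaddr / page_size) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return 0;

	return entry;
}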
2133 
2134 /*
2135  * Assert that PAGEMAP_SCAN correctly reports guard region ranges.
2136  */
2137 TEST_F(guard_regions, pagemap_scan)
2138 {
2139 	const unsigned long page_size = self->page_size;
2140 	struct page_region pm_regs[10];
2141 	struct pm_scan_arg pm_scan_args = {
2142 		.size = sizeof(struct pm_scan_arg),
2143 		.category_anyof_mask = PAGE_IS_GUARD,
2144 		.return_mask = PAGE_IS_GUARD,
2145 		.vec = (long)&pm_regs,
2146 		.vec_len = ARRAY_SIZE(pm_regs),
2147 	};
2148 	int proc_fd, i;
2149 	char *ptr;
2150 
2151 	proc_fd = open("/proc/self/pagemap", O_RDONLY);
2152 	ASSERT_NE(proc_fd, -1);
2153 
2154 	ptr = mmap_(self, variant, NULL, 10 * page_size,
2155 		    PROT_READ | PROT_WRITE, 0, 0);
2156 	ASSERT_NE(ptr, MAP_FAILED);
2157 
2158 	pm_scan_args.start = (long)ptr;
2159 	pm_scan_args.end = (long)ptr + 10 * page_size;
2160 	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 0);
2161 	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);
2162 
2163 	/* Install a guard region in every other page. */
2164 	for (i = 0; i < 10; i += 2) {
2165 		char *ptr_p = &ptr[i * page_size];
2166 
2167 		ASSERT_EQ(syscall(__NR_madvise, ptr_p, page_size, MADV_GUARD_INSTALL), 0);
2168 	}
2169 
2170 	/*
2171 	 * Assert ioctl() returns the count of located regions, where each
2172 	 * region spans every other page within the range of 10 pages.
2173 	 */
2174 	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 5);
2175 	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);
2176 
2177 	/* Now check the regions returned and assert guard regions are detected. */
2178 	for (i = 0; i < 5; i++) {
2179 		long ptr_p = (long)&ptr[2 * i * page_size];
2180 
2181 		ASSERT_EQ(pm_regs[i].start, ptr_p);
2182 		ASSERT_EQ(pm_regs[i].end, ptr_p + page_size);
2183 		ASSERT_EQ(pm_regs[i].categories, PAGE_IS_GUARD);
2184 	}
2185 
2186 	ASSERT_EQ(close(proc_fd), 0);
2187 	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
2188 }
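/*
 * A small wrapper sketch over the same PAGEMAP_SCAN ioctl used above, reusing
 * struct pm_scan_arg, struct page_region and PAGE_IS_GUARD from <linux/fs.h>.
 * It counts the guard-region ranges within [start, start + len), capped at a
 * fixed vector size rather than looping on walk_end. count_guard_ranges() is
 * an illustrative name, not a helper the tests use.
 */
static int count_guard_ranges(int pagemap_fd, void *start, size_t len)
{
	struct page_region regions[32];
	struct pm_scan_arg arg = {
		.size = sizeof(arg),
		.start = (unsigned long)start,
		.end = (unsigned long)start + len,
		.category_anyof_mask = PAGE_IS_GUARD,
		.return_mask = PAGE_IS_GUARD,
		.vec = (unsigned long)regions,
		.vec_len = ARRAY_SIZE(regions),
	};

	/* On success, the ioctl returns the number of regions written to vec. */
	return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
}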
2189 
2190 TEST_F(guard_regions, collapse)
2191 {
2192 	const unsigned long page_size = self->page_size;
2193 	const unsigned long size = 2 * HPAGE_SIZE;
2194 	const unsigned long num_pages = size / page_size;
2195 	char *ptr;
2196 	int i;
2197 
2198 	/* The file must be the correct size for the non-anon cases. */
2199 	if (variant->backing != ANON_BACKED)
2200 		ASSERT_EQ(ftruncate(self->fd, size), 0);
2201 
2202 	/*
2203 	 * We must close and re-open the local file backing as read-only for
2204 	 * CONFIG_READ_ONLY_THP_FOR_FS to work.
2205 	 */
2206 	if (variant->backing == LOCAL_FILE_BACKED) {
2207 		ASSERT_EQ(close(self->fd), 0);
2208 
2209 		self->fd = open(self->path, O_RDONLY);
2210 		ASSERT_GE(self->fd, 0);
2211 	}
2212 
2213 	ptr = mmap_(self, variant, NULL, size, PROT_READ, 0, 0);
2214 	ASSERT_NE(ptr, MAP_FAILED);
2215 
2216 	/* Prevent being faulted-in as huge. */
2217 	ASSERT_EQ(madvise(ptr, size, MADV_NOHUGEPAGE), 0);
2218 	/* Fault in. */
2219 	ASSERT_EQ(madvise(ptr, size, MADV_POPULATE_READ), 0);
2220 
2221 	/* Install guard regions in every other page. */
2222 	for (i = 0; i < num_pages; i += 2) {
2223 		char *ptr_page = &ptr[i * page_size];
2224 
2225 		ASSERT_EQ(madvise(ptr_page, page_size, MADV_GUARD_INSTALL), 0);
2226 		/* Accesses should now fail. */
2227 		ASSERT_FALSE(try_read_buf(ptr_page));
2228 	}
2229 
2230 	/* Allow huge page throughout region. */
2231 	ASSERT_EQ(madvise(ptr, size, MADV_HUGEPAGE), 0);
2232 
2233 	/*
2234 	 * Now collapse the entire region. This should fail in all cases.
2235 	 *
2236 	 * The madvise() call will also fail if CONFIG_READ_ONLY_THP_FOR_FS is
2237 	 * not set for the local file case, but we can't differentiate whether
2238 	 * this occurred or if the collapse was rightly rejected.
2239 	 */
2240 	EXPECT_NE(madvise(ptr, size, MADV_COLLAPSE), 0);
2241 
2242 	/*
2243 	 * If we introduce a bug that causes the collapse to succeed, gather
2244 	 * data on whether guard regions are at least preserved. The EXPECT_NE()
2245 	 * above will already have flagged a failure in that case.
2246 	 */
2247 	for (i = 0; i < num_pages; i += 2) {
2248 		char *ptr_page = &ptr[i * page_size];
2249 
2250 		/* Accesses should still fail. */
2251 		ASSERT_FALSE(try_read_buf(ptr_page));
2252 	}
2253 }
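/*
 * As noted above, a failing MADV_COLLAPSE is ambiguous for the local-file
 * case. Another source of ambiguity is a kernel built without THP support at
 * all, in which case MADV_COLLAPSE cannot succeed either. A minimal sketch,
 * assuming the standard sysfs location, of how a caller might detect that;
 * thp_supported() is an illustrative name, not a helper the tests use.
 */
static bool thp_supported(void)
{
	/* This sysfs directory only exists when CONFIG_TRANSPARENT_HUGEPAGE is set. */
	return access("/sys/kernel/mm/transparent_hugepage", F_OK) == 0;
}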
2254 
2255 TEST_F(guard_regions, smaps)
2256 {
2257 	const unsigned long page_size = self->page_size;
2258 	struct procmap_fd procmap;
2259 	char *ptr, *ptr2;
2260 	int i;
2261 
2262 	/* Map a region. */
2263 	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
2264 	ASSERT_NE(ptr, MAP_FAILED);
2265 
2266 	/* We shouldn't yet see a guard flag. */
2267 	ASSERT_FALSE(check_vmflag_guard(ptr));
2268 
2269 	/* Install a single guard region. */
2270 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
2271 
2272 	/* Now we should see a guard flag. */
2273 	ASSERT_TRUE(check_vmflag_guard(ptr));
2274 
2275 	/*
2276 	 * Removing the guard region should not change things because we simply
2277 	 * cannot accurately track whether a given VMA has had all of its guard
2278 	 * regions removed.
2279 	 */
2280 	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
2281 	ASSERT_TRUE(check_vmflag_guard(ptr));
2282 
2283 	/* Install guard regions throughout. */
2284 	for (i = 0; i < 10; i++) {
2285 		ASSERT_EQ(madvise(&ptr[i * page_size], page_size, MADV_GUARD_INSTALL), 0);
2286 		/* We should always see the guard region flag. */
2287 		ASSERT_TRUE(check_vmflag_guard(ptr));
2288 	}
2289 
2290 	/* Split into two VMAs. */
2291 	ASSERT_EQ(munmap(&ptr[4 * page_size], page_size), 0);
2292 
2293 	/* Both VMAs should have the guard flag set. */
2294 	ASSERT_TRUE(check_vmflag_guard(ptr));
2295 	ASSERT_TRUE(check_vmflag_guard(&ptr[5 * page_size]));
2296 
2297 	/*
2298 	 * If the local file system is unable to merge VMAs due to having
2299 	 * unusual characteristics, there is no point in asserting merge
2300 	 * behaviour.
2301 	 */
2302 	if (!local_fs_has_sane_mmap(self, variant)) {
2303 		TH_LOG("local filesystem does not support sane merging, skipping merge test");
2304 		return;
2305 	}
2306 
2307 	/* Map a fresh VMA between the two split VMAs. */
2308 	ptr2 = mmap_(self, variant, &ptr[4 * page_size], page_size,
2309 		     PROT_READ | PROT_WRITE, MAP_FIXED, 4 * page_size);
2310 	ASSERT_NE(ptr2, MAP_FAILED);
2311 
2312 	/*
2313 	 * Check the procmap to ensure that this VMA merged with the adjacent
2314 	 * two. The guard region flag is 'sticky' so should not preclude
2315 	 * merging.
2316 	 */
2317 	ASSERT_EQ(open_self_procmap(&procmap), 0);
2318 	ASSERT_TRUE(find_vma_procmap(&procmap, ptr));
2319 	ASSERT_EQ(procmap.query.vma_start, (unsigned long)ptr);
2320 	ASSERT_EQ(procmap.query.vma_end, (unsigned long)ptr + 10 * page_size);
2321 	ASSERT_EQ(close_procmap(&procmap), 0);
2322 	/* And, of course, this VMA should have the guard flag set. */
2323 	ASSERT_TRUE(check_vmflag_guard(ptr));
2324 }
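/*
 * check_vmflag_guard() above is provided by vm_util.h. A rough sketch of the
 * kind of /proc/self/smaps walk such a check presumably performs: find the VMA
 * containing an address and look for a mnemonic on its VmFlags line. The "gu"
 * mnemonic for the guard flag is an assumption here; the authoritative list is
 * show_smap_vma_flags() in the kernel's fs/proc/task_mmu.c. The helper name is
 * illustrative only.
 */
static bool vma_has_vmflag(const void *addr, const char *flag)
{
	unsigned long target = (unsigned long)addr;
	unsigned long start, end;
	bool in_vma = false, found = false;
	char line[1024];
	FILE *fp = fopen("/proc/self/smaps", "r");

	if (!fp)
		return false;

	while (fgets(line, sizeof(line), fp)) {
		/* VMA headers look like "<start>-<end> perms offset dev inode path". */
		if (sscanf(line, "%lx-%lx ", &start, &end) == 2) {
			in_vma = target >= start && target < end;
			continue;
		}

		/* Within the target VMA, check its VmFlags line for the mnemonic. */
		if (in_vma && !strncmp(line, "VmFlags:", 8)) {
			found = strstr(line, flag) != NULL;
			break;
		}
	}

	fclose(fp);
	return found;
}
/*
 * Under the assumption above, vma_has_vmflag(ptr, " gu ") would approximate
 * the check_vmflag_guard(ptr) calls in this test.
 */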
2325 
2326 TEST_HARNESS_MAIN
2327