Lines Matching +full:signal +full:- +full:guard

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
13 #include <signal.h>
30 * "If the signal occurs other than as the result of calling the abort or raise
31 * function, the behavior is undefined if the signal handler refers to any
76 switch (variant->backing) { in is_anon_backed()
93 switch (variant->backing) { in mmap_()
96 fd = -1; in mmap_()
101 fd = self->fd; in mmap_()
131 * Enable our signal catcher and try to read/write the specified buffer. The
133 * signal.
139 /* Tell signal handler to jump back here on fatal signal. */ in try_access_buf()
141 /* If a fatal signal arose, we will jump back here and failed is set. */ in try_access_buf()
155 /* Try to read from a buffer, return true if no fatal signal. */
161 /* Try to write to a buffer, return true if no fatal signal. */
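
The probe helpers quoted above follow a classic pattern: install a handler for the fatal signal, sigsetjmp() before the access, and siglongjmp() back out of the handler if the access faults. A minimal standalone sketch of that pattern (not the selftest's exact code; the helper name here is illustrative):

#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf signal_jmp_buf;

static void fatal_signal_handler(int sig)
{
        /* Jump back to the sigsetjmp() call site with a non-zero value. */
        siglongjmp(signal_jmp_buf, 1);
}

/* Return true if the byte at ptr can be read without a fatal signal. */
static bool try_read_byte(volatile const char *ptr)
{
        struct sigaction sa = { .sa_handler = fatal_signal_handler };
        struct sigaction old_segv, old_bus;
        bool ok;

        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, &old_segv);
        sigaction(SIGBUS, &sa, &old_bus);

        if (sigsetjmp(signal_jmp_buf, 1) == 0) {
                (void)*ptr;     /* May fault and long-jump back. */
                ok = true;
        } else {
                ok = false;     /* A fatal signal arose. */
        }

        sigaction(SIGSEGV, &old_segv, NULL);
        sigaction(SIGBUS, &old_bus, NULL);
        return ok;
}

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, page_size, PROT_NONE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        printf("PROT_NONE page readable? %s\n",
               try_read_byte(ptr) ? "yes" : "no");      /* Expect "no". */
        munmap(ptr, page_size);
        return 0;
}
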
265 self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); in FIXTURE_SETUP()
268 switch (variant->backing) { in FIXTURE_SETUP()
272 self->fd = open_file("", self->path); in FIXTURE_SETUP()
275 self->fd = memfd_create(self->path, 0); in FIXTURE_SETUP()
280 ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0); in FIXTURE_SETUP()
287 if (variant->backing == ANON_BACKED) in FIXTURE_TEARDOWN_PARENT()
290 if (self->fd >= 0) in FIXTURE_TEARDOWN_PARENT()
291 close(self->fd); in FIXTURE_TEARDOWN_PARENT()
293 if (self->path[0] != '\0') in FIXTURE_TEARDOWN_PARENT()
294 unlink(self->path); in FIXTURE_TEARDOWN_PARENT()
300 const unsigned long page_size = self->page_size; in TEST_F()
323 /* Establish a guard page at the end of the mapping. */ in TEST_F()
324 ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size, in TEST_F()
327 /* Check that both guard pages result in SIGSEGV. */ in TEST_F()
329 ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size])); in TEST_F()
331 /* Remove the first guard page. */ in TEST_F()
337 /* Remove the last guard page. */ in TEST_F()
338 ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size, in TEST_F()
342 ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size])); in TEST_F()
346 * these be faulted in, so this also tests that we can install guard in TEST_F()
351 /* Make sure they are all guard pages. */ in TEST_F()
365 /* Remove guard pages. */ in TEST_F()
376 * Now remove all guard pages, make sure we don't remove existing in TEST_F()
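
For context, the MADV_GUARD_INSTALL/MADV_GUARD_REMOVE calls asserted above can be exercised standalone as in the sketch below. The numeric fallbacks (102/103) mirror asm-generic/mman-common.h on kernels with guard region support; treat them as an assumption and prefer the real uapi headers:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Fallback definitions; verify against your kernel's uapi headers. */
#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        if (ptr == MAP_FAILED)
                return 1;

        /* Install a guard region over the first page: accesses now fault. */
        if (madvise(ptr, page_size, MADV_GUARD_INSTALL))
                perror("MADV_GUARD_INSTALL");

        /* The rest of the mapping is untouched and remains usable. */
        ptr[page_size] = 'x';

        /* Remove the guard again; the page is then zero-filled on fault. */
        if (madvise(ptr, page_size, MADV_GUARD_REMOVE))
                perror("MADV_GUARD_REMOVE");

        ptr[0] = 'y';   /* No longer faults fatally. */

        munmap(ptr, 10 * page_size);
        return 0;
}
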
393 const unsigned long page_size = self->page_size; in TEST_F()
425 * [---] [---] [---] in TEST_F()
429 * Now mark the whole range as guard pages and make sure all VMAs are as in TEST_F()
436 * -ENOMEM. Also if anything runs out of memory it is set to in TEST_F()
437 * -ENOMEM. You are meant to guess which is which. in TEST_F()
439 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1); in TEST_F()
462 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1); in TEST_F()
495 * [---][xxxx][---][xxxx][---] in TEST_F()
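
The -ENOMEM remarks above describe madvise()'s behaviour over ranges containing unmapped gaps: the advice is applied to the mapped parts, yet the call still returns -1 with errno set to ENOMEM. A sketch of that behaviour with guard installation (same fallback MADV_GUARD_* definitions as above):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *region = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        int ret;

        if (region == MAP_FAILED)
                return 1;

        /* Punch a hole in the middle so the range contains an unmapped gap. */
        munmap(region + page_size, page_size);

        /* The guard is installed over the mapped pages... */
        ret = madvise(region, 3 * page_size, MADV_GUARD_INSTALL);

        /* ...but the gap makes the call report -1/ENOMEM anyway. */
        printf("ret=%d errno=%s\n", ret, ret ? strerror(errno) : "0");

        munmap(region, page_size);
        munmap(region + 2 * page_size, page_size);
        return 0;
}
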
525 const unsigned long page_size = self->page_size; in TEST_F()
543 /* We want guard markers at start/end of each VMA. */ in TEST_F()
573 /* Now guard in one step. */ in TEST_F()
577 if (count == -1 && errno == EPERM) in TEST_F()
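
The "guard in one step" case above uses process_madvise(), which applies one advice to a vector of ranges, potentially spanning several VMAs, in a single call; as the EPERM check suggests, the call may be refused without sufficient privilege. A sketch using raw syscalls (the glibc wrappers for pidfd_open()/process_madvise() only exist in newer glibc, so their availability is an assumption):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *a = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        char *b = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        struct iovec vec[] = {
                { .iov_base = a, .iov_len = page_size },
                { .iov_base = b, .iov_len = page_size },
        };
        int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
        ssize_t count;

        /* Guard both ranges in one step. */
        count = syscall(SYS_process_madvise, pidfd, vec, 2,
                        MADV_GUARD_INSTALL, 0);
        if (count < 0)
                perror("process_madvise");      /* EPERM/EINVAL if refused. */
        else
                printf("advised %zd bytes\n", count);

        close(pidfd);
        munmap(a, page_size);
        munmap(b, page_size);
        return 0;
}
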
614 /* Assert that unmapping ranges does not leave guard markers behind. */
617 const unsigned long page_size = self->page_size; in TEST_F()
624 /* Guard first and last pages. */ in TEST_F()
652 /* Assert that mprotect() operations have no bearing on guard markers. */
655 const unsigned long page_size = self->page_size; in TEST_F()
663 /* Guard the middle of the range. */ in TEST_F()
671 /* Now make these pages read-only. */ in TEST_F()
678 /* Make sure we can guard again without issue. */ in TEST_F()
700 /* Split and merge VMAs and make sure guard pages still behave. */
703 const unsigned long page_size = self->page_size; in TEST_F()
711 /* Guard the whole range. */ in TEST_F()
726 /* Make sure the remaining ranges are guarded post-split. */ in TEST_F()
748 /* Now map them again - the unmap will have cleared the guards. */ in TEST_F()
759 /* Now make sure guard pages are established. */ in TEST_F()
768 /* Now guard everything again. */ in TEST_F()
831 /* Assert that MADV_DONTNEED does not remove guard markers. */
834 const unsigned long page_size = self->page_size; in TEST_F()
849 /* Guard every other page. */ in TEST_F()
860 /* Check to ensure guard markers are still in place. */ in TEST_F()
869 switch (variant->backing) { in TEST_F()
892 /* Assert that mlock()'ed pages work correctly with guard markers. */
895 const unsigned long page_size = self->page_size; in TEST_F()
913 /* Now try to guard, should fail with EINVAL. */ in TEST_F()
914 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1); in TEST_F()
920 /* Guard first half of range, should now succeed. */ in TEST_F()
923 /* Make sure guard works. */ in TEST_F()
937 * Now lock the latter part of the range. We can't lock the guard pages, in TEST_F()
944 * Now remove guard pages, we permit mlock()'d ranges to have guard in TEST_F()
945 * pages removed as it is a non-destructive operation. in TEST_F()
949 /* Now check that no guard pages remain. */ in TEST_F()
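
The mlock() interaction summarised above is: installing guard regions over already mlock()'ed pages fails with EINVAL (installation would zap resident pages, which mlock forbids), while removing guard regions from an mlock()'ed range is allowed because it is non-destructive. A sketch (again assuming the fallback MADV_GUARD_* values):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        if (ptr == MAP_FAILED)
                return 1;

        if (mlock(ptr, 2 * page_size)) {
                perror("mlock");        /* May need RLIMIT_MEMLOCK headroom. */
                return 1;
        }

        /* Installing over an mlock()'ed range is refused: expect -1/EINVAL. */
        if (madvise(ptr, page_size, MADV_GUARD_INSTALL))
                printf("install: %s\n", strerror(errno));

        /* Removal is non-destructive, so it succeeds even while mlock()'ed. */
        printf("remove: %d\n", madvise(ptr, page_size, MADV_GUARD_REMOVE));

        munlock(ptr, 2 * page_size);
        munmap(ptr, 2 * page_size);
        return 0;
}
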
962 * guard markers where possible.
964 * - Moving a mapping alone should retain markers as they are.
968 const unsigned long page_size = self->page_size; in TEST_F()
976 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
980 /* Make sure the guard pages are in effect. */ in TEST_F()
993 /* Make sure the guard markers are retained. */ in TEST_F()
998 * Clean up - we only need reference the new pointer as we overwrote the in TEST_F()
1006 * guard markers where possible.
1008 * Expanding should retain guard pages, only now in a different position. The user
1009 * will have to remove guard pages manually to fix up (they'd have to do the
1014 const unsigned long page_size = self->page_size; in TEST_F()
1024 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
1037 * Make sure the guard markers are retained in their original positions. in TEST_F()
1052 * Again, make sure the guard markers are retained in their original positions. in TEST_F()
1058 * A real user would have to remove guard markers, but would reasonably in TEST_F()
1060 * guard markers. in TEST_F()
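
A sketch of the mremap() expansion case described above: guard markers placed before the resize stay at their original offsets, the newly added tail has no markers, and a caller who wants "guard at the end" semantics has to remove and re-install them (guard constant fallbacks as before):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        if (ptr == MAP_FAILED)
                return 1;

        /* Guard the first and last page of the 5-page span. */
        madvise(ptr, page_size, MADV_GUARD_INSTALL);
        madvise(ptr + 4 * page_size, page_size, MADV_GUARD_INSTALL);

        /* Expand to 10 pages (the mapping may move with MREMAP_MAYMOVE). */
        ptr = mremap(ptr, 5 * page_size, 10 * page_size, MREMAP_MAYMOVE);
        if (ptr == MAP_FAILED)
                return 1;

        /*
         * The markers are still at pages 0 and 4 of the enlarged mapping;
         * pages 5..9 are ordinary. To move the trailing guard to the new end:
         */
        madvise(ptr + 4 * page_size, page_size, MADV_GUARD_REMOVE);
        madvise(ptr + 9 * page_size, page_size, MADV_GUARD_INSTALL);

        munmap(ptr, 10 * page_size);
        return 0;
}
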
1068 * guard markers where possible.
1076 const unsigned long page_size = self->page_size; in TEST_F()
1085 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
1097 /* We expect the guard marker at the start to be retained... */ in TEST_F()
1100 /* ...But remaining pages will not have guard markers. */ in TEST_F()
1108 * As with expansion, a real user would have to remove guard pages and in TEST_F()
1120 /* Again, we expect the guard marker at the start to be retained... */ in TEST_F()
1123 /* ...But remaining pages will not have guard markers. */ in TEST_F()
1136 * retain guard pages.
1140 const unsigned long page_size = self->page_size; in TEST_F()
1150 /* Establish guard pages in the first 5 pages. */ in TEST_F()
1154 ASSERT_NE(pid, -1); in TEST_F()
1191 * and then guard and unguard the range.
1195 const unsigned long page_size = self->page_size; in TEST_F()
1200 if (variant->backing != ANON_BACKED) in TEST_F()
1216 ASSERT_NE(pid, -1); in TEST_F()
1228 /* Establish guard pages across the whole range. */ in TEST_F()
1234 * By removing the guard pages, the page tables will be in TEST_F()
1269 const unsigned long page_size = self->page_size; in TEST_F()
1274 if (variant->backing != ANON_BACKED) in TEST_F()
1285 /* Guard the first 5 pages. */ in TEST_F()
1289 ASSERT_NE(pid, -1); in TEST_F()
1293 /* Guard will have been wiped. */ in TEST_F()
1307 /* Guard markers should be in effect. */ in TEST_F()
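
A sketch of the fork() interaction covered above: an anonymous range normally carries its guard markers into the child, whereas a range marked MADV_WIPEONFORK is replaced by fresh zero-fill pages in the child, so its markers are gone there and the child may install its own (MADV_WIPEONFORK's fallback value 18 is an assumption to verify against your headers):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#endif
#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *keep = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        char *wipe = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        pid_t pid;

        madvise(wipe, page_size, MADV_WIPEONFORK);
        madvise(keep, page_size, MADV_GUARD_INSTALL);
        madvise(wipe, page_size, MADV_GUARD_INSTALL);

        pid = fork();
        if (pid == 0) {
                /*
                 * In the child: 'keep' would still fault fatally on access
                 * (guard retained), while 'wipe' reads back as zero pages.
                 */
                printf("child sees wipe[0] = %d\n", wipe[0]);
                _exit(0);
        }
        waitpid(pid, NULL, 0);

        munmap(keep, page_size);
        munmap(wipe, page_size);
        return 0;
}
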
1319 /* Ensure that MADV_FREE retains guard entries as expected. */
1322 const unsigned long page_size = self->page_size; in TEST_F()
1326 if (variant->backing != ANON_BACKED) in TEST_F()
1334 /* Guard range. */ in TEST_F()
1347 /* This should leave the guard markers in place. */ in TEST_F()
1361 const unsigned long page_size = self->page_size; in TEST_F()
1369 /* Guard range. */ in TEST_F()
1373 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1); in TEST_F()
1377 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1); in TEST_F()
1384 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1387 const unsigned long page_size = self->page_size; in TEST_F()
1396 /* Guard range. */ in TEST_F()
1406 /* Now mark cold. This should have no impact on guard markers. */ in TEST_F()
1430 /* Ensure that guard pages do not break userfaultfd. */
1433 const unsigned long page_size = self->page_size; in TEST_F()
1449 if (uffd == -1) { in TEST_F()
1464 ASSERT_NE(uffd, -1); in TEST_F()
1480 /* Guard the range. This should not trigger the uffd. */ in TEST_F()
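
The userfaultfd test above checks that installing guard regions over a registered range does not itself raise userfault events. A compact sketch of registering missing-fault handling over an anonymous range and then guarding it (error handling trimmed; unprivileged userfaultfd may require the vm.unprivileged_userfaultfd sysctl):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)ptr,
                           .len = 10 * page_size },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd == -1) {
                perror("userfaultfd");  /* May need privilege or a sysctl. */
                return 1;
        }
        if (ioctl(uffd, UFFDIO_API, &api) || ioctl(uffd, UFFDIO_REGISTER, &reg))
                return 1;

        /* Installing guard regions here should not raise userfault events. */
        if (madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL))
                perror("MADV_GUARD_INSTALL");

        close(uffd);
        munmap(ptr, 10 * page_size);
        return 0;
}
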
1497 * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1498 * aggressively read-ahead, then install guard regions and assert that it
1501 * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1508 const unsigned long page_size = self->page_size; in TEST_F()
1510 if (variant->backing == ANON_BACKED) in TEST_F()
1511 SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed"); in TEST_F()
1524 /* Mark every other page a guard page. */ in TEST_F()
1547 /* Now remove guard pages. */ in TEST_F()
1558 * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1563 const unsigned long page_size = self->page_size; in TEST_F()
1567 if (variant->backing == ANON_BACKED) in TEST_F()
1568 SKIP(return, "MAP_PRIVATE test specific to file-backed"); in TEST_F()
1574 ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0); in TEST_F()
1580 /* Install guard regions in every other page in the shared mapping. */ in TEST_F()
1594 /* Install guard regions in every other page in the private mapping. */ in TEST_F()
1608 /* Remove guard regions from shared mapping. */ in TEST_F()
1618 /* Remove guard regions from private mapping. */ in TEST_F()
1661 /* Ensure guard regions as expected. */ in TEST_F()
1667 /* Remove the guard regions altogether. */ in TEST_F()
1677 * As we removed guard regions, the private pages from the first 5 will in TEST_F()
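
A sketch of the MAP_PRIVATE case above: two mappings of the same file descriptor, one shared and one private, each carry their own independent guard markers. This uses a memfd purely to stay self-contained; the selftest itself exercises both real local files and shmem:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("guard-demo", 0);
        char *shared, *priv;

        if (fd == -1 || ftruncate(fd, 10 * page_size))
                return 1;

        shared = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
        priv   = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE, fd, 0);

        /* Write through the shared mapping so the file has data. */
        memset(shared, 's', 10 * page_size);

        /*
         * Guard markers are per-mapping: guarding the shared mapping does
         * not affect the private one, and vice versa.
         */
        madvise(shared, page_size, MADV_GUARD_INSTALL);
        printf("private still readable: %c\n", priv[0]);       /* 's' */

        madvise(priv, page_size, MADV_GUARD_INSTALL);
        madvise(priv, page_size, MADV_GUARD_REMOVE);
        printf("private after remove: %c\n", priv[0]);          /* Still 's'. */

        madvise(shared, page_size, MADV_GUARD_REMOVE);
        munmap(shared, 10 * page_size);
        munmap(priv, 10 * page_size);
        close(fd);
        return 0;
}
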
1703 /* Test that guard regions established over a read-only mapping function correctly. */
1706 const unsigned long page_size = self->page_size; in TEST_F()
1710 if (variant->backing != LOCAL_FILE_BACKED) in TEST_F()
1711 SKIP(return, "Read-only test specific to file-backed"); in TEST_F()
1719 /* Close the fd so we can re-open read-only. */ in TEST_F()
1720 ASSERT_EQ(close(self->fd), 0); in TEST_F()
1722 /* Re-open read-only. */ in TEST_F()
1723 self->fd = open(self->path, O_RDONLY); in TEST_F()
1724 ASSERT_NE(self->fd, -1); in TEST_F()
1725 /* Re-map read-only. */ in TEST_F()
1736 /* Assert that the guard regions are in place. */ in TEST_F()
1743 /* Remove guard regions. */ in TEST_F()
1754 const unsigned long page_size = self->page_size; in TEST_F()
1758 if (variant->backing == ANON_BACKED) in TEST_F()
1759 SKIP(return, "Fault-around test specific to file-backed"); in TEST_F()
1787 /* Now fault in every odd page. This should trigger fault-around. */ in TEST_F()
1794 /* Finally, ensure that guard regions are intact as expected. */ in TEST_F()
1806 const unsigned long page_size = self->page_size; in TEST_F()
1810 if (variant->backing == ANON_BACKED) in TEST_F()
1811 SKIP(return, "Truncation test specific to file-backed"); in TEST_F()
1838 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1840 /* Here the guard regions will remain intact. */ in TEST_F()
1848 ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0); in TEST_F()
1849 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1851 /* Again, guard pages will remain intact. */ in TEST_F()
1863 const unsigned long page_size = self->page_size; in TEST_F()
1867 if (variant->backing == ANON_BACKED) in TEST_F()
1868 SKIP(return, "Truncation test specific to file-backed"); in TEST_F()
1876 /* Install a guard region in the middle of the mapping. */ in TEST_F()
1886 * Where * is data and x is the guard region. in TEST_F()
1900 /* Ensure guard regions remain. */ in TEST_F()
1907 /* Now remove guard region throughout. */ in TEST_F()
1910 /* Check that the pattern exists in non-hole punched region. */ in TEST_F()
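
The hole-punch scenario above can be sketched with fallocate(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE): punching drops the file pages underneath the mapping, while the guard markers live in the mapping's page tables and survive the punch. A memfd stands in for the test file here:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("guard-holepunch-demo", 0);
        char *ptr;

        if (fd == -1 || ftruncate(fd, 10 * page_size))
                return 1;

        ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);
        memset(ptr, '*', 10 * page_size);

        /* Guard the middle four pages: layout is ***xxxx***. */
        madvise(ptr + 3 * page_size, 4 * page_size, MADV_GUARD_INSTALL);

        /* Punch a hole under the guarded extent; the markers survive it. */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  3 * page_size, 4 * page_size);

        /* Drop the markers: the punched pages now read back as zeroes. */
        madvise(ptr + 3 * page_size, 4 * page_size, MADV_GUARD_REMOVE);
        printf("page 3 first byte: %d, page 0 first byte: %c\n",
               ptr[3 * page_size], ptr[0]);     /* 0 and '*'. */

        munmap(ptr, 10 * page_size);
        close(fd);
        return 0;
}
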
1921 * Ensure that a memfd works correctly with guard regions, that we can write
1922 * seal it, then open the mapping read-only and still establish guard regions
1923 * within, remove those guard regions and have everything work correctly.
1927 const unsigned long page_size = self->page_size; in TEST_F()
1931 if (variant->backing != SHMEM_BACKED) in TEST_F()
1935 ASSERT_EQ(close(self->fd), 0); in TEST_F()
1938 self->fd = memfd_create("guard_regions_memfd_seals_test", in TEST_F()
1940 ASSERT_NE(self->fd, -1); in TEST_F()
1941 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1949 /* Write-seal the memfd. */ in TEST_F()
1950 ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0); in TEST_F()
1973 /* Now remove guard regions. */ in TEST_F()
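
A sketch of the sealing scenario above: create the memfd with MFD_ALLOW_SEALING, write-seal it, map it read-only, and guard regions can still be installed and removed because neither operation writes the file:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("guard-seal-demo", MFD_ALLOW_SEALING);
        char *ptr;

        if (fd == -1 || ftruncate(fd, 10 * page_size))
                return 1;

        /* Write-seal the memfd: no further writes or writable mappings. */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE))
                return 1;

        /* A read-only shared mapping of the sealed memfd is still allowed. */
        ptr = mmap(NULL, 10 * page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED)
                return 1;

        /* Guard install/remove never writes the file, so both still work. */
        printf("install: %d\n", madvise(ptr, page_size, MADV_GUARD_INSTALL));
        printf("remove:  %d\n", madvise(ptr, page_size, MADV_GUARD_REMOVE));

        munmap(ptr, 10 * page_size);
        close(fd);
        return 0;
}
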
1991 * Since we are now permitted to establish guard regions in read-only anonymous
1993 * use, test that guard regions function with a mapping to the anonymous zero
1998 const unsigned long page_size = self->page_size; in TEST_F()
2005 /* Obtain a read-only i.e. anon zero page mapping. */ in TEST_F()
2023 /* Now remove all guard regions. */ in TEST_F()
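
The zero-page case above can be reproduced with a PROT_READ anonymous mapping: reads fault in the shared zero page, and guard regions can be installed over and removed from it like any other anonymous range. A sketch:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        /* Read-only anonymous mapping: reads map in the shared zero page. */
        const char *ptr = mmap(NULL, 10 * page_size, PROT_READ,
                               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        if (ptr == MAP_FAILED)
                return 1;

        printf("zero page read: %d\n", ptr[0]); /* Always 0. */

        /* Guard every other page, then remove all the markers again. */
        for (int i = 0; i < 10; i += 2)
                madvise((void *)(ptr + i * page_size), page_size,
                        MADV_GUARD_INSTALL);
        madvise((void *)ptr, 10 * page_size, MADV_GUARD_REMOVE);

        printf("still zero: %d\n", ptr[0]);
        munmap((void *)ptr, 10 * page_size);
        return 0;
}
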
2040 * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2044 const unsigned long page_size = self->page_size; in TEST_F()
2050 ASSERT_NE(proc_fd, -1); in TEST_F()
2056 /* Read from pagemap, and assert no guard regions are detected. */ in TEST_F()
2065 /* Install a guard region in every other page. */ in TEST_F()
2072 /* Re-read from pagemap, and assert guard regions are detected. */ in TEST_F()
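
The pagemap check above reads one 64-bit entry per page from /proc/<pid>/pagemap and tests the guard-region bit. A sketch follows; the bit index (58) is an assumption taken from Documentation/admin-guide/mm/pagemap.rst for kernels with guard region support and should be verified against your headers:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#endif

/* Assumed guard-region bit in pagemap entries; verify against pagemap.rst. */
#define PM_GUARD_REGION (1ULL << 58)

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (ptr == MAP_FAILED || fd == -1)
                return 1;

        /* Guard the first two pages only. */
        madvise(ptr, 2 * page_size, MADV_GUARD_INSTALL);

        for (int i = 0; i < 4; i++) {
                uint64_t entry = 0;
                off_t offset = ((uintptr_t)ptr / page_size + i) * sizeof(entry);

                pread(fd, &entry, sizeof(entry), offset);
                printf("page %d guard bit: %d\n", i,
                       !!(entry & PM_GUARD_REGION));
        }

        close(fd);
        munmap(ptr, 4 * page_size);
        return 0;
}
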
2086 * Assert that PAGEMAP_SCAN correctly reports guard region ranges.
2090 const unsigned long page_size = self->page_size; in TEST_F()
2103 ASSERT_NE(proc_fd, -1); in TEST_F()
2114 /* Install a guard region in every other page. */ in TEST_F()
2128 /* Re-read from pagemap, and assert guard regions are detected. */ in TEST_F()
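
PAGEMAP_SCAN is an ioctl on /proc/<pid>/pagemap that returns ranges of pages matching requested categories. A sketch that scans for guard-region pages; PAGE_IS_GUARD is the category added alongside guard regions in <linux/fs.h>, so its availability (and the ioctl returning the number of filled regions) is an assumption to verify against your headers:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102
#endif

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        struct page_region regions[10];
        struct pm_scan_arg arg = {
                .size = sizeof(arg),
                .start = (unsigned long)ptr,
                .end = (unsigned long)ptr + 10 * page_size,
                .vec = (unsigned long)regions,
                .vec_len = 10,
                .category_mask = PAGE_IS_GUARD,
                .return_mask = PAGE_IS_GUARD,
        };
        int fd = open("/proc/self/pagemap", O_RDONLY);
        long nr;

        if (ptr == MAP_FAILED || fd == -1)
                return 1;

        /* Guard every other page, then ask the kernel which ranges match. */
        for (int i = 0; i < 10; i += 2)
                madvise(ptr + i * page_size, page_size, MADV_GUARD_INSTALL);

        nr = ioctl(fd, PAGEMAP_SCAN, &arg);
        for (long i = 0; i < nr; i++)
                printf("guard range: %llx-%llx\n",
                       (unsigned long long)regions[i].start,
                       (unsigned long long)regions[i].end);

        close(fd);
        munmap(ptr, 10 * page_size);
        return 0;
}
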