113d4f961SRyan Libby /*
213d4f961SRyan Libby * Copyright (c) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
31dfa8b73SMark Johnston * Copyright (c) 2022 The FreeBSD Foundation
41dfa8b73SMark Johnston *
51dfa8b73SMark Johnston * Portions of this software were developed by Mark Johnston under sponsorship
61dfa8b73SMark Johnston * from the FreeBSD Foundation.
713d4f961SRyan Libby *
813d4f961SRyan Libby * Redistribution and use in source and binary forms, with or without
913d4f961SRyan Libby * modification, are permitted provided that the following conditions
1013d4f961SRyan Libby * are met:
1113d4f961SRyan Libby * 1. Redistributions of source code must retain the above copyright
1213d4f961SRyan Libby * notice, this list of conditions and the following disclaimer.
1313d4f961SRyan Libby * 2. Redistributions in binary form must reproduce the above copyright
1413d4f961SRyan Libby * notice, this list of conditions and the following disclaimer in the
1513d4f961SRyan Libby * documentation and/or other materials provided with the distribution.
1613d4f961SRyan Libby *
1713d4f961SRyan Libby * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1813d4f961SRyan Libby * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1913d4f961SRyan Libby * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2013d4f961SRyan Libby * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
2113d4f961SRyan Libby * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2213d4f961SRyan Libby * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2313d4f961SRyan Libby * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2413d4f961SRyan Libby * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2513d4f961SRyan Libby * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2613d4f961SRyan Libby * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2713d4f961SRyan Libby * SUCH DAMAGE.
2813d4f961SRyan Libby */
2913d4f961SRyan Libby
3013d4f961SRyan Libby /*
3113d4f961SRyan Libby * Test behavior when a mapping of a shared shadow vm object is
321dfa8b73SMark Johnston * invalidated by COW from another mapping. In particular, when
 * minherit(INHERIT_SHARE) is applied to a COW mapping, a subsequently
341dfa8b73SMark Johnston * forked child process will share the parent's shadow object. Thus,
351dfa8b73SMark Johnston * pages already mapped into one sharing process may be written from
361dfa8b73SMark Johnston * another, triggering a copy into the shadow object. The VM system
371dfa8b73SMark Johnston * expects that a fully shadowed page is unmapped, but at one point the
381dfa8b73SMark Johnston * use of a shared shadow object could break this invariant.
3913d4f961SRyan Libby *
4013d4f961SRyan Libby * This is a regression test for an issue isolated by rlibby@FreeBSD.org
4113d4f961SRyan Libby * from an issue detected by stress2's collapse.sh by jeff@FreeBSD.org.
4213d4f961SRyan Libby * The issue became CVE-2021-29626.
4313d4f961SRyan Libby *
4413d4f961SRyan Libby * This file is written as an ATF test suite but may be compiled as a
4513d4f961SRyan Libby * standalone program with -DSTANDALONE (and optionally -DDEBUG).
4613d4f961SRyan Libby */
4713d4f961SRyan Libby
481dfa8b73SMark Johnston #include <sys/param.h>
4913d4f961SRyan Libby #include <sys/mman.h>
5013d4f961SRyan Libby #include <sys/procctl.h>
511dfa8b73SMark Johnston #include <sys/resource.h>
521dfa8b73SMark Johnston #include <sys/sysctl.h>
5313d4f961SRyan Libby #include <sys/wait.h>
541dfa8b73SMark Johnston
5513d4f961SRyan Libby #include <machine/atomic.h>
5613d4f961SRyan Libby
5713d4f961SRyan Libby #include <err.h>
5813d4f961SRyan Libby #include <errno.h>
5913d4f961SRyan Libby #include <stdbool.h>
6013d4f961SRyan Libby #include <stddef.h>
6113d4f961SRyan Libby #include <stdio.h>
6213d4f961SRyan Libby #include <stdlib.h>
6313d4f961SRyan Libby #include <unistd.h>
6413d4f961SRyan Libby
#ifdef STANDALONE
/* Minimal stand-in for ATF's ATF_REQUIRE when built as a plain program. */
#define ATF_REQUIRE(x) do { \
	if (!(x)) \
		errx(1, "%s", #x); \
} while (0)
#else
#include <atf-c.h>
#endif

#ifdef DEBUG
#define dprintf(...) printf(__VA_ARGS__)
#else
/* Compiled out without -DDEBUG; the arguments are not evaluated. */
#define dprintf(...)
#endif

/* Number of process roles; valid child depths are 1..DEPTH-1 (P1..P4). */
#define DEPTH 5

/* Test-variation bits packed into the "flags" argument. */
#define FLAG_COLLAPSE 0x1	/* let P1/P4 exit to trigger shadow collapse */
#define FLAG_BLOCK_XFER 0x2	/* fork P4 to defeat the page-transfer COW path */
#define FLAG_FULLMOD 0x4	/* P3 rewrites the whole region, not half */
#define FLAG_MASK (FLAG_COLLAPSE | FLAG_BLOCK_XFER | FLAG_FULLMOD)
861dfa8b73SMark Johnston
/*
 * Test configuration and coordination state, shared among P0 and all
 * descendants through a MAP_SHARED mapping set up by
 * do_one_shared_shadow_inval().
 */
struct shared_state {
	void *p;		/* base of the memory region under test */
	size_t len;		/* total length of that region */
	size_t modlen;		/* length of the prefix P3 overwrites */
	size_t pagesize;	/* base page size; loop stride */
	bool collapse;		/* FLAG_COLLAPSE: P1/P4 exit early */
	bool block_xfer;	/* FLAG_BLOCK_XFER: fork P4 */
	bool lazy_cow;		/* fault-driven COW vs. mlock()+mprotect() */
	bool okay;		/* set by P2 when verification succeeded */
	volatile bool exiting[DEPTH];	/* per-depth "I am exiting" flags */
	volatile bool exit;		/* tell all pollers to finish/abort */
	volatile bool p3_did_write;	/* P3 completed its writes */
};
10013d4f961SRyan Libby
10113d4f961SRyan Libby /*
10213d4f961SRyan Libby * Program flow. There are three or four processes that are descendants
10313d4f961SRyan Libby * of the process running the test (P0), where arrows go from parents to
10413d4f961SRyan Libby * children, and thicker arrows indicate sharing a certain memory region
10513d4f961SRyan Libby * without COW semantics:
10613d4f961SRyan Libby * P0 -> P1 -> P2 => P3
10713d4f961SRyan Libby * \=> P4
10813d4f961SRyan Libby * The main idea is that P1 maps a memory region, and that region is
10913d4f961SRyan Libby * shared with P2/P3, but with COW semantics. When P3 modifies the
11013d4f961SRyan Libby * memory, P2 ought to see that modification. P4 optionally exists to
11113d4f961SRyan Libby * defeat a COW optimization.
11213d4f961SRyan Libby */
11313d4f961SRyan Libby
/*
 * Error-exit helpers for the child processes: raise the shared exit flag
 * first so that sibling processes polling ss->exit also terminate, then
 * exit through err()/errx().  Both expect a local "ss" in scope.
 */
#define child_err(...) do { \
	ss->exit = true; \
	err(1, __VA_ARGS__); \
} while (0)

#define child_errx(...) do { \
	ss->exit = true; \
	errx(1, __VA_ARGS__); \
} while (0)

/* Polling interval of the busy-wait loops, in microseconds. */
#define SLEEP_TIME_US 1000
12513d4f961SRyan Libby
12613d4f961SRyan Libby static void child(struct shared_state *ss, int depth);
12713d4f961SRyan Libby
12813d4f961SRyan Libby static pid_t
child_fork(struct shared_state * ss,int depth)12913d4f961SRyan Libby child_fork(struct shared_state *ss, int depth)
13013d4f961SRyan Libby {
13113d4f961SRyan Libby pid_t pid = fork();
13213d4f961SRyan Libby if (pid == -1)
13313d4f961SRyan Libby child_err("fork");
13413d4f961SRyan Libby else if (pid == 0)
13513d4f961SRyan Libby child(ss, depth);
13613d4f961SRyan Libby return pid;
13713d4f961SRyan Libby }
13813d4f961SRyan Libby
13913d4f961SRyan Libby static void
child_fault(struct shared_state * ss)14013d4f961SRyan Libby child_fault(struct shared_state *ss)
14113d4f961SRyan Libby {
14213d4f961SRyan Libby size_t i;
14313d4f961SRyan Libby
1441dfa8b73SMark Johnston for (i = 0; i < ss->len; i += ss->pagesize)
14513d4f961SRyan Libby (void)((volatile char *)ss->p)[i];
14613d4f961SRyan Libby }
14713d4f961SRyan Libby
14813d4f961SRyan Libby static void
child_write(struct shared_state * ss,int val,size_t len)14913d4f961SRyan Libby child_write(struct shared_state *ss, int val, size_t len)
15013d4f961SRyan Libby {
15113d4f961SRyan Libby size_t i;
15213d4f961SRyan Libby
1531dfa8b73SMark Johnston for (i = 0; i < len; i += ss->pagesize)
15413d4f961SRyan Libby ((int *)ss->p)[i / sizeof(int)] = val;
15513d4f961SRyan Libby atomic_thread_fence_rel();
15613d4f961SRyan Libby }
15713d4f961SRyan Libby
15813d4f961SRyan Libby static void
child_wait_p3_write(struct shared_state * ss)15913d4f961SRyan Libby child_wait_p3_write(struct shared_state *ss)
16013d4f961SRyan Libby {
16113d4f961SRyan Libby while (!ss->p3_did_write) {
16213d4f961SRyan Libby if (ss->exit)
16313d4f961SRyan Libby exit(1);
16413d4f961SRyan Libby usleep(SLEEP_TIME_US);
16513d4f961SRyan Libby }
16613d4f961SRyan Libby atomic_thread_fence_acq();
16713d4f961SRyan Libby }
16813d4f961SRyan Libby
16913d4f961SRyan Libby static void
child_verify(struct shared_state * ss,int depth,int newval,int oldval)17013d4f961SRyan Libby child_verify(struct shared_state *ss, int depth, int newval, int oldval)
17113d4f961SRyan Libby {
17213d4f961SRyan Libby size_t i;
17313d4f961SRyan Libby int expectval, foundval;
17413d4f961SRyan Libby
1751dfa8b73SMark Johnston for (i = 0; i < ss->len; i += ss->pagesize) {
17613d4f961SRyan Libby expectval = i < ss->modlen ? newval : oldval;
17713d4f961SRyan Libby foundval = ((int *)ss->p)[i / sizeof(int)];
17813d4f961SRyan Libby if (foundval == expectval)
17913d4f961SRyan Libby continue;
18013d4f961SRyan Libby child_errx("P%d saw %d but expected %d, %d was the old value",
18113d4f961SRyan Libby depth, foundval, expectval, oldval);
18213d4f961SRyan Libby }
18313d4f961SRyan Libby }
18413d4f961SRyan Libby
/*
 * Body of every descendant process; "depth" selects the role (P1..P4,
 * see the program-flow diagram above).  Never returns: terminates via
 * exit() or one of the child_err*() macros.
 */
static void
child(struct shared_state *ss, int depth)
{
	/* oldval is pid_t because the region is stamped with pids. */
	pid_t mypid, oldval, pid;

	if (depth < 1 || depth >= DEPTH)
		child_errx("Bad depth %d", depth);
	mypid = getpid();
	dprintf("P%d (pid %d) started\n", depth, mypid);
	switch (depth) {
	case 1:
		/* Shared memory undergoing test. */
		ss->p = mmap(NULL, ss->len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANON, -1, 0);
		if (ss->p == MAP_FAILED)
			child_err("mmap");

		/* P1 stamps the shared memory. */
		child_write(ss, mypid, ss->len);
		if (!ss->lazy_cow) {
			/*
			 * Eager mode: wire the pages and drop write access so
			 * that the later COW happens via mprotect(), not a
			 * lazy fault.
			 */
			if (mlock(ss->p, ss->len) == -1)
				child_err("mlock");
			if (mprotect(ss->p, ss->len, PROT_READ) == -1)
				child_err("mprotect");
		}
		if (ss->block_xfer) {
			/*
			 * P4 is forked so that its existence blocks a page COW
			 * path where the page is simply transferred between
			 * objects, rather than being copied.
			 */
			child_fork(ss, 4);
		}
		/*
		 * P1 specifies that modifications from its child processes not
		 * be shared with P1.  Child process reads can be serviced from
		 * pages in P1's object, but writes must be COW'd.
		 */
		if (minherit(ss->p, ss->len, INHERIT_COPY) != 0)
			child_err("minherit");
		/* Fork P2. */
		child_fork(ss, depth + 1);
		/* P1 and P4 wait for P3's writes before exiting. */
		child_wait_p3_write(ss);
		/* P1's view must be unaffected by P3's COW'd writes. */
		child_verify(ss, depth, mypid, mypid);
		if (!ss->collapse) {
			/* Hang around to prevent collapse. */
			while (!ss->exit)
				usleep(SLEEP_TIME_US);
		}
		/* Exit so the P2 -> P1/P4 shadow chain can collapse. */
		break;
	case 2:
		/*
		 * P2 now specifies that modifications from its child processes
		 * be shared.  P2 and P3 will share a shadow object.
		 */
		if (minherit(ss->p, ss->len, INHERIT_SHARE) != 0)
			child_err("minherit");

		/*
		 * P2 faults a page in P1's object before P1 exits and the
		 * shadow chain is collapsed.  This may be redundant if the
		 * (read-only) mappings were copied by fork(), but it doesn't
		 * hurt.
		 */
		child_fault(ss);
		oldval = atomic_load_acq_int(ss->p);

		/* Fork P3. */
		pid = child_fork(ss, depth + 1);
		if (ss->collapse) {
			/* Wait for P1 and P4 to exit, triggering collapse. */
			while (!ss->exiting[1] ||
			    (ss->block_xfer && !ss->exiting[4]))
				usleep(SLEEP_TIME_US);
			/*
			 * This is racy, just guess at how long it may take
			 * them to finish exiting.
			 */
			usleep(100 * 1000);
		}
		/* P2 waits for P3's modification. */
		child_wait_p3_write(ss);
		/* The heart of the test: P2 must observe P3's writes. */
		child_verify(ss, depth, pid, oldval);
		ss->okay = true;
		ss->exit = true;
		break;
	case 3:
		/*
		 * Use mlock()+mprotect() to trigger the COW.  This
		 * exercises a different COW handler than the one used
		 * for lazy faults.
		 */
		if (!ss->lazy_cow) {
			if (mlock(ss->p, ss->len) == -1)
				child_err("mlock");
			if (mprotect(ss->p, ss->len, PROT_READ | PROT_WRITE) ==
			    -1)
				child_err("mprotect");
		}

		/*
		 * P3 writes the memory.  A page is faulted into the shared
		 * P2/P3 shadow object.  P2's mapping of the page in P1's
		 * object must now be shot down, or else P2 will wrongly
		 * continue to have that page mapped.
		 */
		child_write(ss, mypid, ss->modlen);
		ss->p3_did_write = true;
		dprintf("P3 (pid %d) wrote its pid\n", mypid);
		break;
	case 4:
		/* Just hang around until P3 is done writing. */
		oldval = atomic_load_acq_int(ss->p);
		child_wait_p3_write(ss);
		/* Like P1, P4 must not see P3's COW'd writes. */
		child_verify(ss, depth, oldval, oldval);
		break;
	default:
		child_errx("Bad depth %d", depth);
	}

	dprintf("P%d (pid %d) exiting\n", depth, mypid);
	ss->exiting[depth] = true;
	exit(0);
}
31113d4f961SRyan Libby
31213d4f961SRyan Libby static void
do_one_shared_shadow_inval(bool lazy_cow,size_t pagesize,size_t len,unsigned int flags)3131dfa8b73SMark Johnston do_one_shared_shadow_inval(bool lazy_cow, size_t pagesize, size_t len,
3141dfa8b73SMark Johnston unsigned int flags)
31513d4f961SRyan Libby {
31613d4f961SRyan Libby struct shared_state *ss;
31713d4f961SRyan Libby pid_t pid;
3181dfa8b73SMark Johnston int status;
31913d4f961SRyan Libby
32013d4f961SRyan Libby pid = getpid();
32113d4f961SRyan Libby
32213d4f961SRyan Libby dprintf("P0 (pid %d) %s(collapse=%d, block_xfer=%d, full_mod=%d)\n",
32313d4f961SRyan Libby pid, __func__, (int)collapse, (int)block_xfer, (int)full_mod);
32413d4f961SRyan Libby
32513d4f961SRyan Libby ATF_REQUIRE(procctl(P_PID, pid, PROC_REAP_ACQUIRE, NULL) == 0);
32613d4f961SRyan Libby
32713d4f961SRyan Libby /* Shared memory for coordination. */
32813d4f961SRyan Libby ss = mmap(NULL, sizeof(*ss), PROT_READ | PROT_WRITE,
32913d4f961SRyan Libby MAP_SHARED | MAP_ANON, -1, 0);
33013d4f961SRyan Libby ATF_REQUIRE(ss != MAP_FAILED);
33113d4f961SRyan Libby
3321dfa8b73SMark Johnston ss->len = len;
3331dfa8b73SMark Johnston ss->modlen = (flags & FLAG_FULLMOD) ? ss->len : ss->len / 2;
3341dfa8b73SMark Johnston ss->pagesize = pagesize;
3351dfa8b73SMark Johnston ss->collapse = (flags & FLAG_COLLAPSE) != 0;
3361dfa8b73SMark Johnston ss->block_xfer = (flags & FLAG_BLOCK_XFER) != 0;
3371dfa8b73SMark Johnston ss->lazy_cow = lazy_cow;
33813d4f961SRyan Libby
33913d4f961SRyan Libby pid = fork();
34013d4f961SRyan Libby ATF_REQUIRE(pid != -1);
34113d4f961SRyan Libby if (pid == 0)
34213d4f961SRyan Libby child(ss, 1);
34313d4f961SRyan Libby
34413d4f961SRyan Libby /* Wait for all descendants to exit. */
34513d4f961SRyan Libby do {
3461dfa8b73SMark Johnston pid = wait(&status);
3471dfa8b73SMark Johnston ATF_REQUIRE(WIFEXITED(status));
34813d4f961SRyan Libby } while (pid != -1 || errno != ECHILD);
34913d4f961SRyan Libby
35013d4f961SRyan Libby atomic_thread_fence_acq();
35113d4f961SRyan Libby ATF_REQUIRE(ss->okay);
35213d4f961SRyan Libby
35313d4f961SRyan Libby ATF_REQUIRE(munmap(ss, sizeof(*ss)) == 0);
35413d4f961SRyan Libby ATF_REQUIRE(procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL) == 0);
35513d4f961SRyan Libby }
35613d4f961SRyan Libby
3571dfa8b73SMark Johnston static void
do_shared_shadow_inval(bool lazy_cow)3581dfa8b73SMark Johnston do_shared_shadow_inval(bool lazy_cow)
3591dfa8b73SMark Johnston {
3601dfa8b73SMark Johnston size_t largepagesize, pagesize, pagesizes[MAXPAGESIZES], sysctllen;
3611dfa8b73SMark Johnston
3621dfa8b73SMark Johnston sysctllen = sizeof(pagesizes);
3631dfa8b73SMark Johnston ATF_REQUIRE(sysctlbyname("hw.pagesizes", pagesizes, &sysctllen, NULL,
3641dfa8b73SMark Johnston 0) == 0);
3651dfa8b73SMark Johnston ATF_REQUIRE(sysctllen >= sizeof(size_t));
3661dfa8b73SMark Johnston
3671dfa8b73SMark Johnston pagesize = pagesizes[0];
3684b8feb5dSMark Johnston largepagesize = MAXPAGESIZES >= 2 &&
3694b8feb5dSMark Johnston sysctllen >= 2 * sizeof(size_t) && pagesizes[1] != 0 ?
3701dfa8b73SMark Johnston pagesizes[1] : 2 * 1024 * 1024;
3711dfa8b73SMark Johnston
3721dfa8b73SMark Johnston for (unsigned int i = 0; i <= FLAG_MASK; i++) {
3731dfa8b73SMark Johnston do_one_shared_shadow_inval(lazy_cow, pagesize,
3741dfa8b73SMark Johnston pagesize, i);
3751dfa8b73SMark Johnston do_one_shared_shadow_inval(lazy_cow, pagesize,
3761dfa8b73SMark Johnston 2 * pagesize, i);
3771dfa8b73SMark Johnston do_one_shared_shadow_inval(lazy_cow, pagesize,
3781dfa8b73SMark Johnston largepagesize - pagesize, i);
3791dfa8b73SMark Johnston do_one_shared_shadow_inval(lazy_cow, pagesize,
3801dfa8b73SMark Johnston largepagesize, i);
3811dfa8b73SMark Johnston do_one_shared_shadow_inval(lazy_cow, pagesize,
3821dfa8b73SMark Johnston largepagesize + pagesize, i);
3831dfa8b73SMark Johnston }
3841dfa8b73SMark Johnston }
3851dfa8b73SMark Johnston
3861dfa8b73SMark Johnston static void
do_shared_shadow_inval_eager(void)3871dfa8b73SMark Johnston do_shared_shadow_inval_eager(void)
3881dfa8b73SMark Johnston {
3891dfa8b73SMark Johnston struct rlimit rl;
3901dfa8b73SMark Johnston
3911dfa8b73SMark Johnston rl.rlim_cur = rl.rlim_max = RLIM_INFINITY;
3921dfa8b73SMark Johnston ATF_REQUIRE(setrlimit(RLIMIT_MEMLOCK, &rl) == 0);
3931dfa8b73SMark Johnston
3941dfa8b73SMark Johnston do_shared_shadow_inval(false);
3951dfa8b73SMark Johnston }
3961dfa8b73SMark Johnston
3971dfa8b73SMark Johnston static void
do_shared_shadow_inval_lazy(void)3981dfa8b73SMark Johnston do_shared_shadow_inval_lazy(void)
3991dfa8b73SMark Johnston {
4001dfa8b73SMark Johnston do_shared_shadow_inval(true);
4011dfa8b73SMark Johnston }
4021dfa8b73SMark Johnston
#ifdef STANDALONE
/*
 * Standalone entry point: run both variants back to back; any failure
 * aborts via errx() in the ATF_REQUIRE stand-in, so reaching the final
 * printf means the test passed.
 */
int
main(void)
{
	do_shared_shadow_inval_lazy();
	do_shared_shadow_inval_eager();
	printf("pass\n");
}
#else
/* ATF test case wrappers around the two COW variants. */
ATF_TC_WITHOUT_HEAD(shared_shadow_inval__lazy_cow);
ATF_TC_BODY(shared_shadow_inval__lazy_cow, tc)
{
	do_shared_shadow_inval_lazy();
}

ATF_TC(shared_shadow_inval__eager_cow);
ATF_TC_HEAD(shared_shadow_inval__eager_cow, tc)
{
	/* Needed to raise the mlock() limit. */
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(shared_shadow_inval__eager_cow, tc)
{
	do_shared_shadow_inval_eager();
}

ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp, shared_shadow_inval__lazy_cow);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__eager_cow);
	return (atf_no_error());
}
#endif /* !STANDALONE */
436