xref: /freebsd/tests/sys/vm/shared_shadow_inval_test.c (revision bc5304a006238115291e7568583632889dffbab9)
1 /*
2  * Copyright (c) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 /*
27  * Test behavior when a mapping of a shared shadow vm object is
28  * invalidated by COW from another mapping.
29  *
30  * This is a regression test for an issue isolated by rlibby@FreeBSD.org
31  * from an issue detected by stress2's collapse.sh by jeff@FreeBSD.org.
32  * The issue became CVE-2021-29626.
33  *
34  * This file is written as an ATF test suite but may be compiled as a
35  * standalone program with -DSTANDALONE (and optionally -DDEBUG).
36  */
37 
38 #include <sys/types.h>
39 #include <sys/mman.h>
40 #include <sys/procctl.h>
41 #include <sys/wait.h>
42 #include <machine/atomic.h>
43 
44 #include <err.h>
45 #include <errno.h>
46 #include <stdbool.h>
47 #include <stddef.h>
48 #include <stdio.h>
49 #include <stdlib.h>
50 #include <unistd.h>
51 
#ifdef	STANDALONE
/*
 * Standalone builds lack ATF; map ATF_REQUIRE onto errx() so a failed
 * requirement aborts the program, printing the stringified condition.
 */
#define	ATF_REQUIRE(x)	do {		\
	if (!(x))			\
		errx(1, "%s", #x);	\
} while (0)
#else
#include <atf-c.h>
#endif

/* Debug tracing: expands to printf() with -DDEBUG, otherwise to nothing. */
#ifdef	DEBUG
#define	dprintf(...)	printf(__VA_ARGS__)
#else
#define	dprintf(...)
#endif
66 
/* Process-tree depth bound: valid child depths are 1..DEPTH-1 (P1..P4). */
#define	DEPTH	5

/*
 * Coordination state, placed in a MAP_SHARED anonymous mapping so that
 * P0 and every descendant observe the same flags.
 */
struct shared_state {
	void *p;		/* base of the memory region under test */
	size_t len;		/* total length of the test region */
	size_t modlen;		/* length of the prefix P3 overwrites */
	bool collapse;		/* if set, P2 waits for P1/P4 exit (collapse) */
	bool block_xfer;	/* if set, fork P4 to block page-transfer COW */
	bool okay;		/* set by P2 once verification succeeded */
	volatile bool exiting[DEPTH];	/* per-depth "about to exit" flags */
	volatile bool exit;	/* global stop/abort flag for the pollers */
	volatile bool p3_did_write;	/* P3 finished stamping its pid */
};

/* System page size; set in do_shared_shadow_inval() before any fork. */
static long g_pagesize;
82 
83 /*
84  * Program flow.  There are three or four processes that are descendants
85  * of the process running the test (P0), where arrows go from parents to
86  * children, and thicker arrows indicate sharing a certain memory region
87  * without COW semantics:
88  *     P0 -> P1 -> P2 => P3
89  *             \=> P4
90  * The main idea is that P1 maps a memory region, and that region is
91  * shared with P2/P3, but with COW semantics.  When P3 modifies the
92  * memory, P2 ought to see that modification.  P4 optionally exists to
93  * defeat a COW optimization.
94  */
95 
/*
 * Error-exit helpers for child processes: raise the shared stop flag so
 * sibling processes break out of their polling loops, then terminate via
 * err() (with errno) or errx() (without).  Both require a local `ss`.
 */
#define	child_err(...)	do {						\
	ss->exit = true;						\
	err(1, __VA_ARGS__);						\
} while (0)

#define	child_errx(...)	do {						\
	ss->exit = true;						\
	errx(1, __VA_ARGS__);						\
} while (0)

/* Interval between polls in the busy-wait loops below. */
#define	SLEEP_TIME_US	1000

static void child(struct shared_state *ss, int depth);
109 
/*
 * Fork a child that runs child() at the given depth.  Returns the
 * child's pid in the parent; the child itself never returns from
 * child().  On fork failure, aborts via child_err().
 */
static pid_t
child_fork(struct shared_state *ss, int depth)
{
	pid_t newpid;

	newpid = fork();
	if (newpid == 0)
		child(ss, depth);
	else if (newpid == -1)
		child_err("fork");
	return (newpid);
}
120 
121 static void
122 child_fault(struct shared_state *ss)
123 {
124 	size_t i;
125 
126 	for (i = 0; i < ss->len; i += g_pagesize)
127 		(void)((volatile char *)ss->p)[i];
128 }
129 
130 static void
131 child_write(struct shared_state *ss, int val, size_t len)
132 {
133 	size_t i;
134 
135 	for (i = 0; i < len; i += g_pagesize)
136 		((int *)ss->p)[i / sizeof(int)] = val;
137 	atomic_thread_fence_rel();
138 }
139 
140 static void
141 child_wait_p3_write(struct shared_state *ss)
142 {
143 	while (!ss->p3_did_write) {
144 		if (ss->exit)
145 			exit(1);
146 		usleep(SLEEP_TIME_US);
147 	}
148 	atomic_thread_fence_acq();
149 }
150 
151 static void
152 child_verify(struct shared_state *ss, int depth, int newval, int oldval)
153 {
154 	size_t i;
155 	int expectval, foundval;
156 
157 	for (i = 0; i < ss->len; i += g_pagesize) {
158 		expectval = i < ss->modlen ? newval : oldval;
159 		foundval = ((int *)ss->p)[i / sizeof(int)];
160 		if (foundval == expectval)
161 			continue;
162 		child_errx("P%d saw %d but expected %d, %d was the old value",
163 		    depth, foundval, expectval, oldval);
164 	}
165 }
166 
/*
 * Body of each descendant process P1..P4; `depth` selects the role
 * (see the program-flow comment above).  Never returns: it exits 0 on
 * success, or non-zero via exit(1)/child_err()/child_errx() on failure.
 */
static void
child(struct shared_state *ss, int depth)
{
	/* oldval holds a pid value read back from the region (P1's stamp). */
	pid_t mypid, oldval, pid;

	if (depth < 1 || depth >= DEPTH)
		child_errx("Bad depth %d", depth);
	mypid = getpid();
	dprintf("P%d (pid %d) started\n", depth, mypid);
	switch (depth) {
	case 1:
		/* Shared memory undergoing test. */
		ss->p = mmap(NULL, ss->len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANON, -1, 0);
		if (ss->p == MAP_FAILED)
			child_err("mmap");
		/* P1 stamps the shared memory. */
		child_write(ss, mypid, ss->len);
		if (ss->block_xfer) {
			/*
			 * P4 is forked so that its existence blocks a page COW
			 * path where the page is simply transferred between
			 * objects, rather than being copied.
			 */
			child_fork(ss, 4);
		}
		/*
		 * P1 specifies that modifications from its child processes not
		 * be shared with P1.  Child process reads can be serviced from
		 * pages in P1's object, but writes must be COW'd.
		 */
		if (minherit(ss->p, ss->len, INHERIT_COPY) != 0)
			child_err("minherit");
		/* Fork P2. */
		child_fork(ss, depth + 1);
		/* P1 and P4 wait for P3's writes before exiting. */
		child_wait_p3_write(ss);
		/* P1's view must be unchanged: every page still holds mypid. */
		child_verify(ss, depth, mypid, mypid);
		if (!ss->collapse) {
			/* Hang around to prevent collapse. */
			while (!ss->exit)
				usleep(SLEEP_TIME_US);
		}
		/* Exit so the P2 -> P1/P4 shadow chain can collapse. */
		break;
	case 2:
		/*
		 * P2 now specifies that modifications from its child processes
		 * be shared.  P2 and P3 will share a shadow object.
		 */
		if (minherit(ss->p, ss->len, INHERIT_SHARE) != 0)
			child_err("minherit");
		/*
		 * P2 faults a page in P1's object before P1 exits and the
		 * shadow chain is collapsed.
		 */
		child_fault(ss);
		oldval = atomic_load_acq_int(ss->p);
		/* Fork P3. */
		pid = child_fork(ss, depth + 1);
		if (ss->collapse) {
			/* Wait for P1 and P4 to exit, triggering collapse. */
			while (!ss->exiting[1] ||
			    (ss->block_xfer && !ss->exiting[4]))
				usleep(SLEEP_TIME_US);
			/*
			 * This is racy, just guess at how long it may take
			 * them to finish exiting.
			 */
			usleep(100 * 1000);
		}
		/* P2 waits for P3's modification. */
		child_wait_p3_write(ss);
		/*
		 * The heart of the test: P2 must observe P3's writes (P3's
		 * pid) in the modified prefix, and P1's stamp elsewhere.
		 */
		child_verify(ss, depth, pid, oldval);
		ss->okay = true;
		ss->exit = true;
		break;
	case 3:
		/*
		 * P3 writes the memory.  A page is faulted into the shared
		 * P2/P3 shadow object.  P2's mapping of the page in P1's
		 * object must now be shot down, or else P2 will wrongly
		 * continue to have that page mapped.
		 */
		child_write(ss, mypid, ss->modlen);
		ss->p3_did_write = true;
		dprintf("P3 (pid %d) wrote its pid\n", mypid);
		break;
	case 4:
		/* Just hang around until P3 is done writing. */
		oldval = atomic_load_acq_int(ss->p);
		child_wait_p3_write(ss);
		/* P4's COW view must be entirely unchanged. */
		child_verify(ss, depth, oldval, oldval);
		break;
	default:
		child_errx("Bad depth %d", depth);
	}

	dprintf("P%d (pid %d) exiting\n", depth, mypid);
	/* Signal siblings (P2's collapse wait) that this process is leaving. */
	ss->exiting[depth] = true;
	exit(0);
}
269 
270 static void
271 do_shared_shadow_inval(bool collapse, bool block_xfer, bool full_mod)
272 {
273 	struct shared_state *ss;
274 	pid_t pid;
275 
276 	pid = getpid();
277 
278 	dprintf("P0 (pid %d) %s(collapse=%d, block_xfer=%d, full_mod=%d)\n",
279 	    pid, __func__, (int)collapse, (int)block_xfer, (int)full_mod);
280 
281 	g_pagesize = sysconf(_SC_PAGESIZE);
282 	ATF_REQUIRE(g_pagesize > 0);
283 
284 	ATF_REQUIRE(procctl(P_PID, pid, PROC_REAP_ACQUIRE, NULL) == 0);
285 
286 	/* Shared memory for coordination. */
287 	ss = mmap(NULL, sizeof(*ss), PROT_READ | PROT_WRITE,
288 	    MAP_SHARED | MAP_ANON, -1, 0);
289 	ATF_REQUIRE(ss != MAP_FAILED);
290 
291 	ss->len = 2 * 1024 * 1024 + g_pagesize; /* 2 MB + page size */
292 	ss->modlen = full_mod ? ss->len : ss->len / 2;
293 	ss->collapse = collapse;
294 	ss->block_xfer = block_xfer;
295 
296 	pid = fork();
297 	ATF_REQUIRE(pid != -1);
298 	if (pid == 0)
299 		child(ss, 1);
300 
301 	/* Wait for all descendants to exit. */
302 	do {
303 		pid = wait(NULL);
304 	} while (pid != -1 || errno != ECHILD);
305 
306 	atomic_thread_fence_acq();
307 	ATF_REQUIRE(ss->okay);
308 
309 	ATF_REQUIRE(munmap(ss, sizeof(*ss)) == 0);
310 	ATF_REQUIRE(procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL) == 0);
311 }
312 
313 #ifdef STANDALONE
int
main(void)
{
	int mask;

	/*
	 * Exercise all eight combinations of the three boolean knobs:
	 * bit 2 = collapse, bit 1 = block_xfer, bit 0 = full_mod.
	 */
	for (mask = 0; mask < 8; mask++)
		do_shared_shadow_inval((mask & 4) != 0, (mask & 2) != 0,
		    (mask & 1) != 0);
	printf("pass\n");
	return (0);
}
328 #else
329 
/*
 * Generate one ATF test case per combination of the three knobs; the
 * body simply forwards the flags to do_shared_shadow_inval().
 */
#define SHARED_SHADOW_INVAL_TC(suffix, collapse, block_xfer, full_mod)	\
ATF_TC_WITHOUT_HEAD(shared_shadow_inval__##suffix);			\
ATF_TC_BODY(shared_shadow_inval__##suffix, tc)				\
{									\
	do_shared_shadow_inval(collapse, block_xfer, full_mod);		\
}

/* All eight combinations of (collapse, block_xfer, full_mod). */
SHARED_SHADOW_INVAL_TC(nocollapse_noblockxfer_nofullmod, false, false, false);
SHARED_SHADOW_INVAL_TC(nocollapse_noblockxfer_fullmod, false, false, true);
SHARED_SHADOW_INVAL_TC(nocollapse_blockxfer_nofullmod, false, true, false);
SHARED_SHADOW_INVAL_TC(nocollapse_blockxfer_fullmod, false, true, true);
SHARED_SHADOW_INVAL_TC(collapse_noblockxfer_nofullmod, true, false, false);
SHARED_SHADOW_INVAL_TC(collapse_noblockxfer_fullmod, true, false, true);
SHARED_SHADOW_INVAL_TC(collapse_blockxfer_nofullmod, true, true, false);
SHARED_SHADOW_INVAL_TC(collapse_blockxfer_fullmod, true, true, true);
345 
/* Register all eight generated test cases with the ATF test program. */
ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp,
	    shared_shadow_inval__nocollapse_noblockxfer_nofullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__nocollapse_noblockxfer_fullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__nocollapse_blockxfer_nofullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__nocollapse_blockxfer_fullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__collapse_noblockxfer_nofullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__collapse_noblockxfer_fullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__collapse_blockxfer_nofullmod);
	ATF_TP_ADD_TC(tp, shared_shadow_inval__collapse_blockxfer_fullmod);

	return atf_no_error();
}
360 #endif
361