xref: /linux/tools/testing/selftests/arm64/gcs/libc-gcs.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2023 ARM Limited.
4  */
5 
6 #define _GNU_SOURCE
7 
8 #include <pthread.h>
9 #include <stdbool.h>
10 
11 #include <sys/auxv.h>
12 #include <sys/mman.h>
13 #include <sys/prctl.h>
14 #include <sys/ptrace.h>
15 #include <sys/uio.h>
16 
17 #include <asm/hwcap.h>
18 #include <asm/mman.h>
19 #include <asm/ptrace.h>
20 
21 #include <linux/compiler.h>
22 
23 #include "kselftest_harness.h"
24 
25 #include "gcs-util.h"
26 
/*
 * Raw two-argument syscall for arm64: syscall number in x8, arguments
 * in x0/x1, with the remaining argument registers explicitly zeroed
 * because the kernel validates unused prctl() arguments.  Used in
 * main() instead of libc prctl() since we cannot return from the call
 * that enables the shadow stack via the normal libc path.  Evaluates
 * to the syscall's return value (left in x0).
 */
#define my_syscall2(num, arg1, arg2)                                          \
({                                                                            \
	register long _num  __asm__ ("x8") = (num);                           \
	register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
	register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
	register long _arg3 __asm__ ("x2") = 0;                               \
	register long _arg4 __asm__ ("x3") = 0;                               \
	register long _arg5 __asm__ ("x4") = 0;                               \
	                                                                      \
	__asm__  volatile (                                                   \
		"svc #0\n"                                                    \
		: "=r"(_arg1)                                                 \
		: "r"(_arg1), "r"(_arg2),                                     \
		  "r"(_arg3), "r"(_arg4),                                     \
		  "r"(_arg5), "r"(_num)					      \
		: "memory", "cc"                                              \
	);                                                                    \
	_arg1;                                                                \
})
46 
/*
 * Recurse to the requested depth so that each level pushes a real
 * return address onto the GCS; depth 0 still performs one call/return.
 * noinline plus the trailing barrier keep the compiler from flattening
 * or tail-calling the recursion away.
 */
static noinline void gcs_recurse(int depth)
{
	if (depth)
		gcs_recurse(depth - 1);

	/* Prevent tail call optimization so we actually recurse */
	asm volatile("dsb sy" : : : "memory");
}
55 
/* Smoke test that a function call and return works */
TEST(can_call_function)
{
	/* One call is enough to push and pop a GCS entry */
	gcs_recurse(0);
}
61 
gcs_test_thread(void * arg)62 static void *gcs_test_thread(void *arg)
63 {
64 	int ret;
65 	unsigned long mode;
66 
67 	/*
68 	 * Some libcs don't seem to fill unused arguments with 0 but
69 	 * the kernel validates this so we supply all 5 arguments.
70 	 */
71 	ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
72 	if (ret != 0) {
73 		ksft_print_msg("PR_GET_SHADOW_STACK_STATUS failed: %d\n", ret);
74 		return NULL;
75 	}
76 
77 	if (!(mode & PR_SHADOW_STACK_ENABLE)) {
78 		ksft_print_msg("GCS not enabled in thread, mode is %lu\n",
79 			       mode);
80 		return NULL;
81 	}
82 
83 	/* Just in case... */
84 	gcs_recurse(0);
85 
86 	/* Use a non-NULL value to indicate a pass */
87 	return &gcs_test_thread;
88 }
89 
90 /* Verify that if we start a new thread it has GCS enabled */
TEST(gcs_enabled_thread)91 TEST(gcs_enabled_thread)
92 {
93 	pthread_t thread;
94 	void *thread_ret;
95 	int ret;
96 
97 	ret = pthread_create(&thread, NULL, gcs_test_thread, NULL);
98 	ASSERT_TRUE(ret == 0);
99 	if (ret != 0)
100 		return;
101 
102 	ret = pthread_join(thread, &thread_ret);
103 	ASSERT_TRUE(ret == 0);
104 	if (ret != 0)
105 		return;
106 
107 	ASSERT_TRUE(thread_ret != NULL);
108 }
109 
/* Read the GCS until we find the terminator */
TEST(gcs_find_terminator)
{
	unsigned long *base, *p;

	base = get_gcspr();
	for (p = base; *p; p++)
		;

	ksft_print_msg("GCS in use from %p-%p\n", base, p);

	/*
	 * We should have at least whatever called into this test so
	 * the two pointers should differ.
	 */
	ASSERT_TRUE(base != p);
}
128 
/*
 * We can access a GCS via ptrace
 *
 * This could usefully have a fixture but note that each test is
 * fork()ed into a new child which causes issues.  Might be better to
 * lift at least some of this out into a separate, non-harness, test
 * program.
 */
TEST(ptrace_read_write)
{
	pid_t child, pid;
	int ret, status;
	siginfo_t si;
	uint64_t val, rval, gcspr;
	struct user_gcs child_gcs;
	struct iovec iov, local_iov, remote_iov;

	child = fork();
	if (child == -1) {
		ksft_print_msg("fork() failed: %d (%s)\n",
			       errno, strerror(errno));
		ASSERT_NE(child, -1);
	}

	if (child == 0) {
		/*
		 * In child, make sure there's something on the stack and
		 * ask to be traced.
		 */
		gcs_recurse(0);
		if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
			ksft_exit_fail_msg("PTRACE_TRACEME %s",
					   strerror(errno));

		/* Stop ourselves so the parent can inspect us */
		if (raise(SIGSTOP))
			ksft_exit_fail_msg("raise(SIGSTOP) %s",
					   strerror(errno));

		return;
	}

	ksft_print_msg("Child: %d\n", child);

	/* Attach to the child */
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			ksft_print_msg("wait() failed: %s",
				       strerror(errno));
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH) {
				ASSERT_NE(errno, ESRCH);
				return;
			}

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_print_msg("PTRACE_GETSIGINFO: %s\n",
				       strerror(errno));
			goto error;
		}

		/* The SIGSTOP the child raised on itself, it is now stopped */
		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		/* Not our stop, pass any signal on and keep waiting */
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH) {
				ASSERT_NE(errno, ESRCH);
				return;
			}

			ksft_print_msg("PTRACE_CONT: %s\n", strerror(errno));
			goto error;
		}
	}

	/* Where is the child GCS? */
	iov.iov_base = &child_gcs;
	iov.iov_len = sizeof(child_gcs);
	ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_GCS, &iov);
	if (ret != 0) {
		ksft_print_msg("Failed to read child GCS state: %s (%d)\n",
			       strerror(errno), errno);
		goto error;
	}

	/* We should have inherited GCS over fork(), confirm */
	if (!(child_gcs.features_enabled & PR_SHADOW_STACK_ENABLE)) {
		ASSERT_TRUE(child_gcs.features_enabled &
			    PR_SHADOW_STACK_ENABLE);
		goto error;
	}

	gcspr = child_gcs.gcspr_el0;
	ksft_print_msg("Child GCSPR 0x%lx, flags %llx, locked %llx\n",
		       gcspr, child_gcs.features_enabled,
		       child_gcs.features_locked);

	/* Ideally we'd cross check with the child memory map */

	/* PEEKDATA returns the data, so errors are reported via errno */
	errno = 0;
	val = ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL);
	ret = errno;
	if (ret != 0)
		ksft_print_msg("PTRACE_PEEKDATA failed: %s (%d)\n",
			       strerror(ret), ret);
	EXPECT_EQ(ret, 0);

	/* The child should be in a function, the GCSPR shouldn't be 0 */
	EXPECT_NE(val, 0);

	/* Same thing via process_vm_readv() */
	local_iov.iov_base = &rval;
	local_iov.iov_len = sizeof(rval);
	remote_iov.iov_base = (void *)gcspr;
	remote_iov.iov_len = sizeof(rval);
	ret = process_vm_readv(child, &local_iov, 1, &remote_iov, 1, 0);
	if (ret == -1)
		ksft_print_msg("process_vm_readv() failed: %s (%d)\n",
			       strerror(errno), errno);
	EXPECT_EQ(ret, sizeof(rval));
	EXPECT_EQ(val, rval);

	/* Write data via a poke, then read it back to confirm */
	ret = ptrace(PTRACE_POKEDATA, child, (void *)gcspr, NULL);
	if (ret == -1)
		ksft_print_msg("PTRACE_POKEDATA failed: %s (%d)\n",
			       strerror(errno), errno);
	EXPECT_EQ(ret, 0);
	EXPECT_EQ(0, ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL));

	/* Restore what we had before */
	ret = ptrace(PTRACE_POKEDATA, child, (void *)gcspr, val);
	if (ret == -1)
		ksft_print_msg("PTRACE_POKEDATA failed: %s (%d)\n",
			       strerror(errno), errno);
	EXPECT_EQ(ret, 0);
	EXPECT_EQ(val, ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL));

	/* That's all, folks */
	kill(child, SIGKILL);
	return;

error:
	kill(child, SIGKILL);
	ASSERT_FALSE(true);
}
301 
FIXTURE(map_gcs)
{
	/* Base of the shadow stack returned by map_shadow_stack() */
	unsigned long *stack;
};
306 
FIXTURE_VARIANT(map_gcs)
{
	/* Size of the shadow stack to allocate, in bytes */
	size_t stack_size;
	/* SHADOW_STACK_SET_* flags passed to map_shadow_stack() */
	unsigned long flags;
};
312 
/* 2k stacks, with every combination of cap token and top marker */
FIXTURE_VARIANT_ADD(map_gcs, s2k_cap_marker)
{
	.stack_size = 2 * 1024,
	.flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s2k_cap)
{
	.stack_size = 2 * 1024,
	.flags = SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s2k_marker)
{
	.stack_size = 2 * 1024,
	.flags = SHADOW_STACK_SET_MARKER,
};

FIXTURE_VARIANT_ADD(map_gcs, s2k)
{
	.stack_size = 2 * 1024,
	.flags = 0,
};
336 
FIXTURE_VARIANT_ADD(map_gcs,s4k_cap_marker)337 FIXTURE_VARIANT_ADD(map_gcs, s4k_cap_marker)
338 {
339 	.stack_size = 4 * 1024,
340 	.flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
341 };
342 
FIXTURE_VARIANT_ADD(map_gcs,s4k_cap)343 FIXTURE_VARIANT_ADD(map_gcs, s4k_cap)
344 {
345 	.stack_size = 4 * 1024,
346 	.flags = SHADOW_STACK_SET_TOKEN,
347 };
348 
FIXTURE_VARIANT_ADD(map_gcs,s3k_marker)349 FIXTURE_VARIANT_ADD(map_gcs, s3k_marker)
350 {
351 	.stack_size = 4 * 1024,
352 	.flags = SHADOW_STACK_SET_MARKER,
353 };
354 
FIXTURE_VARIANT_ADD(map_gcs,s4k)355 FIXTURE_VARIANT_ADD(map_gcs, s4k)
356 {
357 	.stack_size = 4 * 1024,
358 	.flags = 0,
359 };
360 
/* 16k stacks, with every combination of cap token and top marker */
FIXTURE_VARIANT_ADD(map_gcs, s16k_cap_marker)
{
	.stack_size = 16 * 1024,
	.flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s16k_cap)
{
	.stack_size = 16 * 1024,
	.flags = SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s16k_marker)
{
	.stack_size = 16 * 1024,
	.flags = SHADOW_STACK_SET_MARKER,
};

FIXTURE_VARIANT_ADD(map_gcs, s16k)
{
	.stack_size = 16 * 1024,
	.flags = 0,
};
384 
/* 64k stacks, with every combination of cap token and top marker */
FIXTURE_VARIANT_ADD(map_gcs, s64k_cap_marker)
{
	.stack_size = 64 * 1024,
	.flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s64k_cap)
{
	.stack_size = 64 * 1024,
	.flags = SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s64k_marker)
{
	.stack_size = 64 * 1024,
	.flags = SHADOW_STACK_SET_MARKER,
};

FIXTURE_VARIANT_ADD(map_gcs, s64k)
{
	.stack_size = 64 * 1024,
	.flags = 0,
};
408 
/* 128k stacks, with every combination of cap token and top marker */
FIXTURE_VARIANT_ADD(map_gcs, s128k_cap_marker)
{
	.stack_size = 128 * 1024,
	.flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s128k_cap)
{
	.stack_size = 128 * 1024,
	.flags = SHADOW_STACK_SET_TOKEN,
};

FIXTURE_VARIANT_ADD(map_gcs, s128k_marker)
{
	.stack_size = 128 * 1024,
	.flags = SHADOW_STACK_SET_MARKER,
};

FIXTURE_VARIANT_ADD(map_gcs, s128k)
{
	.stack_size = 128 * 1024,
	.flags = 0,
};
432 
FIXTURE_SETUP(map_gcs)433 FIXTURE_SETUP(map_gcs)
434 {
435 	self->stack = (void *)syscall(__NR_map_shadow_stack, 0,
436 				      variant->stack_size,
437 				      variant->flags);
438 	ASSERT_FALSE(self->stack == MAP_FAILED);
439 	ksft_print_msg("Allocated stack from %p-%p\n", self->stack,
440 		       self->stack + variant->stack_size);
441 }
442 
FIXTURE_TEARDOWN(map_gcs)443 FIXTURE_TEARDOWN(map_gcs)
444 {
445 	int ret;
446 
447 	if (self->stack != MAP_FAILED) {
448 		ret = munmap(self->stack, variant->stack_size);
449 		ASSERT_EQ(ret, 0);
450 	}
451 }
452 
453 /* The stack has a cap token */
TEST_F(map_gcs,stack_capped)454 TEST_F(map_gcs, stack_capped)
455 {
456 	unsigned long *stack = self->stack;
457 	size_t cap_index;
458 
459 	cap_index = (variant->stack_size / sizeof(unsigned long));
460 
461 	switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
462 	case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
463 		cap_index -= 2;
464 		break;
465 	case SHADOW_STACK_SET_TOKEN:
466 		cap_index -= 1;
467 		break;
468 	case SHADOW_STACK_SET_MARKER:
469 	case 0:
470 		/* No cap, no test */
471 		return;
472 	}
473 
474 	ASSERT_EQ(stack[cap_index], GCS_CAP(&stack[cap_index]));
475 }
476 
477 /* The top of the stack is 0 */
TEST_F(map_gcs,stack_terminated)478 TEST_F(map_gcs, stack_terminated)
479 {
480 	unsigned long *stack = self->stack;
481 	size_t term_index;
482 
483 	if (!(variant->flags & SHADOW_STACK_SET_MARKER))
484 		return;
485 
486 	term_index = (variant->stack_size / sizeof(unsigned long)) - 1;
487 
488 	ASSERT_EQ(stack[term_index], 0);
489 }
490 
/* Writes should fault */
TEST_F_SIGNAL(map_gcs, not_writeable, SIGSEGV)
{
	/* A plain store to GCS memory should deliver SIGSEGV */
	self->stack[0] = 0;
}
496 
/* Put it all together, we can safely switch to and from the stack */
TEST_F(map_gcs, stack_switch)
{
	size_t cap_index;
	cap_index = (variant->stack_size / sizeof(unsigned long));
	unsigned long *orig_gcspr_el0, *pivot_gcspr_el0;

	/* Skip over the stack terminator and point at the cap */
	switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
	case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
		cap_index -= 2;
		break;
	case SHADOW_STACK_SET_TOKEN:
		cap_index -= 1;
		break;
	case SHADOW_STACK_SET_MARKER:
	case 0:
		/* No cap, no test */
		return;
	}
	pivot_gcspr_el0 = &self->stack[cap_index];

	/* Pivot to the new GCS */
	ksft_print_msg("Pivoting to %p from %p, target has value 0x%lx\n",
		       pivot_gcspr_el0, get_gcspr(),
		       *pivot_gcspr_el0);
	/* gcsss1() consumes the cap, gcsss2() completes the swap and
	 * returns the outgoing stack pointer for the pivot back. */
	gcsss1(pivot_gcspr_el0);
	orig_gcspr_el0 = gcsss2();
	ksft_print_msg("Pivoted to %p from %p, target has value 0x%lx\n",
		       get_gcspr(), orig_gcspr_el0,
		       *pivot_gcspr_el0);

	ksft_print_msg("Pivoted, GCSPR_EL0 now %p\n", get_gcspr());

	/* New GCS must be in the new buffer */
	ASSERT_TRUE((unsigned long)get_gcspr() > (unsigned long)self->stack);
	ASSERT_TRUE((unsigned long)get_gcspr() <=
		    (unsigned long)self->stack + variant->stack_size);

	/* We should be able to use all but 2 slots of the new stack */
	ksft_print_msg("Recursing %zu levels\n", cap_index - 1);
	gcs_recurse(cap_index - 1);

	/* Pivot back to the original GCS */
	gcsss1(orig_gcspr_el0);
	pivot_gcspr_el0 = gcsss2();

	/* Confirm calls still work after returning to the old stack */
	gcs_recurse(0);
	ksft_print_msg("Pivoted back to GCSPR_EL0 0x%p\n", get_gcspr());
}
547 
/* We fault if we try to go beyond the end of the stack */
TEST_F_SIGNAL(map_gcs, stack_overflow, SIGSEGV)
{
	size_t cap_index;
	cap_index = (variant->stack_size / sizeof(unsigned long));
	unsigned long *orig_gcspr_el0, *pivot_gcspr_el0;

	/* Skip over the stack terminator and point at the cap */
	switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
	case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
		cap_index -= 2;
		break;
	case SHADOW_STACK_SET_TOKEN:
		cap_index -= 1;
		break;
	case SHADOW_STACK_SET_MARKER:
	case 0:
		/* No cap, no test but we need to SEGV to avoid a false fail */
		/* A plain store to GCS memory faults, satisfying the
		 * expected signal for this TEST_F_SIGNAL() case. */
		orig_gcspr_el0 = get_gcspr();
		*orig_gcspr_el0 = 0;
		return;
	}
	pivot_gcspr_el0 = &self->stack[cap_index];

	/* Pivot to the new GCS */
	ksft_print_msg("Pivoting to %p from %p, target has value 0x%lx\n",
		       pivot_gcspr_el0, get_gcspr(),
		       *pivot_gcspr_el0);
	gcsss1(pivot_gcspr_el0);
	orig_gcspr_el0 = gcsss2();
	ksft_print_msg("Pivoted to %p from %p, target has value 0x%lx\n",
		       pivot_gcspr_el0, orig_gcspr_el0,
		       *pivot_gcspr_el0);

	ksft_print_msg("Pivoted, GCSPR_EL0 now %p\n", get_gcspr());

	/* New GCS must be in the new buffer */
	ASSERT_TRUE((unsigned long)get_gcspr() > (unsigned long)self->stack);
	ASSERT_TRUE((unsigned long)get_gcspr() <=
		    (unsigned long)self->stack + variant->stack_size);

	/* Now try to recurse, we should fault doing this. */
	ksft_print_msg("Recursing %zu levels...\n", cap_index + 1);
	gcs_recurse(cap_index + 1);
	ksft_print_msg("...done\n");

	/* Clean up properly to try to guard against spurious passes. */
	gcsss1(orig_gcspr_el0);
	pivot_gcspr_el0 = gcsss2();
	ksft_print_msg("Pivoted back to GCSPR_EL0 0x%p\n", get_gcspr());
}
599 
/* No per-test state needed; the variants carry the invalid sizes */
FIXTURE(map_invalid_gcs)
{
};
603 
FIXTURE_VARIANT(map_invalid_gcs)
{
	/* Invalid size to request from map_shadow_stack(), in bytes */
	size_t stack_size;
};
608 
/* Nothing to set up, the test does the (failing) mapping itself */
FIXTURE_SETUP(map_invalid_gcs)
{
}
612 
/* Nothing to tear down, no mapping should have been created */
FIXTURE_TEARDOWN(map_invalid_gcs)
{
}
616 
/* GCS must be larger than 16 bytes */
FIXTURE_VARIANT_ADD(map_invalid_gcs, too_small)
{
	.stack_size = 8,	/* below the 16 byte minimum */
};
622 
623 /* GCS size must be 16 byte aligned */
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_1)624 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_1)  { .stack_size = 1024 + 1  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_2)625 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_2)  { .stack_size = 1024 + 2  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_3)626 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_3)  { .stack_size = 1024 + 3  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_4)627 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_4)  { .stack_size = 1024 + 4  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_5)628 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_5)  { .stack_size = 1024 + 5  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_6)629 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_6)  { .stack_size = 1024 + 6  };
FIXTURE_VARIANT_ADD(map_invalid_gcs,unligned_7)630 FIXTURE_VARIANT_ADD(map_invalid_gcs, unligned_7)  { .stack_size = 1024 + 7  };
631 
TEST_F(map_invalid_gcs,do_map)632 TEST_F(map_invalid_gcs, do_map)
633 {
634 	void *stack;
635 
636 	stack = (void *)syscall(__NR_map_shadow_stack, 0,
637 				variant->stack_size, 0);
638 	ASSERT_TRUE(stack == MAP_FAILED);
639 	if (stack != MAP_FAILED)
640 		munmap(stack, variant->stack_size);
641 }
642 
FIXTURE(invalid_mprotect)
{
	/* GCS mapping the tests attempt to mprotect() */
	unsigned long *stack;
	/* Size of the mapping in bytes, one page */
	size_t stack_size;
};
648 
FIXTURE_VARIANT(invalid_mprotect)
{
	/* PROT_* flags that must be refused on GCS memory */
	unsigned long flags;
};
653 
FIXTURE_SETUP(invalid_mprotect)654 FIXTURE_SETUP(invalid_mprotect)
655 {
656 	self->stack_size = sysconf(_SC_PAGE_SIZE);
657 	self->stack = (void *)syscall(__NR_map_shadow_stack, 0,
658 				      self->stack_size, 0);
659 	ASSERT_FALSE(self->stack == MAP_FAILED);
660 	ksft_print_msg("Allocated stack from %p-%p\n", self->stack,
661 		       self->stack + self->stack_size);
662 }
663 
FIXTURE_TEARDOWN(invalid_mprotect)664 FIXTURE_TEARDOWN(invalid_mprotect)
665 {
666 	int ret;
667 
668 	if (self->stack != MAP_FAILED) {
669 		ret = munmap(self->stack, self->stack_size);
670 		ASSERT_EQ(ret, 0);
671 	}
672 }
673 
/* Making a GCS executable must be rejected */
FIXTURE_VARIANT_ADD(invalid_mprotect, exec)
{
	.flags = PROT_EXEC,
};
678 
TEST_F(invalid_mprotect,do_map)679 TEST_F(invalid_mprotect, do_map)
680 {
681 	int ret;
682 
683 	ret = mprotect(self->stack, self->stack_size, variant->flags);
684 	ASSERT_EQ(ret, -1);
685 }
686 
TEST_F(invalid_mprotect,do_map_read)687 TEST_F(invalid_mprotect, do_map_read)
688 {
689 	int ret;
690 
691 	ret = mprotect(self->stack, self->stack_size,
692 		       variant->flags | PROT_READ);
693 	ASSERT_EQ(ret, -1);
694 }
695 
main(int argc,char ** argv)696 int main(int argc, char **argv)
697 {
698 	unsigned long gcs_mode;
699 	int ret;
700 
701 	if (!(getauxval(AT_HWCAP) & HWCAP_GCS))
702 		ksft_exit_skip("SKIP GCS not supported\n");
703 
704 	/*
705 	 * Force shadow stacks on, our tests *should* be fine with or
706 	 * without libc support and with or without this having ended
707 	 * up tagged for GCS and enabled by the dynamic linker.  We
708 	 * can't use the libc prctl() function since we can't return
709 	 * from enabling the stack.
710 	 */
711 	ret = my_syscall2(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &gcs_mode);
712 	if (ret) {
713 		ksft_print_msg("Failed to read GCS state: %d\n", ret);
714 		return EXIT_FAILURE;
715 	}
716 
717 	if (!(gcs_mode & PR_SHADOW_STACK_ENABLE)) {
718 		gcs_mode = PR_SHADOW_STACK_ENABLE;
719 		ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
720 				  gcs_mode);
721 		if (ret) {
722 			ksft_print_msg("Failed to configure GCS: %d\n", ret);
723 			return EXIT_FAILURE;
724 		}
725 	}
726 
727 	/* Avoid returning in case libc doesn't understand GCS */
728 	exit(test_harness_run(argc, argv));
729 }
730