// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to Control Flow Integrity.
 */
#include "lkdtm.h"
#include <asm/page.h>

static int called_count;

/* Function taking one argument, without a return value. */
static noinline void lkdtm_increment_void(int *counter)
{
	(*counter)++;
}

/* Function taking one argument, returning int. */
static noinline int lkdtm_increment_int(int *counter)
{
	(*counter)++;

	return *counter;
}

/* Don't allow the compiler to inline the calls. */
static noinline void lkdtm_indirect_call(void (*func)(int *))
{
	func(&called_count);
}

/*
 * This tries to call an indirect function with a mismatched prototype.
 */
static void lkdtm_CFI_FORWARD_PROTO(void)
{
	/*
	 * Matches lkdtm_increment_void()'s prototype, but not
	 * lkdtm_increment_int()'s prototype.
	 */
	pr_info("Calling matched prototype ...\n");
	lkdtm_indirect_call(lkdtm_increment_void);

	pr_info("Calling mismatched prototype ...\n");
	lkdtm_indirect_call((void *)lkdtm_increment_int);

	pr_err("FAIL: survived mismatched prototype function call!\n");
	pr_expected_config(CONFIG_CFI_CLANG);
}
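
/*
 * With CONFIG_CFI_CLANG, the compiler instruments each indirect call site
 * to verify that the target was compiled with a matching prototype before
 * branching to it, roughly (illustrative pseudocode only; the helper names
 * are made up and the real check is emitted by the compiler):
 *
 *	if (cfi_typeid(func) != cfi_typeid(void (*)(int *)))
 *		cfi_trap();		// caught before the bad call
 *	func(&called_count);
 *
 * so the mismatched lkdtm_indirect_call() above is expected to trap rather
 * than reach the pr_err() FAIL message.
 */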

/*
 * This can stay local to LKDTM, as there should not be a production reason
 * to disable PAC && SCS.
 */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
# ifdef CONFIG_ARM64_BTI_KERNEL
#  define __no_pac             "branch-protection=bti"
# else
#  define __no_pac             "branch-protection=none"
# endif
# define __no_ret_protection   __noscs __attribute__((__target__(__no_pac)))
#else
# define __no_ret_protection   __noscs
#endif

#define no_pac_addr(addr)      \
	((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
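
/*
 * no_pac_addr() ORs PAGE_OFFSET into the pointer: the upper bits of a
 * kernel address are all ones, so this effectively strips any arm64
 * Pointer Authentication Code stored in the top bits of a return address
 * read off the stack, and leaves an unsigned kernel pointer unchanged.
 */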

/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}

static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}
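
/*
 * The two helpers above differ only in how they are compiled:
 * set_return_addr_unchecked() is built with __no_ret_protection (no shadow
 * call stack, and with return-address signing disabled where PAC is
 * available), so its write to the return address slot should actually
 * redirect control flow. set_return_addr() is built with the kernel's
 * normal protections, so the same write should either be caught (PAC
 * authentication failure on return) or have no effect (return address
 * taken from the shadow call stack).
 */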

static volatile int force_check;

static void lkdtm_CFI_BACKWARD(void)
{
	/* Use calculated gotos to keep labels addressable. */
	void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };

	pr_info("Attempting unchecked stack return address redirection ...\n");

	/* Always false */
	if (force_check) {
		/*
		 * Prepare to call with NULLs to avoid parameters being treated as
		 * constants in -O2.
		 */
		set_return_addr_unchecked(NULL, NULL);
		set_return_addr(NULL, NULL);
		if (force_check)
			goto *labels[1];
		if (force_check)
			goto *labels[2];
		if (force_check)
			goto *labels[3];
		if (force_check)
			goto *labels[4];
		return;
	}
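
	/*
	 * The block above never actually runs (force_check is always zero);
	 * the NULL calls and computed gotos only exist to keep both
	 * set_return_addr*() helpers and all four labels referenced, so the
	 * optimizer cannot constant-fold the arguments or discard the label
	 * addresses used below.
	 */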

	/*
	 * Use fallthrough switch case to keep basic block ordering between
	 * set_return_addr*() and the label after it.
	 */
	switch (force_check) {
	case 0:
		set_return_addr_unchecked(&&normal, &&redirected);
		fallthrough;
	case 1:
normal:
		/* Always true */
		if (!force_check) {
			pr_err("FAIL: stack return address manipulation failed!\n");
			/* If we can't redirect "normally", we can't test mitigations. */
			return;
		}
		break;
	default:
redirected:
		pr_info("ok: redirected stack return address.\n");
		break;
	}

	pr_info("Attempting checked stack return address redirection ...\n");

	switch (force_check) {
	case 0:
		set_return_addr(&&check_normal, &&check_redirected);
		fallthrough;
	case 1:
check_normal:
		/* Always true */
		if (!force_check) {
			pr_info("ok: control flow unchanged.\n");
			return;
		}

check_redirected:
		pr_err("FAIL: stack return address was redirected!\n");
		break;
	}
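
	/*
	 * Reaching this point means the checked redirection above also
	 * diverted control flow, i.e. no backward-edge protection caught the
	 * overwrite; report which mitigation was expected to do so, if one
	 * was built in.
	 */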

	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
		return;
	}
	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
		pr_expected_config(CONFIG_SHADOW_CALL_STACK);
		return;
	}
	pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
		lkdtm_kernel_info,
		"CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(CFI_FORWARD_PROTO),
	CRASHTYPE(CFI_BACKWARD),
};

struct crashtype_category cfi_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};