xref: /linux/drivers/misc/lkdtm/cfi.c (revision 23c48a124b469cee2eb0c75e6d22d366d1caa118)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This is for all the tests relating directly to Control Flow Integrity.
4  */
5 #include "lkdtm.h"
6 #include <asm/page.h>
7 
/* Shared counter incremented by the indirect-call targets below. */
static int called_count;
9 
10 /* Function taking one argument, without a return value. */
11 static noinline void lkdtm_increment_void(int *counter)
12 {
13 	(*counter)++;
14 }
15 
16 /* Function taking one argument, returning int. */
17 static noinline int lkdtm_increment_int(int *counter)
18 {
19 	(*counter)++;
20 
21 	return *counter;
22 }
/*
 * This tries to call an indirect function with a mismatched prototype.
 * With CONFIG_CFI_CLANG enabled, the mismatched indirect call below is
 * expected to be rejected before the FAIL message is reached.
 */
static void lkdtm_CFI_FORWARD_PROTO(void)
{
	/*
	 * Matches lkdtm_increment_void()'s prototype, but not
	 * lkdtm_increment_int()'s prototype.
	 */
	void (*func)(int *);

	/* Sanity check: a correctly typed indirect call must always work. */
	pr_info("Calling matched prototype ...\n");
	func = lkdtm_increment_void;
	func(&called_count);

	/*
	 * Cast through void * to smuggle an int-returning function into a
	 * void-returning function pointer; forward-edge CFI should trap
	 * this call.
	 */
	pr_info("Calling mismatched prototype ...\n");
	func = (void *)lkdtm_increment_int;
	func(&called_count);

	/* Only reached if the mismatched call was not caught. */
	pr_err("FAIL: survived mismatched prototype function call!\n");
	pr_expected_config(CONFIG_CFI_CLANG);
}
45 
/*
 * This can stay local to LKDTM, as there should not be a production reason
 * to disable PAC && SCS.
 */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
/* Drop PAC return-address signing, but keep BTI when it is built in. */
# ifdef CONFIG_ARM64_BTI_KERNEL
#  define __no_pac             "branch-protection=bti"
# else
#  define __no_pac             "branch-protection=none"
# endif
/* Disable both shadow call stack (__noscs) and PAC for a function. */
# define __no_ret_protection   __noscs __attribute__((__target__(__no_pac)))
#else
/* No PAC support built in: only the shadow call stack needs disabling. */
# define __no_ret_protection   __noscs
#endif

/*
 * Normalize a possibly PAC-signed return address for comparison by
 * forcing the upper bits back to the kernel linear-map pattern
 * (presumably masking out the PAC signature bits — verify per-arch).
 */
#define no_pac_addr(addr)      \
	((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
63 
/* The ultimate ROP gadget. */
/*
 * Overwrite this function's own stack return address so it returns to
 * @addr instead of back to its caller. Built with __no_ret_protection
 * (no shadow call stack, no PAC), so the redirect should go undetected.
 *
 * @expected: return address the caller expects to find on the stack
 * @addr: replacement return address to redirect control flow to
 */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	/*
	 * NOTE(review): assumes the saved return address sits one word above
	 * the frame pointer — architecture/ABI dependent; the mismatch branch
	 * below exists to catch layouts where this does not hold.
	 */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}
/*
 * Identical body to set_return_addr_unchecked(), but compiled WITH the
 * kernel's normal return-address protections (PAC and/or shadow call
 * stack), which are expected to catch or neutralize the redirect.
 *
 * @expected: return address the caller expects to find on the stack
 * @addr: replacement return address to redirect control flow to
 */
static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}
94 
/*
 * Always zero at runtime; volatile so the compiler cannot prove that and
 * optimize away the "impossible" branches that keep labels reachable below.
 */
static volatile int force_check;
96 
/*
 * Exercise backward-edge CFI: first prove the stack return address can be
 * redirected when protections are disabled (set_return_addr_unchecked),
 * then verify the protected variant (set_return_addr) does NOT change
 * control flow when PAC or shadow call stack is in effect.
 */
static void lkdtm_CFI_BACKWARD(void)
{
	/* Use calculated gotos to keep labels addressable. */
	void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };

	pr_info("Attempting unchecked stack return address redirection ...\n");

	/* Always false */
	if (force_check) {
		/*
		 * Prepare to call with NULLs to avoid parameters being treated as
		 * constants in -O2.
		 */
		set_return_addr_unchecked(NULL, NULL);
		set_return_addr(NULL, NULL);
		/* Dead computed gotos: keep every label address "used". */
		if (force_check)
			goto *labels[1];
		if (force_check)
			goto *labels[2];
		if (force_check)
			goto *labels[3];
		if (force_check)
			goto *labels[4];
		return;
	}

	/*
	 * Use fallthrough switch case to keep basic block ordering between
	 * set_return_addr*() and the label after it.
	 */
	switch (force_check) {
	case 0:
		/* Return here would be "normal"; redirect it to "redirected". */
		set_return_addr_unchecked(&&normal, &&redirected);
		fallthrough;
	case 1:
normal:
		/* Always true */
		if (!force_check) {
			pr_err("FAIL: stack return address manipulation failed!\n");
			/* If we can't redirect "normally", we can't test mitigations. */
			return;
		}
		break;
	default:
redirected:
		/* Landing here proves the unchecked redirect worked. */
		pr_info("ok: redirected stack return address.\n");
		break;
	}

	pr_info("Attempting checked stack return address redirection ...\n");

	switch (force_check) {
	case 0:
		/* Same redirect attempt, but via the protected function. */
		set_return_addr(&&check_normal, &&check_redirected);
		fallthrough;
	case 1:
check_normal:
		/* Always true */
		if (!force_check) {
			/* Mitigation held: control flow was not diverted. */
			pr_info("ok: control flow unchanged.\n");
			return;
		}

check_redirected:
		pr_err("FAIL: stack return address was redirected!\n");
		break;
	}

	/* Report which backward-edge mitigation should have prevented this. */
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
		return;
	}
	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
		pr_expected_config(CONFIG_SHADOW_CALL_STACK);
		return;
	}
	pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
		lkdtm_kernel_info,
		"CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
}
177 
/* CFI test entries registered with the LKDTM core. */
static struct crashtype crashtypes[] = {
	CRASHTYPE(CFI_FORWARD_PROTO),
	CRASHTYPE(CFI_BACKWARD),
};
182 
/* Category descriptor exposing the CFI tests to the LKDTM core. */
struct crashtype_category cfi_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};
187