// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_kprobes.c - simple sanity test for *probes
 *
 * Copyright IBM Corp. 2008
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>

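/*
 * Each KUnit test case below registers a kprobe or kretprobe on one of the
 * local kprobe_target*() functions, calls that function, and then checks
 * that the handlers observed the expected values.  div_factor is the
 * divisor the probed functions and the handlers both use, so a handler can
 * recompute the expected result from rand1.
 */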
#define div_factor 3

static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
static unsigned long target_return_address[2];

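/*
 * Probed function for the basic kprobe tests.  It is marked noinline so
 * that it stays a real function with its own symbol, which the kprobes
 * below attach to by name.
 */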
static noinline u32 kprobe_target(u32 value)
{
	return (value / div_factor);
}

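/*
 * Pre- and post-handlers for the basic kprobe test.  The pre-handler
 * records the expected result (rand1 / div_factor) and the post-handler
 * checks it and derives posth_val from it.  Both also verify that they
 * run with preemption disabled, as kprobe handlers are expected to.
 */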
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	preh_val = (rand1 / div_factor);
	return 0;
}

static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor));
	posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};

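/*
 * Register a single kprobe on kprobe_target(), call it, and check that
 * both handlers ran: preh_val and posth_val end up non-zero because rand1
 * is always larger than div_factor (see kprobes_test_init()).
 */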
static void test_kprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
	target(rand1);
	unregister_kprobe(&kp);
	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
}

static noinline u32 kprobe_target2(u32 value)
{
	return (value / div_factor) + 1;
}

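/*
 * Call chain used by the stacktrace tests:
 * kprobe_stacktrace_driver() -> kprobe_stacktrace_target()
 *   -> kprobe_stacktrace_internal_target()
 * Each callee records its return address (via __builtin_return_address(0))
 * the first time it runs, so the kretprobe handlers below can check that a
 * stacktrace taken through the kretprobe trampoline still contains the
 * real return addresses.
 */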
static noinline unsigned long kprobe_stacktrace_internal_target(void)
{
	if (!target_return_address[0])
		target_return_address[0] = (unsigned long)__builtin_return_address(0);
	return target_return_address[0];
}

static noinline unsigned long kprobe_stacktrace_target(void)
{
	if (!target_return_address[1])
		target_return_address[1] = (unsigned long)__builtin_return_address(0);

	if (internal_target)
		internal_target();

	return target_return_address[1];
}

static noinline unsigned long kprobe_stacktrace_driver(void)
{
	if (stacktrace_target)
		stacktrace_target();

	/* This is to prevent the function from being inlined. */
	return (unsigned long)__builtin_return_address(0);
}

static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor) + 1;
	return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
	posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};

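/*
 * Register kp and kp2 together with register_kprobes() and exercise both
 * probed functions.  kp is reused from test_kprobe(), so its addr and
 * flags must be reset before it can be registered again.
 */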
static void test_kprobes(struct kunit *test)
{
	struct kprobe *kps[2] = {&kp, &kp2};

	current_test = test;

	/* addr and flags should be cleared for reusing the kprobe. */
	kp.addr = NULL;
	kp.flags = 0;

	KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
	preh_val = 0;
	posth_val = 0;
	target(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);

	preh_val = 0;
	posth_val = 0;
	target2(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
	unregister_kprobes(kps, 2);
}

#ifdef CONFIG_KRETPROBES
static u32 krph_val;

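/*
 * Kretprobe handlers: the entry handler records the expected return value
 * in krph_val; the return handler checks the actual return value reported
 * by regs_return_value(), verifies that the entry handler ran, and then
 * sets krph_val to rand1 so the test case can tell it was called.
 */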
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	krph_val = (rand1 / div_factor);
	return 0;
}

static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp = {
	.handler	= return_handler,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target"
};

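/*
 * Register a single kretprobe on kprobe_target(), call it, and check that
 * the return handler ran by verifying that krph_val was set to rand1.
 */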
static void test_kretprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
	target(rand1);
	unregister_kretprobe(&rp);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
}

static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp2 = {
	.handler	= return_handler2,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target2"
};

static void test_kretprobes(struct kunit *test)
{
	struct kretprobe *rps[2] = {&rp, &rp2};

	current_test = test;
	/* addr and flags should be cleared for reusing the kprobe. */
	rp.kp.addr = NULL;
	rp.kp.flags = 0;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));

	krph_val = 0;
	target(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);

	krph_val = 0;
	target2(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
	unregister_kretprobes(rps, 2);
}

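/*
 * The following tests check that a stacktrace taken from inside a
 * kretprobe handler is correct.  They are only built when the architecture
 * guarantees reliable stacktraces on kretprobes
 * (CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE), i.e. the unwinder can see
 * through the kretprobe trampoline.
 */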
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];

static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

	/*
	 * Test the stacktrace inside the kretprobe handler.  This will
	 * involve the kretprobe trampoline, but the trace must still include
	 * the correct return address of the target function.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret; i++) {
		if (stack_buf[i] == target_return_address[1])
			break;
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/*
	 * Test the stacktrace from pt_regs at the return address.  Thus the
	 * stack trace must start from the target's return address.
	 */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif

	return 0;
}

static struct kretprobe rp3 = {
	.handler	= stacktrace_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_target"
};

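/*
 * Register rp3 on kprobe_stacktrace_target() and invoke it through
 * stacktrace_driver(), so that stacktrace_return_handler() above runs and
 * checks the stacktraces while the kretprobe trampoline is on the stack.
 */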
static void test_stacktrace_on_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	/*
	 * Run stacktrace_driver() once to record the correct return address
	 * in stacktrace_target(), and ensure the stacktrace_driver() call is
	 * not inlined by checking that its return address differs from the
	 * return address of this function.
	 */
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobe(&rp3);
}

static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);

	/*
	 * Test the stacktrace inside the kretprobe handler for the nested
	 * case.  The unwinder will find the kretprobe trampoline address in
	 * place of the return address, and the kretprobe code must resolve
	 * it back to the correct return address.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret - 1; i++) {
		if (stack_buf[i] == target_return_address[0]) {
			KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
			break;
		}
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/* Ditto for the regs version. */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
	KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif

	return 0;
}

static struct kretprobe rp4 = {
	.handler	= stacktrace_internal_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_internal_target"
};

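/*
 * With both rp3 and rp4 registered, kprobe_stacktrace_internal_target()
 * returns while the return address of kprobe_stacktrace_target() is also
 * replaced by the kretprobe trampoline, so this exercises the
 * nested-kretprobe stacktrace case.
 */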
static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
	struct kretprobe *rps[2] = {&rp3, &rp4};

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobes(rps, 2);
}
#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */

#endif /* CONFIG_KRETPROBES */

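/*
 * Suite init, run before each test case: wire up the indirect call targets
 * and pick a random rand1 that is strictly greater than div_factor, so
 * that rand1 / div_factor is never zero and the "handler ran" checks above
 * cannot be confused by a zero result.
 */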
static int kprobes_test_init(struct kunit *test)
{
	target = kprobe_target;
	target2 = kprobe_target2;
	stacktrace_target = kprobe_stacktrace_target;
	internal_target = kprobe_stacktrace_internal_target;
	stacktrace_driver = kprobe_stacktrace_driver;

	do {
		rand1 = prandom_u32();
	} while (rand1 <= div_factor);
	return 0;
}

static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
#ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
	KUNIT_CASE(test_stacktrace_on_kretprobe),
	KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
	{}
};

static struct kunit_suite kprobes_test_suite = {
	.name = "kprobes_test",
	.init = kprobes_test_init,
	.test_cases = kprobes_testcases,
};

kunit_test_suites(&kprobes_test_suite);

MODULE_LICENSE("GPL");