1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * test_kprobes.c - simple sanity test for k*probes
4 *
5 * Copyright IBM Corp. 2008
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/kprobes.h>
10 #include <linux/random.h>
11 #include <kunit/test.h>
12
/* Divisor used by every probed target; handlers predict value / div_factor. */
#define div_factor 3
14
/*
 * Reset the dynamic state (resolved address and internal flags) of a
 * statically-defined kprobe so it can be registered again by a later
 * test case.
 *
 * NOTE(review): this does not clear .nmissed; test_kprobe_missed expects
 * exactly 2 misses, which presumably assumes a fresh count at
 * registration — verify if cases can run repeatedly on one module load.
 */
#define KP_CLEAR(_kp) \
	do { \
		(_kp).addr = NULL; \
		(_kp).flags = 0; \
	} while (0)
20
/* Random test operand, and the values written by the probe handlers. */
static u32 rand1, preh_val, posth_val;
/*
 * Indirect pointers to the probed functions; calling through them keeps
 * the compiler from inlining or constant-folding the call sites that the
 * probes must actually hit.
 */
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);
/* The currently running KUnit test, for use inside probe handlers. */
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
/* Return addresses recorded by the stacktrace targets on their first call. */
static unsigned long target_return_address[2];
31
kprobe_target(u32 value)32 static noinline u32 kprobe_target(u32 value)
33 {
34 return (value / div_factor);
35 }
36
kprobe_recursed_target(u32 value)37 static noinline u32 kprobe_recursed_target(u32 value)
38 {
39 return (value / div_factor);
40 }
41
kp_pre_handler(struct kprobe * p,struct pt_regs * regs)42 static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
43 {
44 KUNIT_EXPECT_FALSE(current_test, preemptible());
45
46 preh_val = recursed_target(rand1);
47 return 0;
48 }
49
kp_post_handler(struct kprobe * p,struct pt_regs * regs,unsigned long flags)50 static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
51 unsigned long flags)
52 {
53 u32 expval = recursed_target(rand1);
54
55 KUNIT_EXPECT_FALSE(current_test, preemptible());
56 KUNIT_EXPECT_EQ(current_test, preh_val, expval);
57
58 posth_val = preh_val + div_factor;
59 }
60
/* Probe on kprobe_target with both pre- and post-handlers attached. */
static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};
66
test_kprobe(struct kunit * test)67 static void test_kprobe(struct kunit *test)
68 {
69 current_test = test;
70 KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
71 target(rand1);
72 unregister_kprobe(&kp);
73 KUNIT_EXPECT_NE(test, 0, preh_val);
74 KUNIT_EXPECT_NE(test, 0, posth_val);
75 }
76
kprobe_target2(u32 value)77 static noinline u32 kprobe_target2(u32 value)
78 {
79 return (value / div_factor) + 1;
80 }
81
kprobe_stacktrace_internal_target(void)82 static noinline unsigned long kprobe_stacktrace_internal_target(void)
83 {
84 if (!target_return_address[0])
85 target_return_address[0] = (unsigned long)__builtin_return_address(0);
86 return target_return_address[0];
87 }
88
kprobe_stacktrace_target(void)89 static noinline unsigned long kprobe_stacktrace_target(void)
90 {
91 if (!target_return_address[1])
92 target_return_address[1] = (unsigned long)__builtin_return_address(0);
93
94 if (internal_target)
95 internal_target();
96
97 return target_return_address[1];
98 }
99
kprobe_stacktrace_driver(void)100 static noinline unsigned long kprobe_stacktrace_driver(void)
101 {
102 if (stacktrace_target)
103 stacktrace_target();
104
105 /* This is for preventing inlining the function */
106 return (unsigned long)__builtin_return_address(0);
107 }
108
kp_pre_handler2(struct kprobe * p,struct pt_regs * regs)109 static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
110 {
111 preh_val = (rand1 / div_factor) + 1;
112 return 0;
113 }
114
kp_post_handler2(struct kprobe * p,struct pt_regs * regs,unsigned long flags)115 static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
116 unsigned long flags)
117 {
118 KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
119 posth_val = preh_val + div_factor;
120 }
121
/* Probe on kprobe_target2, used alongside kp by test_kprobes. */
static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};
127
test_kprobes(struct kunit * test)128 static void test_kprobes(struct kunit *test)
129 {
130 struct kprobe *kps[2] = {&kp, &kp2};
131
132 current_test = test;
133
134 KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
135 preh_val = 0;
136 posth_val = 0;
137 target(rand1);
138
139 KUNIT_EXPECT_NE(test, 0, preh_val);
140 KUNIT_EXPECT_NE(test, 0, posth_val);
141
142 preh_val = 0;
143 posth_val = 0;
144 target2(rand1);
145
146 KUNIT_EXPECT_NE(test, 0, preh_val);
147 KUNIT_EXPECT_NE(test, 0, posth_val);
148 unregister_kprobes(kps, 2);
149 }
150
/*
 * Probe on kprobe_recursed_target; its handlers re-enter the probed
 * function, so each hit is expected to record missed probes.
 */
static struct kprobe kp_missed = {
	.symbol_name = "kprobe_recursed_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler,
};
156
test_kprobe_missed(struct kunit * test)157 static void test_kprobe_missed(struct kunit *test)
158 {
159 current_test = test;
160 preh_val = 0;
161 posth_val = 0;
162
163 KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
164
165 recursed_target(rand1);
166
167 KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
168 KUNIT_EXPECT_NE(test, 0, preh_val);
169 KUNIT_EXPECT_NE(test, 0, posth_val);
170
171 unregister_kprobe(&kp_missed);
172 }
173
174 #ifdef CONFIG_KRETPROBES
/* Set by entry_handler, then overwritten with rand1 by the return handlers. */
static u32 krph_val;
176
entry_handler(struct kretprobe_instance * ri,struct pt_regs * regs)177 static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
178 {
179 KUNIT_EXPECT_FALSE(current_test, preemptible());
180 krph_val = (rand1 / div_factor);
181 return 0;
182 }
183
return_handler(struct kretprobe_instance * ri,struct pt_regs * regs)184 static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
185 {
186 unsigned long ret = regs_return_value(regs);
187
188 KUNIT_EXPECT_FALSE(current_test, preemptible());
189 KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
190 KUNIT_EXPECT_NE(current_test, krph_val, 0);
191 krph_val = rand1;
192 return 0;
193 }
194
/* kretprobe on kprobe_target with entry and return handlers. */
static struct kretprobe rp = {
	.handler = return_handler,
	.entry_handler = entry_handler,
	.kp.symbol_name = "kprobe_target"
};
200
test_kretprobe(struct kunit * test)201 static void test_kretprobe(struct kunit *test)
202 {
203 current_test = test;
204 KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
205 target(rand1);
206 unregister_kretprobe(&rp);
207 KUNIT_EXPECT_EQ(test, krph_val, rand1);
208 }
209
return_handler2(struct kretprobe_instance * ri,struct pt_regs * regs)210 static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
211 {
212 unsigned long ret = regs_return_value(regs);
213
214 KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
215 KUNIT_EXPECT_NE(current_test, krph_val, 0);
216 krph_val = rand1;
217 return 0;
218 }
219
/* kretprobe on kprobe_target2, used alongside rp by test_kretprobes. */
static struct kretprobe rp2 = {
	.handler = return_handler2,
	.entry_handler = entry_handler,
	.kp.symbol_name = "kprobe_target2"
};
225
test_kretprobes(struct kunit * test)226 static void test_kretprobes(struct kunit *test)
227 {
228 struct kretprobe *rps[2] = {&rp, &rp2};
229
230 current_test = test;
231 KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
232
233 krph_val = 0;
234 target(rand1);
235 KUNIT_EXPECT_EQ(test, krph_val, rand1);
236
237 krph_val = 0;
238 target2(rand1);
239 KUNIT_EXPECT_EQ(test, krph_val, rand1);
240 unregister_kretprobes(rps, 2);
241 }
242
243 #ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
/* Scratch buffer the stacktrace handlers save unwound entries into. */
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];
246
/*
 * Return handler for the outer stacktrace target: the unwinder must
 * resolve the kretprobe trampoline back to the target's real return
 * address.
 */
static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

	/*
	 * Test a stacktrace taken inside the kretprobe handler. This
	 * involves the kretprobe trampoline, but the trace must still
	 * include the correct return address of the target function.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	/* Scan the saved trace for the target's real return address. */
	for (i = 0; i < ret; i++) {
		if (stack_buf[i] == target_return_address[1])
			break;
	}
	/* i == ret means the address was never found in the trace. */
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/*
	 * Test a stacktrace taken from pt_regs at the return address.
	 * That trace must start from the target's return address.
	 */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif

	return 0;
}
281
/* kretprobe on the outer stacktrace target. */
static struct kretprobe rp3 = {
	.handler = stacktrace_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_target"
};
286
test_stacktrace_on_kretprobe(struct kunit * test)287 static void test_stacktrace_on_kretprobe(struct kunit *test)
288 {
289 unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
290
291 current_test = test;
292
293 /*
294 * Run the stacktrace_driver() to record correct return address in
295 * stacktrace_target() and ensure stacktrace_driver() call is not
296 * inlined by checking the return address of stacktrace_driver()
297 * and the return address of this function is different.
298 */
299 KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
300
301 KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
302 KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
303 unregister_kretprobe(&rp3);
304 }
305
stacktrace_internal_return_handler(struct kretprobe_instance * ri,struct pt_regs * regs)306 static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
307 {
308 unsigned long retval = regs_return_value(regs);
309 int i, ret;
310
311 KUNIT_EXPECT_FALSE(current_test, preemptible());
312 KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
313
314 /*
315 * Test stacktrace inside the kretprobe handler for nested case.
316 * The unwinder will find the kretprobe_trampoline address on the
317 * return address, and kretprobe must solve that.
318 */
319 ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
320 KUNIT_EXPECT_NE(current_test, ret, 0);
321
322 for (i = 0; i < ret - 1; i++) {
323 if (stack_buf[i] == target_return_address[0]) {
324 KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
325 break;
326 }
327 }
328 KUNIT_EXPECT_NE(current_test, i, ret);
329
330 #if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
331 /* Ditto for the regs version. */
332 ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
333 KUNIT_EXPECT_NE(current_test, ret, 0);
334 KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
335 KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
336 #endif
337
338 return 0;
339 }
340
/* kretprobe on the inner (nested) stacktrace target. */
static struct kretprobe rp4 = {
	.handler = stacktrace_internal_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_internal_target"
};
345
test_stacktrace_on_nested_kretprobe(struct kunit * test)346 static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
347 {
348 unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
349 struct kretprobe *rps[2] = {&rp3, &rp4};
350
351 current_test = test;
352
353 //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
354
355 KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
356 KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
357 unregister_kretprobes(rps, 2);
358 }
359 #endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
360
361 #endif /* CONFIG_KRETPROBES */
362
kprobes_test_init(struct kunit * test)363 static int kprobes_test_init(struct kunit *test)
364 {
365 KP_CLEAR(kp);
366 KP_CLEAR(kp2);
367 KP_CLEAR(kp_missed);
368 #ifdef CONFIG_KRETPROBES
369 KP_CLEAR(rp.kp);
370 KP_CLEAR(rp2.kp);
371 #ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
372 KP_CLEAR(rp3.kp);
373 KP_CLEAR(rp4.kp);
374 #endif
375 #endif
376
377 target = kprobe_target;
378 target2 = kprobe_target2;
379 recursed_target = kprobe_recursed_target;
380 stacktrace_target = kprobe_stacktrace_target;
381 internal_target = kprobe_stacktrace_internal_target;
382 stacktrace_driver = kprobe_stacktrace_driver;
383 rand1 = get_random_u32_above(div_factor);
384 return 0;
385 }
386
/*
 * Test cases. The kretprobe cases are built only when the kernel
 * supports kretprobes; the stacktrace cases additionally require the
 * architecture to produce correct stack traces through kretprobes.
 */
static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
	KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
	KUNIT_CASE(test_stacktrace_on_kretprobe),
	KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
	{}
};
401
/* Suite definition: .init runs before every individual test case. */
static struct kunit_suite kprobes_test_suite = {
	.name = "kprobes_test",
	.init = kprobes_test_init,
	.test_cases = kprobes_testcases,
};

kunit_test_suites(&kprobes_test_suite);

MODULE_DESCRIPTION("simple sanity test for k*probes");
MODULE_LICENSE("GPL");
412