// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"
#include "kprobe_write_ctx.skel.h"

/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
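/* the kernel increments this semaphore while a uprobe with a matching
 * ref_ctr_offset is attached and decrements it on detach; test_uprobe_ref_ctr()
 * checks that it goes up after attach, and the final assert in
 * test_attach_probe() checks it is back to zero once all links are destroyed
 */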

/* uprobe attach point */
static noinline void trigger_func(void)
{
	asm volatile ("");
}

/* attach point for byname uprobe */
static noinline void trigger_func2(void)
{
	asm volatile ("");
}

/* attach point for byname sleepable uprobe */
static noinline void trigger_func3(void)
{
	asm volatile ("");
}

/* attach point for ref_ctr */
static noinline void trigger_func4(void)
{
	asm volatile ("");
}

static char test_data[] = "test_data";

/* manual attach kprobe/kretprobe/uprobe/uretprobe tests */
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = attach_mode;
	kprobe_opts.retprobe = false;
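	/* SYS_NANOSLEEP_KPROBE_NAME is the arch-specific nanosleep syscall symbol
	 * provided by test_progs.h; the usleep(1) call below is what triggers
	 * these kprobe/kretprobe programs
	 */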
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      SYS_NANOSLEEP_KPROBE_NAME,
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 SYS_NANOSLEEP_KPROBE_NAME,
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = attach_mode;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

	/* attach uprobe by function name manually */
	uprobe_opts.func_name = "trigger_func2";
	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = 0;
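	/* with func_name set, libbpf resolves the symbol inside the target binary
	 * and treats the offset argument (0 here) as relative to the function start
	 */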
	skel->links.handle_uprobe_byname =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
							0 /* this pid */,
							"/proc/self/exe",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
		goto cleanup;

	/* trigger & validate kprobe & kretprobe */
	usleep(1);

	/* trigger & validate uprobe & uretprobe */
	trigger_func();

	/* trigger & validate uprobe attached by name */
	trigger_func2();

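	/* each BPF handler stores a distinct value in its *_res variable, so the
	 * checks below confirm that exactly the expected programs fired
	 */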
	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");

cleanup:
	test_attach_probe_manual__destroy(skel);
}

/* attach uprobe/uretprobe long event name tests */
static void test_attach_uprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;
	char path[PATH_MAX] = {0};

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink"))
		goto cleanup;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
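	/* legacy attach mode derives the tracefs event name from the binary path
	 * and offset, so using the fully resolved path (instead of /proc/self/exe)
	 * exercises long uprobe event names
	 */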
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      path,
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 path,
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}

/* attach kprobe/kretprobe long event name tests */
static void test_attach_kprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct test_attach_probe_manual *skel;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
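	/* the deliberately long symbol below is expected to come from the
	 * bpf_testmod test module; in legacy mode the tracefs event name is
	 * derived from it, exercising long kprobe event names
	 */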
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      "bpf_testmod_looooooooooooooooooooooooooooooong_name",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 "bpf_testmod_looooooooooooooooooooooooooooooong_name",
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}

#ifdef __x86_64__
/* a kprobe program that tries to write to its pt_regs context must fail to attach */
static void test_attach_kprobe_write_ctx(void)
{
	struct kprobe_write_ctx *skel = NULL;
	struct bpf_link *link = NULL;

	skel = kprobe_write_ctx__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
		return;

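	/* attaching must fail: kprobe_write_ctx writes to its pt_regs context,
	 * which the kernel rejects (x86_64 only, hence the #ifdef)
	 */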
	link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_write_ctx,
					       "bpf_fentry_test1", NULL);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
		bpf_link__destroy(link);

	kprobe_write_ctx__destroy(skel);
}
#else
static void test_attach_kprobe_write_ctx(void)
{
	test__skip();
}
#endif

static void test_attach_probe_auto(struct test_attach_probe *skel)
{
	struct bpf_link *uprobe_err_link;

	/* auto-attachable kprobe and kretprobe */
	skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");

	skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");

	/* verify auto-attach fails for old-style uprobe definition */
	uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
	if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
		       "auto-attach should fail for old-style name"))
		return;

	/* verify auto-attach works */
	skel->links.handle_uretprobe_byname =
			bpf_program__attach(skel->progs.handle_uretprobe_byname);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
		return;

	/* trigger & validate kprobe & kretprobe */
	usleep(1);

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
	ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
	ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
}

static void test_uprobe_lib(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	FILE *devnull;

	/* test attach by name for a library function, using the library
	 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
	 */
	uprobe_opts.func_name = "fopen";
	uprobe_opts.retprobe = false;
	skel->links.handle_uprobe_byname2 =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
							0 /* this pid */,
							"libc.so.6",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
		return;

	uprobe_opts.func_name = "fclose";
	uprobe_opts.retprobe = true;
	skel->links.handle_uretprobe_byname2 =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
							-1 /* any pid */,
							"libc.so.6",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
		return;

	/* trigger & validate shared library u[ret]probes attached by name */
	devnull = fopen("/dev/null", "r");
	if (!ASSERT_OK_PTR(devnull, "devnull_fopen"))
		return;
	fclose(devnull);

	ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
	ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
}

static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	ssize_t uprobe_offset, ref_ctr_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func4);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
		return;

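	/* ref_ctr_offset must be the semaphore's offset within the binary, not its
	 * virtual address; get_rel_offset() performs that translation
	 */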
	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
		return;

	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");

	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
		return;
	skel->links.handle_uprobe_ref_ctr = uprobe_link;

	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");

	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
	uprobe_opts.retprobe = true;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
		return;
	skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
}

static void test_kprobe_sleepable(void)
{
	struct test_attach_kprobe_sleepable *skel;

	skel = test_attach_kprobe_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
		return;

	/* sleepable kprobe test case needs flags set before loading */
	if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
		BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
		goto cleanup;

	if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
		       "skel_kprobe_sleepable_load"))
		goto cleanup;

	/* sleepable kprobes should not attach successfully */
	skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
	ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");

cleanup:
	test_attach_kprobe_sleepable__destroy(skel);
}

static void test_uprobe_sleepable(struct test_attach_probe *skel)
{
	/* test sleepable uprobe and uretprobe variants */
	skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
		return;

	skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
		return;

	skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
		return;

	skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
		return;

	skel->bss->user_ptr = test_data;
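	/* the sleepable handlers are expected to read test_data back through this
	 * user-space pointer (e.g. via the faultable bpf_copy_from_user() helper,
	 * which is only allowed in sleepable programs)
	 */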

	/* trigger & validate sleepable uprobe attached by name */
	trigger_func3();

	ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}

void test_attach_probe(void)
{
	struct test_attach_probe *skel;

	skel = test_attach_probe__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
		goto cleanup;
	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
		goto cleanup;

	if (test__start_subtest("manual-default"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
	if (test__start_subtest("manual-legacy"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
	if (test__start_subtest("manual-perf"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("manual-link"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);

	if (test__start_subtest("auto"))
		test_attach_probe_auto(skel);
	if (test__start_subtest("kprobe-sleepable"))
		test_kprobe_sleepable();
	if (test__start_subtest("uprobe-lib"))
		test_uprobe_lib(skel);
	if (test__start_subtest("uprobe-sleepable"))
		test_uprobe_sleepable(skel);
	if (test__start_subtest("uprobe-ref_ctr"))
		test_uprobe_ref_ctr(skel);

	if (test__start_subtest("uprobe-long_name"))
		test_attach_uprobe_long_event_name();
	if (test__start_subtest("kprobe-long_name"))
		test_attach_kprobe_long_event_name();
	if (test__start_subtest("kprobe-write-ctx"))
		test_attach_kprobe_write_ctx();

cleanup:
	test_attach_probe__destroy(skel);
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}