xref: /linux/tools/testing/selftests/bpf/prog_tests/attach_probe.c (revision 90b83efa6701656e02c86e7df2cb1765ea602d07)
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"

/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute__((section(".probes")));
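
/*
 * The ref_ctr test below attaches u[ret]probes with ref_ctr_offset pointing at
 * this variable and expects the kernel to bump it to a non-zero value while the
 * probes are attached, and to drop it back to zero once the links are gone
 * (re-checked at the very end of test_attach_probe()).
 */

/*
 * The trigger_func*() helpers below are the user-space attach targets for the
 * u[ret]probe tests; noinline plus the empty asm volatile body keep the
 * compiler from inlining or eliding them, so they have stable symbols and
 * offsets to attach to.
 */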

/* uprobe attach point */
static noinline void trigger_func(void)
{
	asm volatile ("");
}

/* attach point for byname uprobe */
static noinline void trigger_func2(void)
{
	asm volatile ("");
}

/* attach point for byname sleepable uprobe */
static noinline void trigger_func3(void)
{
	asm volatile ("");
}

/* attach point for ref_ctr */
static noinline void trigger_func4(void)
{
	asm volatile ("");
}

static char test_data[] = "test_data";

/* manual kprobe/kretprobe/uprobe/uretprobe attach tests */
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = attach_mode;
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      SYS_NANOSLEEP_KPROBE_NAME,
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 SYS_NANOSLEEP_KPROBE_NAME,
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = attach_mode;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

	/* attach uprobe by function name manually */
	uprobe_opts.func_name = "trigger_func2";
	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = 0;
	skel->links.handle_uprobe_byname =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
							0 /* this pid */,
							"/proc/self/exe",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
		goto cleanup;

	/* trigger & validate kprobe & kretprobe */
	usleep(1);

	/* trigger & validate uprobe & uretprobe */
	trigger_func();

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");

cleanup:
	test_attach_probe_manual__destroy(skel);
}
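
/*
 * For reference, a rough sketch of what the BPF side of the manual-attach
 * programs above might look like. The real definitions live in
 * progs/test_attach_probe_manual.c; only the program names and the result
 * values checked above are taken from this file, the rest is illustrative:
 *
 *	int kprobe_res = 0;
 *	int uprobe_res = 0;
 *
 *	SEC("kprobe")
 *	int handle_kprobe(struct pt_regs *ctx)
 *	{
 *		kprobe_res = 1;
 *		return 0;
 *	}
 *
 *	SEC("uprobe")
 *	int handle_uprobe(struct pt_regs *ctx)
 *	{
 *		uprobe_res = 3;
 *		return 0;
 *	}
 *
 * Plain SEC("kprobe")/SEC("uprobe") section names encode no attach target,
 * which is why programs like these are attached explicitly with
 * bpf_program__attach_kprobe_opts()/bpf_program__attach_uprobe_opts() above.
 */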

/* test attaching uprobe/uretprobe with a long event name */
static void test_attach_uprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;
	char path[PATH_MAX] = {0};

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink"))
		goto cleanup;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      path,
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 path,
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}

/* test attaching kprobe/kretprobe with a long event name */
static void test_attach_kprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct test_attach_probe_manual *skel;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      "bpf_testmod_looooooooooooooooooooooooooooooong_name",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 "bpf_testmod_looooooooooooooooooooooooooooooong_name",
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}
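
/*
 * Both long-event-name tests above force PROBE_ATTACH_MODE_LEGACY, i.e. the
 * tracefs [uk]probe_events interface, where libbpf has to derive a probe
 * event name from the binary path or kernel symbol; the fully resolved binary
 * path and the very long module symbol are there to stress that name
 * generation.
 */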

static void test_attach_probe_auto(struct test_attach_probe *skel)
{
	struct bpf_link *uprobe_err_link;

	/* auto-attachable kprobe and kretprobe */
	skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");

	skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");

	/* verify auto-attach fails for old-style uprobe definition */
	uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
	if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
		       "auto-attach should fail for old-style name"))
		return;

	/* verify auto-attach works */
	skel->links.handle_uretprobe_byname =
			bpf_program__attach(skel->progs.handle_uretprobe_byname);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
		return;

	/* trigger & validate kprobe & kretprobe */
	usleep(1);

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
	ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
	ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
}
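
/*
 * Whether bpf_program__attach() can auto-attach is decided purely by the
 * program's SEC() name; the real section strings live in
 * progs/test_attach_probe.c, but roughly:
 *
 *	SEC("kprobe/<ksym>")                          - auto-attachable
 *	SEC("uretprobe//proc/self/exe:trigger_func2") - auto-attachable
 *	SEC("uprobe")  (no target encoded)            - bpf_program__attach()
 *	                                                returns -EOPNOTSUPP,
 *	                                                matching the check above
 */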

static void test_uprobe_lib(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	FILE *devnull;

	/* test attach by name for a library function, using the library
	 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
	 */
	uprobe_opts.func_name = "fopen";
	uprobe_opts.retprobe = false;
	skel->links.handle_uprobe_byname2 =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
							0 /* this pid */,
							"libc.so.6",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
		return;

	uprobe_opts.func_name = "fclose";
	uprobe_opts.retprobe = true;
	skel->links.handle_uretprobe_byname2 =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
							-1 /* any pid */,
							"libc.so.6",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
		return;

	/* trigger & validate shared library u[ret]probes attached by name */
	devnull = fopen("/dev/null", "r");
	if (!ASSERT_OK_PTR(devnull, "fopen_devnull"))
		return;
	fclose(devnull);

	ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
	ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
}
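
/*
 * The same library attachment should also be expressible declaratively, e.g.
 * SEC("uprobe/libc.so.6:fopen") on the BPF side, since libbpf resolves bare
 * library names in uprobe section specs as well; the explicit
 * bpf_program__attach_uprobe_opts() calls above simply exercise the
 * by-name-plus-library path directly.
 */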

static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	ssize_t uprobe_offset, ref_ctr_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func4);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
		return;

	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
		return;

	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");

	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
		return;
	skel->links.handle_uprobe_ref_ctr = uprobe_link;

	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");

	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
	uprobe_opts.retprobe = true;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
		return;
	skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
}
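
/*
 * Real USDT probes get the same semaphore handling for free. A rough sketch,
 * where the provider/probe names are made up and not part of this test:
 *
 *	link = bpf_program__attach_usdt(prog, -1 (any pid), "/proc/self/exe",
 *					"my_provider", "my_probe", NULL);
 *
 * bpf_program__attach_usdt() looks the semaphore address up in the binary's
 * ELF notes and maintains it just like the explicit ref_ctr_offset above.
 */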

static void test_kprobe_sleepable(void)
{
	struct test_attach_kprobe_sleepable *skel;

	skel = test_attach_kprobe_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
		return;

	/* sleepable kprobe test case needs flags set before loading */
	if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
		BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
		goto cleanup;

	if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
		       "skel_kprobe_sleepable_load"))
		goto cleanup;

	/* sleepable kprobes should not attach successfully */
	skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
	ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");

cleanup:
	test_attach_kprobe_sleepable__destroy(skel);
}
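
/*
 * Note that loading the sleepable program above succeeds (the KPROBE program
 * type also backs uprobes, which may be sleepable); it is attaching it to an
 * actual kprobe that the kernel refuses, since kprobes fire in a context that
 * must not sleep. That is why only the attach is expected to fail.
 */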

static void test_uprobe_sleepable(struct test_attach_probe *skel)
{
	/* test sleepable uprobe and uretprobe variants */
	skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
		return;

	skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
		return;

	skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
		return;

	skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
		return;

	skel->bss->user_ptr = test_data;

	/* trigger & validate sleepable uprobe attached by name */
	trigger_func3();

	ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}
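
/*
 * A hedged sketch of what one of the sleepable handlers above might do on the
 * BPF side. The real programs live in progs/test_attach_probe.c; only user_ptr
 * and the expected result values are taken from this file, everything else
 * (section string, buffer, body) is illustrative:
 *
 *	void *user_ptr;		// set from user space via skel->bss->user_ptr
 *	int uprobe_byname3_sleepable_res = 0;
 *	int uprobe_byname3_str_sleepable_res = 0;
 *
 *	SEC("uprobe.s//proc/self/exe:trigger_func3")
 *	int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
 *	{
 *		char buf[16];
 *
 *		// bpf_copy_from_user() may fault and sleep, so it is only
 *		// allowed in sleepable (".s") programs
 *		if (!bpf_copy_from_user(buf, sizeof(buf), user_ptr))
 *			uprobe_byname3_str_sleepable_res = 10;
 *		uprobe_byname3_sleepable_res = 9;
 *		return 0;
 *	}
 */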

void test_attach_probe(void)
{
	struct test_attach_probe *skel;

	skel = test_attach_probe__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
		goto cleanup;
	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
		goto cleanup;

	if (test__start_subtest("manual-default"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
	if (test__start_subtest("manual-legacy"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
	if (test__start_subtest("manual-perf"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("manual-link"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);

	if (test__start_subtest("auto"))
		test_attach_probe_auto(skel);
	if (test__start_subtest("kprobe-sleepable"))
		test_kprobe_sleepable();
	if (test__start_subtest("uprobe-lib"))
		test_uprobe_lib(skel);
	if (test__start_subtest("uprobe-sleepable"))
		test_uprobe_sleepable(skel);
	if (test__start_subtest("uprobe-ref_ctr"))
		test_uprobe_ref_ctr(skel);

	if (test__start_subtest("uprobe-long_name"))
		test_attach_uprobe_long_event_name();
	if (test__start_subtest("kprobe-long_name"))
		test_attach_kprobe_long_event_name();

cleanup:
	test_attach_probe__destroy(skel);
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}
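
/*
 * The subtests above can be selected individually with test_progs' -t filter,
 * e.g.:
 *
 *	./test_progs -t attach_probe
 *	./test_progs -t attach_probe/manual-legacy
 */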