xref: /linux/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c (revision 3490d29964bdd524366d266b655112cb549c7460)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include "stacktrace_ips.skel.h"
4 
5 #ifdef __x86_64__
6 static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
7 {
8 	__u64 ips[PERF_MAX_STACK_DEPTH];
9 	struct ksyms *ksyms = NULL;
10 	int i, err = 0;
11 	va_list args;
12 
13 	/* sorted by addr */
14 	ksyms = load_kallsyms_local();
15 	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
16 		return -1;
17 
18 	/* unlikely, but... */
19 	if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
20 		return -1;
21 
22 	err = bpf_map_lookup_elem(fd, &key, ips);
23 	if (err)
24 		goto out;
25 
26 	/*
27 	 * Compare all symbols provided via arguments with stacktrace ips,
28 	 * and their related symbol addresses.t
29 	 */
30 	va_start(args, cnt);
31 
32 	for (i = 0; i < cnt; i++) {
33 		unsigned long val;
34 		struct ksym *ksym;
35 
36 		val = va_arg(args, unsigned long);
37 		ksym = ksym_search_local(ksyms, ips[i]);
38 		if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
39 			break;
40 		ASSERT_EQ(ksym->addr, val, "stack_cmp");
41 	}
42 
43 	va_end(args);
44 
45 out:
46 	free_kallsyms_local(ksyms);
47 	return err;
48 }
49 
50 static void test_stacktrace_ips_kprobe_multi(bool retprobe)
51 {
52 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
53 		.retprobe = retprobe
54 	);
55 	LIBBPF_OPTS(bpf_test_run_opts, topts);
56 	struct stacktrace_ips *skel;
57 
58 	skel = stacktrace_ips__open_and_load();
59 	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
60 		return;
61 
62 	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
63 		test__skip();
64 		goto cleanup;
65 	}
66 
67 	skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
68 							skel->progs.kprobe_multi_test,
69 							"bpf_testmod_stacktrace_test", &opts);
70 	if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
71 		goto cleanup;
72 
73 	trigger_module_test_read(1);
74 
75 	load_kallsyms();
76 
77 	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
78 			     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
79 			     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
80 			     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
81 			     ksym_get_addr("bpf_testmod_test_read"));
82 
83 cleanup:
84 	stacktrace_ips__destroy(skel);
85 }
86 
87 static void test_stacktrace_ips_raw_tp(void)
88 {
89 	__u32 info_len = sizeof(struct bpf_prog_info);
90 	LIBBPF_OPTS(bpf_test_run_opts, topts);
91 	struct bpf_prog_info info = {};
92 	struct stacktrace_ips *skel;
93 	__u64 bpf_prog_ksym = 0;
94 	int err;
95 
96 	skel = stacktrace_ips__open_and_load();
97 	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
98 		return;
99 
100 	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
101 		test__skip();
102 		goto cleanup;
103 	}
104 
105 	skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
106 							skel->progs.rawtp_test,
107 							"bpf_testmod_test_read");
108 	if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
109 		goto cleanup;
110 
111 	/* get bpf program address */
112 	info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
113 	info.nr_jited_ksyms = 1;
114 	err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
115 				      &info, &info_len);
116 	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
117 		goto cleanup;
118 
119 	trigger_module_test_read(1);
120 
121 	load_kallsyms();
122 
123 	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
124 			     bpf_prog_ksym,
125 			     ksym_get_addr("bpf_trace_run2"));
126 
127 cleanup:
128 	stacktrace_ips__destroy(skel);
129 }
130 
/* Register and dispatch the individual subtests (x86_64 implementation). */
static void __test_stacktrace_ips(void)
{
	if (test__start_subtest("kprobe_multi"))
		test_stacktrace_ips_kprobe_multi(false);
	if (test__start_subtest("kretprobe_multi"))
		test_stacktrace_ips_kprobe_multi(true);
	if (test__start_subtest("raw_tp"))
		test_stacktrace_ips_raw_tp();
}
140 #else
/* Non-x86_64 fallback: the checks above are guarded by __x86_64__, so just skip. */
static void __test_stacktrace_ips(void)
{
	test__skip();
}
145 #endif
146 
/* Test entry point; presumably discovered by the selftest harness by name. */
void test_stacktrace_ips(void)
{
	__test_stacktrace_ips();
}
151