// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"

/* uprobe attach point */
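/* the empty asm volatile below acts as a compiler barrier: it keeps the
 * body from being optimized away, so there are real instructions at a
 * stable offset for the uprobe to attach to
 */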
static void trigger_func(void)
{
	asm volatile ("");
}

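/* Attach the same kprobe program twice and the same kretprobe program
 * twice, each attachment with a distinct cookie. The BPF side (see
 * progs/test_bpf_cookie.c) is expected to do roughly
 *
 *	kprobe_res |= bpf_get_attach_cookie(ctx);
 *
 * so each attachment's cookie shows up as a separate bit in the result.
 */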
static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						 SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						 SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						    SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						    SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

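/* Same idea for uprobes; the pid argument of
 * bpf_program__attach_uprobe_opts() selects which processes are traced
 * (0 means the calling process, -1 means any process), and both flavors
 * should fire here since the probe is triggered from this process.
 */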
static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	ssize_t uprobe_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	trigger_func();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

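/* Attach two tracepoint programs with distinct cookies to the same
 * tracepoint, then detach one and attach a third, to make sure cookies
 * stay associated with the right program while the shared bpf_prog_array
 * is reshuffled underneath.
 */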
static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach first prog and will attach third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array_items around. We test here that we don't lose track
	 * of associated bpf_cookies.
	 */
	bpf_link__destroy(link1);
	link1 = NULL;
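	/* wait out an RCU grace period so the detached program is really
	 * gone before re-measuring
	 */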
	kern_sync_rcu();
	skel->bss->tp_res = 0;

	/* attach third tp prog */
	opts.bpf_cookie = 0x40000;
	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link3, "link3"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(link3);
}

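/* burn_cpu() pins itself to CPU 0 so its samples land on the CPU-0 perf
 * event opened in pe_subtest() below.
 */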
static void burn_cpu(void)
{
	volatile int j = 0;
	cpu_set_t cpu_set;
	int i, err;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	ASSERT_OK(err, "set_thread_affinity");

	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;
}

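/* Here a single perf event FD gets a BPF program attached, detached, and
 * re-attached with a different cookie, verifying that the cookie belongs
 * to the attachment rather than to the perf event itself.
 */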
static void pe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
	struct bpf_link *link = NULL;
	struct perf_event_attr attr;
	int pfd = -1;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 4000;
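	/* pid == -1, cpu == 0: profile every process, but only on CPU 0 */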
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;

	opts.bpf_cookie = 0x100000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link1"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");

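	/* swap in a new cookie on the same perf event: detach the program
	 * without closing pfd
	 */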
	/* prevent bpf_link__destroy() closing pfd itself */
	bpf_link__disconnect(link);
	/* close BPF link's FD explicitly */
	close(bpf_link__fd(link));
	/* free up memory used by struct bpf_link */
	bpf_link__destroy(link);
	link = NULL;
	kern_sync_rcu();
	skel->bss->pe_res = 0;

	opts.bpf_cookie = 0x200000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link2"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");

cleanup:
	if (pfd >= 0) /* don't close(-1) if perf_event_open() failed */
		close(pfd);
	bpf_link__destroy(link);
}

void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

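	/* the BPF programs use my_tid to ignore events from other threads,
	 * so unrelated system activity can't pollute the result counters
	 */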
	skel->bss->my_tid = syscall(SYS_gettid);

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);

	test_bpf_cookie__destroy(skel);
}