// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"

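/* Each subtest below attaches BPF programs with distinct bpf_cookie
 * values and verifies that every attachment observes its own cookie.
 * On the BPF side (test_bpf_cookie.c) each program is expected to read
 * the cookie with bpf_get_attach_cookie() and OR it into a per-type
 * result variable.
 */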
static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						 SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						 SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						    SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						    SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

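	/* cookies are distinct bit flags, so the OR-ed expectations below
	 * confirm that both attachments fired with their own cookie value
	 */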
	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	size_t uprobe_offset;
	ssize_t base_addr;

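	/* translate get_base_addr()'s runtime address into a file offset
	 * within /proc/self/exe, as expected by uprobe attachment
	 */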
	base_addr = get_base_addr();
	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr);

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	get_base_addr();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach first prog and will attach third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array_items around. We test here that we don't lose track
	 * of associated bpf_cookies.
	 */
	bpf_link__destroy(link1);
	link1 = NULL;
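	/* wait for an RCU grace period so the detached program is gone
	 * from the tracepoint's prog array before re-measuring
	 */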
	kern_sync_rcu();
	skel->bss->tp_res = 0;

	/* attach third tp prog */
	opts.bpf_cookie = 0x40000;
	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link3, "link3"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(link3);
}

static void burn_cpu(void)
{
	volatile int j = 0;
	cpu_set_t cpu_set;
	int i, err;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	ASSERT_OK(err, "set_thread_affinity");

	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;
}

static void pe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
	struct bpf_link *link = NULL;
	struct perf_event_attr attr;
	int pfd = -1;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 4000;
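	/* measure all processes, but only on CPU 0 (pid = -1, cpu = 0);
	 * burn_cpu() pins itself to CPU 0, so samples will trigger the prog
	 */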
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;

	opts.bpf_cookie = 0x100000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link1"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");

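	/* detach the first program but keep pfd open, so the same perf
	 * event can be re-used with a different cookie below
	 */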
	/* prevent bpf_link__destroy() closing pfd itself */
	bpf_link__disconnect(link);
	/* close BPF link's FD explicitly */
	close(bpf_link__fd(link));
	/* free up memory used by struct bpf_link */
	bpf_link__destroy(link);
	link = NULL;
	kern_sync_rcu();
	skel->bss->pe_res = 0;

	opts.bpf_cookie = 0x200000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link2"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");

cleanup:
	close(pfd);
	bpf_link__destroy(link);
}

void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

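	/* BPF programs are expected to filter on this TID so activity from
	 * unrelated threads doesn't pollute the results
	 */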
	skel->bss->my_tid = syscall(SYS_gettid);

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);

	test_bpf_cookie__destroy(skel);
}