// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"

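/* positive sentinel value, returned negated from the sample callback to tell
 * ring_buffer__poll() that we are done; distinct from any real errno value
 */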
#define EDONE 7777

static int duration = 0;

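/* must stay in sync with the sample layout filled in by the BPF side (see
 * progs/test_ringbuf*.c, from which the .lskel.h skeletons are generated)
 */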
struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int sample_cnt;

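/* sample_cnt is bumped from the poll callback, which may run on a background
 * poller thread, and read/reset from the main thread, so access it atomically
 */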
static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples(void)
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

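/* background poller; the ring_buffer__poll() result is smuggled back to the
 * joining thread through the pthread return value
 */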
static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

static void ringbuf_subtest(void)
{
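	/* every ringbuf record is prefixed by an 8-byte bookkeeping header
	 * (BPF_RINGBUF_HDR_SZ), hence rec_sz below
	 */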
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;
	struct ring *ring;
	int map_fd;
	unsigned long avail_data, ring_size, cons_pos, prod_pos;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
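	/* ringbuf mmap layout: page 0 holds the consumer position (the only
	 * page userspace may map writable), page 1 holds the producer
	 * position, and the data area starts at page 2. Data pages are mapped
	 * twice back-to-back so that records wrapping around the end of the
	 * buffer still appear virtually contiguous; a page_size ringbuf thus
	 * spans 4 pages total, which the checks below rely on.
	 */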
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

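	/* ring_buffer__ring() gives access to an individual ring within the
	 * ring_buffer manager; index 0 is the only ringbuf map added here
	 */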
	ring = ring_buffer__ring(ringbuf, 0);
	if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
		goto cleanup;

	map_fd = ring__map_fd(ring);
	ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");

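	/* a discarded record still advances the producer position, so it
	 * counts toward avail_data until the consumer moves past it
	 */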
	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* verify getting this data directly via the ring object yields the
	 * same results
	 */
	avail_data = ring__avail_data_size(ring);
	ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
	ring_size = ring__size(ring);
	ASSERT_EQ(ring_size, page_size, "ring_ring_size");
	cons_pos = ring__consumer_pos(ring);
	ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
	prod_pos = ring__producer_pos(ring);
	ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");

	/* poll for samples; a -1 timeout blocks until data arrives */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications: with BPF_RB_NO_WAKEUP the producer never
	 * sends an epoll wakeup on submit/discard
	 */
	skel->bss->flags = BPF_RB_NO_WAKEUP;

	/* give background thread a bit of time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but no better way to know that
	 * epoll_wait() **DID NOT** unblock in background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode: the kernel
	 * sends a wakeup only if the consumer has already caught up to the
	 * record being committed
	 */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications: BPF_RB_FORCE_WAKEUP sends an epoll wakeup
	 * on every commit, even though the consumer is still behind
	 */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* also consume using ring__consume to make sure it works the same */
	err = ring__consume(ring);
	ASSERT_GE(err, 0, "ring_consume");

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}

/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the
 * ring buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
#define N_TOT_SAMPLES	32
#define N_SAMPLES	4

/* Sample value to verify the callback validity */
#define SAMPLE_VALUE	42L

static int process_n_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");

	return 0;
}

static void ringbuf_n_subtest(void)
{
	struct test_ringbuf_n_lskel *skel_n;
	int err, i;

	skel_n = test_ringbuf_n_lskel__open();
	if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
		return;

	skel_n->maps.ringbuf.max_entries = getpagesize();
	skel_n->bss->pid = getpid();

	err = test_ringbuf_n_lskel__load(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
				   process_n_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_n_lskel__attach(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
		goto cleanup_ringbuf;

	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
	skel_n->bss->value = SAMPLE_VALUE;
	for (i = 0; i < N_TOT_SAMPLES; i++)
		syscall(__NR_getpgid);

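	/* note: ring_buffer__consume_n() returns the number of records
	 * consumed, up to the requested n, or a negative error; with all
	 * samples already produced above, each batch must be full
	 */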
	/* Consume all samples from the ring buffer in batches of N_SAMPLES */
	for (i = 0; i < N_TOT_SAMPLES; i += err) {
		err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
		if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
			goto cleanup_ringbuf;
	}

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_n_lskel__destroy(skel_n);
}

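/* the BPF side uses the ringbuf sample it reserved as the key under which it
 * stores val=1 in hash_map; the callback re-does the lookup from userspace
 * with the consumed sample to confirm both sides see the same record
 */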
static int process_map_key_sample(void *ctx, void *data, size_t len)
{
	struct sample *s;
	int err, val;

	s = data;
	switch (s->seq) {
	case 1:
		ASSERT_EQ(s->value, 42, "sample_value");
		err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
					  s, &val);
		ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
		ASSERT_EQ(val, 1, "hash_map val");
		return -EDONE;
	default:
		return 0;
	}
}

static void ringbuf_map_key_subtest(void)
{
	int err;

	skel_map_key = test_ringbuf_map_key_lskel__open();
	if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
		return;

	skel_map_key->maps.ringbuf.max_entries = getpagesize();
	skel_map_key->bss->pid = getpid();

	err = test_ringbuf_map_key_lskel__load(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
				   process_map_key_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_map_key_lskel__attach(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
		goto cleanup_ringbuf;

	syscall(__NR_getpgid);
	ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
	err = ring_buffer__poll(ringbuf, -1);
	ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

void test_ringbuf(void)
{
	if (test__start_subtest("ringbuf"))
		ringbuf_subtest();
	if (test__start_subtest("ringbuf_n"))
		ringbuf_n_subtest();
	if (test__start_subtest("ringbuf_map_key"))
		ringbuf_map_key_subtest();
}