xref: /linux/tools/testing/selftests/bpf/prog_tests/ringbuf.c (revision 84318277d6334c6981ab326d4acc87c6a6ddc9b8)
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#include "test_ringbuf_write.lskel.h"
#include "test_ringbuf_overwrite.lskel.h"

#define EDONE 7777

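/* 'duration' is referenced by the CHECK() macro from test_progs.h */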
static int duration = 0;

struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int sample_cnt;

static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

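/*
 * Sample callback used with ring_buffer__poll()/consume(): count every
 * consumed sample and validate the two samples produced by trigger_samples().
 * Returning -EDONE from the second sample stops consumption and is propagated
 * back to the caller.
 */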
static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples(void)
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

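/*
 * Verify that user space corrupting consumer_pos (moving it ahead of the
 * producer position) cannot trick the kernel into over-reserving: each of the
 * two triggers below is expected to take the "discarded" path on the BPF side
 * and nothing should be counted as "passed".
 */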
static void ringbuf_write_subtest(void)
{
	struct test_ringbuf_write_lskel *skel;
	int page_size = getpagesize();
	size_t *mmap_ptr;
	int err, rb_fd;

	skel = test_ringbuf_write_lskel__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->maps.ringbuf.max_entries = 0x40000;

	err = test_ringbuf_write_lskel__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;

	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
		goto cleanup;
	*mmap_ptr = 0x30000;
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
		goto cleanup;

	err = test_ringbuf_write_lskel__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup_ringbuf;

	skel->bss->discarded = 0;
	skel->bss->passed = 0;

	/* trigger exactly two samples */
	syscall(__NR_getpgid);
	syscall(__NR_getpgid);

	ASSERT_EQ(skel->bss->discarded, 2, "discarded");
	ASSERT_EQ(skel->bss->passed, 0, "passed");

	test_ringbuf_write_lskel__detach(skel);
cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_write_lskel__destroy(skel);
}

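/*
 * Core ring buffer subtest: exercise mmap()/mremap()/mprotect() permission
 * rules for the consumer, producer and data pages, then validate sample
 * delivery, position bookkeeping exposed through the ring APIs, and the
 * BPF_RB_NO_WAKEUP/BPF_RB_FORCE_WAKEUP notification modes against a
 * background polling thread.
 */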
static void ringbuf_subtest(void)
{
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;
	struct ring *ring;
	int map_fd;
	unsigned long avail_data, ring_size, cons_pos, prod_pos;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writeable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writeable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

	ring = ring_buffer__ring(ringbuf, 0);
	if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
		goto cleanup;

	map_fd = ring__map_fd(ring);
	ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");

	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* verify getting this data directly via the ring object yields the same
	 * results
	 */
	avail_data = ring__avail_data_size(ring);
	ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
	ring_size = ring__size(ring);
	ASSERT_EQ(ring_size, page_size, "ring_ring_size");
	cons_pos = ring__consumer_pos(ring);
	ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
	prod_pos = ring__producer_pos(ring);
	ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");

	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;

	/* give the background thread a bit of time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but no better way to know that
	 * epoll_wait() **DID NOT** unblock in background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* also consume using ring__consume to make sure it works the same */
	err = ring__consume(ring);
	ASSERT_GE(err, 0, "ring_consume");

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}

/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
 * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
#define N_TOT_SAMPLES	32
#define N_SAMPLES	4

/* Sample value used by the callback to validate produced samples */
#define SAMPLE_VALUE	42L

static int process_n_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");

	return 0;
}

static void ringbuf_n_subtest(void)
{
	struct test_ringbuf_n_lskel *skel_n;
	int err, i;

	skel_n = test_ringbuf_n_lskel__open();
	if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
		return;

	skel_n->maps.ringbuf.max_entries = getpagesize();
	skel_n->bss->pid = getpid();

	err = test_ringbuf_n_lskel__load(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
				   process_n_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_n_lskel__attach(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
		goto cleanup_ringbuf;

	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
	skel_n->bss->value = SAMPLE_VALUE;
	for (i = 0; i < N_TOT_SAMPLES; i++)
		syscall(__NR_getpgid);

	/* Consume all samples from the ring buffer in batches of N_SAMPLES */
	for (i = 0; i < N_TOT_SAMPLES; i += err) {
		err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
		if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
			goto cleanup_ringbuf;
	}

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_n_lskel__destroy(skel_n);
}

static int process_map_key_sample(void *ctx, void *data, size_t len)
{
	struct sample *s;
	int err, val;

	s = data;
	switch (s->seq) {
	case 1:
		ASSERT_EQ(s->value, 42, "sample_value");
		err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
					  s, &val);
		ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
		ASSERT_EQ(val, 1, "hash_map val");
		return -EDONE;
	default:
		return 0;
	}
}

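/*
 * Check that ring buffer sample memory can be used as a map key: the BPF
 * program inserts the reserved sample into hash_map before submitting it,
 * and process_map_key_sample() verifies the lookup from user space.
 */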
static void ringbuf_map_key_subtest(void)
{
	int err;

	skel_map_key = test_ringbuf_map_key_lskel__open();
	if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
		return;

	skel_map_key->maps.ringbuf.max_entries = getpagesize();
	skel_map_key->bss->pid = getpid();

	err = test_ringbuf_map_key_lskel__load(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
				   process_map_key_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_map_key_lskel__attach(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
		goto cleanup_ringbuf;

	syscall(__NR_getpgid);
	ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
	err = ring_buffer__poll(ringbuf, -1);
	ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

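/*
 * Overwrite-mode subtest: the reservation sizes LEN1..LEN5 handed to the BPF
 * program are derived from the page-sized ring so that the third reservation
 * fails while the others succeed, then the ring size, available data and
 * consumer/producer/overwrite positions reported back are checked.
 */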
static void ringbuf_overwrite_mode_subtest(void)
{
	unsigned long size, len1, len2, len3, len4, len5;
	unsigned long expect_avail_data, expect_prod_pos, expect_over_pos;
	struct test_ringbuf_overwrite_lskel *skel;
	int page_size = getpagesize();
	int err;

	skel = test_ringbuf_overwrite_lskel__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	size = page_size;
	len1 = page_size / 2;
	len2 = page_size / 4;
	len3 = size - len1 - len2 - BPF_RINGBUF_HDR_SZ * 3;
	len4 = len3 - 8;
	len5 = len3; /* retry with len3 */

	skel->maps.ringbuf.max_entries = size;
	skel->rodata->LEN1 = len1;
	skel->rodata->LEN2 = len2;
	skel->rodata->LEN3 = len3;
	skel->rodata->LEN4 = len4;
	skel->rodata->LEN5 = len5;

	skel->bss->pid = getpid();

	err = test_ringbuf_overwrite_lskel__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	err = test_ringbuf_overwrite_lskel__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	syscall(__NR_getpgid);

	ASSERT_EQ(skel->bss->reserve1_fail, 0, "reserve 1");
	ASSERT_EQ(skel->bss->reserve2_fail, 0, "reserve 2");
	ASSERT_EQ(skel->bss->reserve3_fail, 1, "reserve 3");
	ASSERT_EQ(skel->bss->reserve4_fail, 0, "reserve 4");
	ASSERT_EQ(skel->bss->reserve5_fail, 0, "reserve 5");

	ASSERT_EQ(skel->bss->ring_size, size, "check_ring_size");

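	/*
	 * Expected layout: the failed third reservation is not accounted for,
	 * records 4 and 5 wrapped around and overwrote the oldest record
	 * (len1), so over_pos sits just past record 1 and the available data
	 * covers records 2, 4 and 5 (prod_pos - over_pos).
	 */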
	expect_avail_data = len2 + len4 + len5 + 3 * BPF_RINGBUF_HDR_SZ;
	ASSERT_EQ(skel->bss->avail_data, expect_avail_data, "check_avail_size");

	ASSERT_EQ(skel->bss->cons_pos, 0, "check_cons_pos");

	expect_prod_pos = len1 + len2 + len4 + len5 + 4 * BPF_RINGBUF_HDR_SZ;
	ASSERT_EQ(skel->bss->prod_pos, expect_prod_pos, "check_prod_pos");

	expect_over_pos = len1 + BPF_RINGBUF_HDR_SZ;
	ASSERT_EQ(skel->bss->over_pos, expect_over_pos, "check_over_pos");

	test_ringbuf_overwrite_lskel__detach(skel);
cleanup:
	test_ringbuf_overwrite_lskel__destroy(skel);
}

void test_ringbuf(void)
{
	if (test__start_subtest("ringbuf"))
		ringbuf_subtest();
	if (test__start_subtest("ringbuf_n"))
		ringbuf_n_subtest();
	if (test__start_subtest("ringbuf_map_key"))
		ringbuf_map_key_subtest();
	if (test__start_subtest("ringbuf_write"))
		ringbuf_write_subtest();
	if (test__start_subtest("ringbuf_overwrite_mode"))
		ringbuf_overwrite_mode_subtest();
}