// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <sys/epoll.h>
#include "test_ringbuf_multi.skel.h"

static int duration = 0;

struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int process_sample(void *ctx, void *data, size_t len)
{
	int ring = (unsigned long)ctx;
	struct sample *s = data;

	switch (s->seq) {
	case 0:
		CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring);
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		break;
	case 1:
		CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring);
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		break;
	default:
		CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n",
		      s->seq, s->value);
		return -1;
	}

	return 0;
}

void test_ringbuf_multi(void)
{
	struct test_ringbuf_multi *skel;
	struct ring_buffer *ringbuf = NULL;
	int err;
	int page_size = getpagesize();
	int proto_fd = -1;

	skel = test_ringbuf_multi__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
		goto cleanup;

	err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
		goto cleanup;

	proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
	if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
		goto cleanup;

	err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
	if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
		goto cleanup;

	err = test_ringbuf_multi__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	close(proto_fd);
	proto_fd = -1;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),
				   process_sample, (void *)(long)1, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
			       process_sample, (void *)(long)2);
	if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
		goto cleanup;

	err = test_ringbuf_multi__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	/* trigger few samples, some will be skipped */
	skel->bss->target_ring = 0;
	skel->bss->value = 333;
	syscall(__NR_getpgid);

	/* skipped, no ringbuf in slot 1 */
	skel->bss->target_ring = 1;
	skel->bss->value = 555;
	syscall(__NR_getpgid);

	skel->bss->target_ring = 2;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* poll for samples, should get 2 ringbufs back */
	err = ring_buffer__poll(ringbuf, -1);
	if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
		goto cleanup;

	/* expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err < 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->skipped != 1, "err_skipped", "exp %ld, got %ld\n",
	      1L, skel->bss->skipped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);

cleanup:
	if (proto_fd >= 0)
		close(proto_fd);
	ring_buffer__free(ringbuf);
	test_ringbuf_multi__destroy(skel);
}