// SPDX-License-Identifier: GPL-2.0
#include <bpf/bpf.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/if_link.h>
#include <linux/mman.h>
#include <linux/netdev.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

#include "network_helpers.h"
#include "test_xsk.h"
#include "xsk_xdp_common.h"
#include "xsk_xdp_progs.skel.h"

#define DEFAULT_BATCH_SIZE 64
#define MIN_PKT_SIZE 64
#define MAX_ETH_JUMBO_SIZE 9000
#define MAX_INTERFACES 2
#define MAX_TEARDOWN_ITER 10
#define MAX_TX_BUDGET_DEFAULT 32
#define PKT_DUMP_NB_TO_PRINT 16
/* Just to align the data in the packet */
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2)
#define POLL_TMOUT 1000
#define THREAD_TMOUT 3
#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_DESC__INVALID_OPTION (0xffff)
#define XSK_UMEM__INVALID_FRAME_SIZE (MAX_ETH_JUMBO_SIZE + 1)
#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)

static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};

bool opt_verbose;
pthread_barrier_t barr;
pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;

int pkts_in_flight;

/* The payload is a word consisting of a packet sequence number in the upper
 * 16 bits and an intra-packet data sequence number in the lower 16 bits. So
 * the 3rd packet's 5th word of data will contain the number (2 << 16) | 4 as
 * both are numbered from 0.
 */
static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
	u32 *ptr = (u32 *)dest, i;

	start /= sizeof(*ptr);
	size /= sizeof(*ptr);
	for (i = 0; i < size; i++)
		ptr[i] = htonl(pkt_nb << 16 | (i + start));
}

static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
	memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
	memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}

static bool is_umem_valid(struct ifobject *ifobj)
{
	return !!ifobj->umem->umem;
}

static u32 mode_to_xdp_flags(enum test_mode mode)
{
	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
}

static u64 umem_size(struct xsk_umem_info *umem)
{
	return umem->num_frames * umem->frame_size;
}

int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
		       u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	if (umem->fill_size)
		cfg.fill_size = umem->fill_size;

	if (umem->comp_size)
		cfg.comp_size = umem->comp_size;

	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size,
			       &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	if (ifobj->shared_umem && ifobj->rx_on) {
		umem->base_addr = umem_size(umem);
		umem->next_buffer = umem_size(umem);
	}

	return 0;
}

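/* Hand out the next free frame in the UMEM. Allocation is a simple
 * round-robin over the UMEM area: once the end is reached, wrap back to
 * base_addr. Frames are never freed individually; umem_reset_alloc()
 * rewinds next_buffer to zero instead.
 */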
static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
{
	u64 addr;

	addr = umem->next_buffer;
	umem->next_buffer += umem->frame_size;
	if (umem->next_buffer >= umem->base_addr + umem_size(umem))
		umem->next_buffer = umem->base_addr;

	return addr;
}

static void umem_reset_alloc(struct xsk_umem_info *umem)
{
	umem->next_buffer = 0;
}

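/* Switch the socket to preferred busy polling: busy poll for up to 20 usecs
 * per syscall (SO_BUSY_POLL) and let the driver process at most batch_size
 * packets per busy-poll attempt (SO_BUSY_POLL_BUDGET).
 */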
static int enable_busy_poll(struct xsk_socket_info *xsk)
{
	int sock_opt;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = xsk->batch_size;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	return 0;
}

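/* Create an AF_XDP socket on top of @umem. The Rx/Tx ring sizes follow the
 * fill/completion ring overrides when those are set, XDP_SHARED_UMEM is
 * requested for secondary sockets, and XDP_USE_SG is added whenever the MTU
 * requires multi-buffer support.
 */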
int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
			 struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;

	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;
	if (ifobject->mtu > MAX_ETH_PKT_SIZE)
		cfg.bind_flags |= XDP_USE_SG;
	if (umem->comp_size)
		cfg.tx_size = umem->comp_size;
	if (umem->fill_size)
		cfg.rx_size = umem->fill_size;

	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}

#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
static unsigned int get_max_skb_frags(void)
{
	unsigned int max_skb_frags = 0;
	FILE *file;

	file = fopen(MAX_SKB_FRAGS_PATH, "r");
	if (!file) {
		ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
		return 0;
	}

	if (fscanf(file, "%u", &max_skb_frags) != 1)
		ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);

	fclose(file);
	return max_skb_frags;
}

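/* Apply the requested HW ring sizes, retrying for as long as the device
 * reports EBUSY, up to SOCK_RECONF_CTR attempts.
 */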
static int set_ring_size(struct ifobject *ifobj)
{
	int ret;
	u32 ctr = 0;

	while (ctr++ < SOCK_RECONF_CTR) {
		ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
		if (!ret)
			break;

		/* Only retry on EBUSY, and give up after SOCK_RECONF_CTR attempts */
		if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
			return -errno;

		usleep(USLEEP_MAX);
	}

	return ret;
}

int hw_ring_size_reset(struct ifobject *ifobj)
{
	ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
	ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
	return set_ring_size(ifobj);
}

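/* Reset both interface objects and the test_spec to the default
 * single-socket Tx -> Rx configuration. Called at the start of every test
 * and again by test_spec_reset() between test steps.
 */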
static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;
		ifobj->use_metadata = false;

		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
		}

		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;

		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
			ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
			if (i == 0)
				ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
			else
				ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;

			memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
			memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
			ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
			ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
		}
	}

	if (ifobj_tx->hw_ring_size_supp)
		hw_ring_size_reset(ifobj_tx);

	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
	test->set_ring = false;
	test->adjust_tail = false;
	test->adjust_tail_support = false;
	test->mtu = MAX_ETH_PKT_SIZE;
	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
	test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}

void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
	       struct ifobject *ifobj_rx, enum test_mode mode,
	       const struct test_spec *test_to_run)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;

	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}

	memcpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
	test->test_func = test_to_run->test_func;
	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}

static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}

static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
				   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
				   struct bpf_map *xskmap_tx)
{
	test->xdp_prog_rx = xdp_prog_rx;
	test->xdp_prog_tx = xdp_prog_tx;
	test->xskmap_rx = xskmap_rx;
	test->xskmap_tx = xskmap_tx;
}

static int test_spec_set_mtu(struct test_spec *test, int mtu)
{
	int err;

	if (test->ifobj_rx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_rx->mtu = mtu;
	}
	if (test->ifobj_tx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_tx->mtu = mtu;
	}

	return 0;
}

void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (pkt_stream) {
		pkt_stream->current_pkt_nb = 0;
		pkt_stream->nb_rx_pkts = 0;
	}
}

static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
	if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
		return NULL;

	return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}

static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
		pkt_stream->current_pkt_nb++;
	}
	return NULL;
}

void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
	free(pkt_stream->pkts);
	free(pkt_stream);
}

void pkt_stream_restore_default(struct test_spec *test)
{
	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;

	if (tx_pkt_stream != test->tx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
		test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
	}

	if (rx_pkt_stream != test->rx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
		test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
	}
}

static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = calloc(1, sizeof(*pkt_stream));
	if (!pkt_stream)
		return NULL;

	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
	if (!pkt_stream->pkts) {
		free(pkt_stream);
		return NULL;
	}

	pkt_stream->nb_pkts = nb_pkts;
	return pkt_stream;
}

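/* Return the number of frames a packet occupies. In normal mode this is
 * simply len rounded up to the frame size. In verbatim mode each pkt entry
 * describes a single frag, so walk forward until the XDP_PKT_CONTD option
 * bit is no longer set.
 */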
static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
{
	u32 nb_frags = 1, next_frag;

	if (!pkt)
		return 1;

	if (!pkt_stream->verbatim) {
		if (!pkt->valid || !pkt->len)
			return 1;
		return ceil_u32(pkt->len, frame_size);
	}

	/* Search for the end of the packet in verbatim mode */
	if (!pkt_continues(pkt->options) || !pkt->valid)
		return nb_frags;

	next_frag = pkt_stream->current_pkt_nb;
	pkt++;
	while (next_frag++ < pkt_stream->nb_pkts) {
		nb_frags++;
		if (!pkt_continues(pkt->options) || !pkt->valid)
			break;
		pkt++;
	}
	return nb_frags;
}

static bool set_pkt_valid(int offset, u32 len)
{
	return len <= MAX_ETH_JUMBO_SIZE;
}

static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	pkt->offset = offset;
	pkt->len = len;
	pkt->valid = set_pkt_valid(offset, len);
}

static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	bool prev_pkt_valid = pkt->valid;

	pkt_set(pkt_stream, pkt, offset, len);
	pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
}

static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
{
	return ceil_u32(len, umem->frame_size) * umem->frame_size;
}

static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = __pkt_stream_alloc(nb_pkts);
	if (!pkt_stream)
		return NULL;

	pkt_stream->nb_pkts = nb_pkts;
	pkt_stream->max_pkt_len = pkt_len;
	for (i = 0; i < nb_pkts; i++) {
		struct pkt *pkt = &pkt_stream->pkts[i];

		pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
		pkt->pkt_nb = nb_start + i * nb_off;
	}

	return pkt_stream;
}

struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
	return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
}

static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
	return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}

static int pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
{
	ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);

	if (!ifobj->xsk->pkt_stream)
		return -ENOMEM;

	return 0;
}

static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	int ret;

	ret = pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
	if (ret)
		return ret;

	return pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
}

static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				     int offset)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
	if (!pkt_stream)
		return -ENOMEM;

	for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
		pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);

	ifobj->xsk->pkt_stream = pkt_stream;

	return 0;
}

static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);

	if (ret)
		return ret;

	return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}

static int pkt_stream_receive_half(struct test_spec *test)
{
	struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	u32 i;

	if (test->ifobj_rx->xsk->pkt_stream != test->rx_pkt_stream_default)
		/* Packet stream has already been replaced so we have to release this one.
		 * The newly created one will be freed by the restore_default() at the
		 * end of the test.
		 */
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);

	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
							      pkt_stream->pkts[0].len);
	if (!test->ifobj_rx->xsk->pkt_stream)
		return -ENOMEM;

	pkt_stream = test->ifobj_rx->xsk->pkt_stream;
	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
		pkt_stream->pkts[i].valid = false;

	pkt_stream->nb_valid_entries /= 2;

	return 0;
}

static int pkt_stream_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	for (i = 0; i < test->nb_sockets; i++) {
		pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;

		pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
	}

	return 0;
}

static void release_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *later_free_tx = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *later_free_rx = test->ifobj_rx->xsk->pkt_stream;
	int i;

	for (i = 0; i < test->nb_sockets; i++) {
		/* later_free_{rx/tx} will be freed by restore_default() */
		if (test->ifobj_tx->xsk_arr[i].pkt_stream != later_free_tx)
			pkt_stream_delete(test->ifobj_tx->xsk_arr[i].pkt_stream);
		if (test->ifobj_rx->xsk_arr[i].pkt_stream != later_free_rx)
			pkt_stream_delete(test->ifobj_rx->xsk_arr[i].pkt_stream);
	}
}

static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
	if (!pkt->valid)
		return pkt->offset;
	return pkt->offset + umem_alloc_buffer(umem);
}

static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
{
	pkt_stream->current_pkt_nb--;
}

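/* Fill one descriptor's worth of packet data in the UMEM: the Ethernet
 * header goes into the first frag only, and every subsequent frag continues
 * the sequence-numbered payload where the previous one stopped.
 */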
static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
			 u32 pkt_nb, u32 bytes_written)
{
	void *data = xsk_umem__get_data(umem->buffer, addr);

	if (len < MIN_PKT_SIZE)
		return;

	if (!bytes_written) {
		gen_eth_hdr(xsk, data);

		len -= PKT_HDR_SIZE;
		data += PKT_HDR_SIZE;
	} else {
		bytes_written -= PKT_HDR_SIZE;
	}

	write_payload(data, pkt_nb, bytes_written, len);
}

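/* Build a packet stream from an explicit array of frames. With @verbatim
 * set, every frame is copied as-is (one stream entry per frag, used on the
 * Tx side); otherwise frags are coalesced into one entry per packet, which
 * is what the Rx side expects to receive.
 */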
static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
						       u32 nb_frames, bool verbatim)
{
	u32 i, len = 0, pkt_nb = 0, payload = 0;
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_alloc(nb_frames);
	if (!pkt_stream)
		return NULL;

	for (i = 0; i < nb_frames; i++) {
		struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
		struct pkt *frame = &frames[i];

		pkt->offset = frame->offset;
		if (verbatim) {
			*pkt = *frame;
			pkt->pkt_nb = payload;
			if (!frame->valid || !pkt_continues(frame->options))
				payload++;
		} else {
			if (frame->valid)
				len += frame->len;
			if (frame->valid && pkt_continues(frame->options))
				continue;

			pkt->pkt_nb = pkt_nb;
			pkt->len = len;
			pkt->valid = frame->valid;
			pkt->options = 0;

			len = 0;
		}

		print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
			      pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);

		if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
			pkt_stream->max_pkt_len = pkt->len;

		if (pkt->valid)
			pkt_stream->nb_valid_entries++;

		pkt_nb++;
	}

	pkt_stream->nb_pkts = pkt_nb;
	pkt_stream->verbatim = verbatim;
	return pkt_stream;
}

static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_tx->xsk->pkt_stream = pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_rx->xsk->pkt_stream = pkt_stream;

	return 0;
}

static void pkt_print_data(u32 *data, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		u32 seqnum, pkt_nb;

		seqnum = ntohl(*data) & 0xffff;
		pkt_nb = ntohl(*data) >> 16;
		ksft_print_msg("%u:%u ", pkt_nb, seqnum);
		data++;
	}
}

static void pkt_dump(void *pkt, u32 len, bool eth_header)
{
	struct ethhdr *ethhdr = pkt;
	u32 i, *data;

	if (eth_header) {
		/* Extract the L2 frame */
		ksft_print_msg("DEBUG>> L2: dst mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_dest[i]);

		ksft_print_msg("\nDEBUG>> L2: src mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_source[i]);

		data = pkt + PKT_HDR_SIZE;
	} else {
		data = pkt;
	}

	/* Extract the L5 frame */
	ksft_print_msg("\nDEBUG>> L5: seqnum: ");
	pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
	ksft_print_msg("....");
	if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
		ksft_print_msg("\n.... ");
		pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
			       PKT_DUMP_NB_TO_PRINT);
	}
	ksft_print_msg("\n---------------------------------------\n");
}

static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset;
	int pkt_offset = pkt->valid ? pkt->offset : 0;

	if (!umem->unaligned_mode)
		pkt_offset = 0;

	expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}

static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct xdp_info *meta = data - sizeof(struct xdp_info);

	if (meta->count != pkt->pkt_nb) {
		ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
			       __func__, pkt->pkt_nb,
			       (unsigned long long)meta->count);
		return false;
	}

	return true;
}

static int is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx, bool *supported)
{
	struct bpf_map *data_map;
	int adjust_value = 0;
	int key = 0;
	int ret;

	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map)) {
		ksft_print_msg("Error: could not find bss section of XDP program\n");
		return -EINVAL;
	}

	ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
	if (ret) {
		ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
		return ret;
	}

	/* The XDP program sets the 'adjust_value' variable to -EOPNOTSUPP if
	 * the adjust_tail helper is not supported. Skip the adjust_tail test
	 * case in that scenario.
	 */
	*supported = adjust_value != -EOPNOTSUPP;

	return 0;
}

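/* Validate a single frag: it must lie inside the UMEM, stay within its
 * frame in aligned mode, and carry the expected packet number and payload
 * sequence numbers at both its first and last word.
 */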
static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
			  u32 bytes_processed)
{
	u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
	void *data = xsk_umem__get_data(umem->buffer, addr);

	addr -= umem->base_addr;

	if (addr >= umem->num_frames * umem->frame_size ||
	    addr + len > umem->num_frames * umem->frame_size) {
		ksft_print_msg("Frag invalid addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}
	if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
		ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}

	pkt_data = data;
	if (!bytes_processed) {
		pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
		len -= PKT_HDR_SIZE;
	} else {
		bytes_processed -= PKT_HDR_SIZE;
	}

	expected_seqnum = bytes_processed / sizeof(*pkt_data);
	seqnum = ntohl(*pkt_data) & 0xffff;
	pkt_nb = ntohl(*pkt_data) >> 16;

	if (expected_pkt_nb != pkt_nb) {
		ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
			       __func__, expected_pkt_nb, pkt_nb);
		goto error;
	}
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	words_to_end = len / sizeof(*pkt_data) - 1;
	pkt_data += words_to_end;
	seqnum = ntohl(*pkt_data) & 0xffff;
	expected_seqnum += words_to_end;
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	return true;

error:
	pkt_dump(data, len, !bytes_processed);
	return false;
}

static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	if (pkt->len != len) {
		ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
		return false;
	}

	return true;
}

static u32 load_value(u32 *counter)
{
	return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
}

static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
{
	u32 max_budget = MAX_TX_BUDGET_DEFAULT;
	u32 cons, ready_to_send;
	int delta;

	cons = load_value(xsk->tx.consumer);
	ready_to_send = load_value(xsk->tx.producer) - cons;
	*ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);

	delta = load_value(xsk->tx.consumer) - cons;
	/* By default, xsk should consume exactly @max_budget descs in one
	 * send when the max-budget limit of the while loop in
	 * __xsk_generic_xmit() is hit. Make sure that the number of descs
	 * to be sent is larger than @max_budget, otherwise tx.consumer is
	 * updated in time by xskq_cons_peek_desc(), which hides the issue
	 * we are trying to verify.
	 */
	if (ready_to_send > max_budget && delta != max_budget)
		return false;

	return true;
}

int kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	if (xsk->check_consumer) {
		if (!kick_tx_with_check(xsk, &ret))
			return TEST_FAILURE;
	} else {
		ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	}
	if (ret >= 0)
		return TEST_PASS;
	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
		usleep(100);
		return TEST_PASS;
	}
	return TEST_FAILURE;
}

int kick_rx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
	if (ret < 0)
		return TEST_FAILURE;

	return TEST_PASS;
}

static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;
	int ret;

	if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n",
				       (unsigned long long)addr);
			return TEST_FAILURE;
		}

		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}

	return TEST_PASS;
}

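/* Receive one batch of frags on @xsk and validate them against the expected
 * packet stream. Partially received multi-frag packets are cancelled on
 * both the Rx and fill rings so the next call starts over from the
 * beginning of that packet.
 */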
static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
	u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
	u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pollfd fds = { };
	struct pkt *pkt;
	u64 first_addr = 0;
	int ret;

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLIN;

	ret = kick_rx(xsk);
	if (ret)
		return TEST_FAILURE;

	if (ifobj->use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret < 0)
			return TEST_FAILURE;

		if (!ret) {
			if (!is_umem_valid(test->ifobj_tx))
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
			return TEST_CONTINUE;
		}

		if (!(fds.revents & POLLIN))
			return TEST_CONTINUE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
	if (!rcvd)
		return TEST_CONTINUE;

	if (ifobj->use_fill_ring) {
		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
				ret = poll(&fds, 1, POLL_TMOUT);
				if (ret < 0)
					return TEST_FAILURE;
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}
	}

	while (frags_processed < rcvd) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
		u64 addr = desc->addr, orig;

		orig = xsk_umem__extract_addr(addr);
		addr = xsk_umem__add_offset_to_addr(addr);

		if (!nb_frags) {
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
			if (!pkt) {
				ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
					       __func__, addr, desc->len);
				return TEST_FAILURE;
			}
		}

		print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
			      addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);

		if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
		    !is_metadata_correct(pkt, umem->buffer, addr)))
			return TEST_FAILURE;

		if (!nb_frags++)
			first_addr = addr;
		frags_processed++;
		pkt_len += desc->len;
		if (ifobj->use_fill_ring)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;

		if (pkt_continues(desc->options))
			continue;

		/* The complete packet has been received */
		if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr))
			return TEST_FAILURE;

		pkt_stream->nb_rx_pkts++;
		nb_frags = 0;
		pkt_len = 0;
	}

	if (nb_frags) {
		/* In the middle of a packet. Start over from the beginning of the packet. */
		idx_rx -= nb_frags;
		xsk_ring_cons__cancel(&xsk->rx, nb_frags);
		if (ifobj->use_fill_ring) {
			idx_fq -= nb_frags;
			xsk_ring_prod__cancel(&umem->fq, nb_frags);
		}
		frags_processed -= nb_frags;
		pkt_stream_cancel(pkt_stream);
		pkts_sent--;
	}

	if (ifobj->use_fill_ring)
		xsk_ring_prod__submit(&umem->fq, frags_processed);
	if (ifobj->release_rx)
		xsk_ring_cons__release(&xsk->rx, frags_processed);

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight -= pkts_sent;
	pthread_mutex_unlock(&pacing_mutex);
	pkts_sent = 0;

	return TEST_CONTINUE;
}

bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
			  unsigned long *bitmap)
{
	struct pkt_stream *pkt_stream = xsk->pkt_stream;

	if (!pkt_stream) {
		__set_bit(sock_num, bitmap);
		return false;
	}

	if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
		__set_bit(sock_num, bitmap);
		if (bitmap_full(bitmap, test->nb_sockets))
			return true;
	}

	return false;
}

static int receive_pkts(struct test_spec *test)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	struct xsk_socket_info *xsk;
	u32 sock_num = 0;
	int res, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;

	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (1) {
		xsk = &test->ifobj_rx->xsk_arr[sock_num];

		if ((all_packets_received(test, xsk, sock_num, bitmap)))
			break;

		res = __receive_pkts(test, xsk);
		if (!(res == TEST_PASS || res == TEST_CONTINUE))
			return res;

		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;

		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}
		sock_num = (sock_num + 1) % test->nb_sockets;
	}

	return TEST_PASS;
}

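/* Queue up to one batch of descriptors on the Tx ring and submit them.
 * Throttles the producer when too many packets are in flight so the
 * receiver's buffers cannot be overrun, and makes sure multi-frag packets
 * fit entirely within the current batch.
 */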
static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct xsk_umem_info *umem = ifobject->umem;
	bool use_poll = ifobject->use_poll;
	struct pollfd fds = { };
	int ret;

	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
	    buffer_len)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
		return TEST_CONTINUE;
	}

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLOUT;

	while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
		if (use_poll) {
			ret = poll(&fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	for (i = 0; i < xsk->batch_size; i++) {
		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
		u32 nb_frags_left, nb_frags, bytes_written = 0;

		if (!pkt)
			break;

		nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
		if (nb_frags > xsk->batch_size - i) {
			pkt_stream_cancel(pkt_stream);
			xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
			break;
		}
		nb_frags_left = nb_frags;

		while (nb_frags_left--) {
			struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

			tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
			if (pkt_stream->verbatim) {
				tx_desc->len = pkt->len;
				tx_desc->options = pkt->options;
			} else if (nb_frags_left) {
				tx_desc->len = umem->frame_size;
				tx_desc->options = XDP_PKT_CONTD;
			} else {
				tx_desc->len = pkt->len - bytes_written;
				tx_desc->options = 0;
			}
			if (pkt->valid)
				pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
					     bytes_written);
			bytes_written += tx_desc->len;

			print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
				      tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);

			if (nb_frags_left) {
				i++;
				if (pkt_stream->verbatim)
					pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
			}
		}

		if (pkt && pkt->valid) {
			valid_pkts++;
			valid_frags += nb_frags;
		}
	}

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_frags;

	if (use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}

static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	int ret;

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;
	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (xsk->outstanding_tx) {
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
			return TEST_FAILURE;
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	return TEST_PASS;
}

bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
{
	return bitmap_full(bitmap, test->nb_sockets);
}

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	bool timeout = !is_umem_valid(test->ifobj_rx);
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	u32 i, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	while (!(all_packets_sent(test, bitmap))) {
		for (i = 0; i < test->nb_sockets; i++) {
			struct pkt_stream *pkt_stream;

			pkt_stream = ifobject->xsk_arr[i].pkt_stream;
			if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
				__set_bit(i, bitmap);
				continue;
			}
			ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
			if (ret == TEST_CONTINUE && !test->fail)
				continue;

			if ((ret || test->fail) && !timeout)
				return TEST_FAILURE;

			if (ret == TEST_PASS && timeout)
				return ret;

			ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
			if (ret)
				return TEST_FAILURE;
		}
	}

	return TEST_PASS;
}

static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
{
	int fd = xsk_socket__fd(xsk), err;
	socklen_t optlen, expected_len;

	optlen = sizeof(*stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	expected_len = sizeof(struct xdp_statistics);
	if (optlen != expected_len) {
		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
			       __func__, expected_len, optlen);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int validate_rx_dropped(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	/* The receiver calls getsockopt after receiving the last (valid)
	 * packet which is not the final packet sent in this test (valid and
	 * invalid packets are sent in alternating fashion with the final
	 * packet being invalid). Since the last packet may or may not have
	 * been dropped already, both outcomes must be allowed.
	 */
	if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
	    stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_rx_full(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_ring_full)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_fill_empty(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_fill_ring_empty_descs)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	int fd = xsk_socket__fd(xsk);
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
			       __func__,
			       (unsigned long long)stats.tx_invalid_descs,
			       ifobject->xsk->pkt_stream->nb_pkts / 2);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
			 struct xsk_umem_info *umem, bool tx)
{
	int i, ret;

	for (i = 0; i < test->nb_sockets; i++) {
		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
		u32 ctr = 0;

		while (ctr++ < SOCK_RECONF_CTR) {
			ret = xsk_configure_socket(&ifobject->xsk_arr[i], umem,
						   ifobject, shared);
			if (!ret)
				break;

			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				return ret;
			usleep(USLEEP_MAX);
		}
		if (ifobject->busy_poll) {
			ret = enable_busy_poll(&ifobject->xsk_arr[i]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);

	if (ret)
		return ret;
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xskmap = test->ifobj_rx->xskmap;
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
	ifobject->umem->base_addr = 0;

	return 0;
}

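/* Pre-populate the fill ring with buffer addresses. When a packet stream is
 * supplied, addresses follow the stream's offsets; with @fill_up set, the
 * ring is filled all the way up even after the stream runs out.
 */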
static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
				  bool fill_up)
{
	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
	int ret;

	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
		buffers_to_fill = umem->num_frames;
	else
		buffers_to_fill = umem->fill_size;

	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
	if (ret != buffers_to_fill)
		return -ENOSPC;

	while (filled < buffers_to_fill) {
		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
		u64 addr;
		u32 i;

		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
			if (!pkt) {
				if (!fill_up)
					break;
				addr = filled * umem->frame_size + umem->base_addr;
			} else if (pkt->offset >= 0) {
				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
			} else {
				addr = pkt->offset + umem_alloc_buffer(umem);
			}

			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
			if (++filled >= buffers_to_fill)
				break;
		}
	}
	xsk_ring_prod__submit(&umem->fq, filled);
	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);

	pkt_stream_reset(pkt_stream);
	umem_reset_alloc(umem);

	return 0;
}

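/* Per-thread setup shared by the Rx and Tx workers: mmap the UMEM area
 * (doubled for shared-UMEM tests, huge pages in unaligned mode), create the
 * sockets and, on the Rx side, populate the fill ring and the xskmap.
 */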
static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int mmap_flags;
	u64 umem_sz;
	void *bufs;
	int ret;
	u32 i;

	umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;

	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;

	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		return -errno;

	ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
	if (ret)
		return ret;

	ret = xsk_configure(test, ifobject, ifobject->umem, false);
	if (ret)
		return ret;

	ifobject->xsk = &ifobject->xsk_arr[0];

	if (!ifobject->rx_on)
		return 0;

	ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
				     ifobject->use_fill_ring);
	if (ret)
		return ret;

	for (i = 0; i < test->nb_sockets; i++) {
		ifobject->xsk = &ifobject->xsk_arr[i];
		ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
		if (ret)
			return ret;
	}

	return 0;
}

void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	if (test->current_step == 1) {
		if (!ifobject->shared_umem) {
			if (thread_common_ops(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		} else {
			if (thread_common_ops_tx(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		}
	}

	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		test->fail = true;

	pthread_exit(NULL);
}

void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	int err;

	if (test->current_step == 1) {
		err = thread_common_ops(test, ifobject);
	} else {
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
		if (err)
			ksft_print_msg("Error: Failed to update xskmap, error %s\n",
				       strerror(-err));
	}

	pthread_barrier_wait(&barr);

	/* Exit only now in the error case so that we do not get stuck in the barrier */
	if (err) {
		test->fail = true;
		pthread_exit(NULL);
	}

	err = receive_pkts(test);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);

	if (err) {
		if (!test->adjust_tail) {
			test->fail = true;
		} else {
			bool supported;

			if (is_adjust_tail_supported(ifobject->xdp_progs, &supported))
				test->fail = true;
			else if (!supported)
				test->adjust_tail_support = false;
			else
				test->fail = true;
		}
	}

	pthread_exit(NULL);
}

static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;

	if (ifobj->shared_umem)
		umem_sz *= 2;

	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	xsk_umem__delete(ifobj->umem->umem);
	munmap(ifobj->umem->buffer, umem_sz);
}

static void handler(int signum)
{
	pthread_exit(NULL);
}

static bool xdp_prog_changed_rx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_rx;

	return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
}

static bool xdp_prog_changed_tx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_tx;

	return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
}

static int xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
			    struct bpf_map *xskmap, enum test_mode mode)
{
	int err;

	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
	if (err) {
		ksft_print_msg("Error attaching XDP program\n");
		return err;
	}

	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
			return -EINVAL;
		}

	ifobj->xdp_prog = xdp_prog;
	ifobj->xskmap = xskmap;
	ifobj->mode = mode;

	return 0;
}

static int xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
				struct ifobject *ifobj_tx)
{
	int err = 0;

	if (xdp_prog_changed_rx(test)) {
		err = xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
		if (err)
			return err;
	}

	if (!ifobj_tx || ifobj_tx->shared_umem)
		return 0;

	if (xdp_prog_changed_tx(test))
		err = xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);

	return err;
}

static void clean_sockets(struct test_spec *test, struct ifobject *ifobj)
{
	u32 i;

	if (!ifobj || !test)
		return;

	for (i = 0; i < test->nb_sockets; i++)
		xsk_socket__delete(ifobj->xsk_arr[i].xsk);
}

static void clean_umem(struct test_spec *test, struct ifobject *ifobj1, struct ifobject *ifobj2)
{
	if (!ifobj1)
		return;

	testapp_clean_xsk_umem(ifobj1);
	if (ifobj2 && !ifobj2->shared_umem)
		testapp_clean_xsk_umem(ifobj2);
}

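/* Run one traffic step: spawn the Rx thread (and optionally the Tx thread),
 * synchronize them on the barrier, and tear down sockets and UMEMs once the
 * final step of the test has completed or failed.
 */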
static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
				      struct ifobject *ifobj2)
{
	pthread_t t0, t1;
	int err;

	if (test->mtu > MAX_ETH_PKT_SIZE) {
		if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
						   (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
			ksft_print_msg("Multi buffer for zero-copy not supported.\n");
			return TEST_SKIP;
		}
		if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
						   (ifobj2 && !ifobj2->multi_buff_supp))) {
			ksft_print_msg("Multi buffer not supported.\n");
			return TEST_SKIP;
		}
	}
	err = test_spec_set_mtu(test, test->mtu);
	if (err) {
		ksft_print_msg("Error, could not set MTU.\n");
		return TEST_FAILURE;
	}

	if (ifobj2) {
		if (pthread_barrier_init(&barr, NULL, 2))
			return TEST_FAILURE;
		pkt_stream_reset(ifobj2->xsk->pkt_stream);
	}

	test->current_step++;
	pkt_stream_reset(ifobj1->xsk->pkt_stream);
	pkts_in_flight = 0;

	signal(SIGUSR1, handler);
	/* Spawn RX thread */
	pthread_create(&t0, NULL, ifobj1->func_ptr, test);

	if (ifobj2) {
		pthread_barrier_wait(&barr);
		if (pthread_barrier_destroy(&barr)) {
			pthread_kill(t0, SIGUSR1);
			clean_sockets(test, ifobj1);
			clean_umem(test, ifobj1, NULL);
			return TEST_FAILURE;
		}

		/* Spawn TX thread */
		pthread_create(&t1, NULL, ifobj2->func_ptr, test);

		pthread_join(t1, NULL);
	}

	if (!ifobj2)
		pthread_kill(t0, SIGUSR1);
	else
		pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		clean_sockets(test, ifobj1);
		clean_sockets(test, ifobj2);
		clean_umem(test, ifobj1, ifobj2);
	}

	if (test->fail)
		return TEST_FAILURE;

	return TEST_PASS;
}

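/* Standard entry point for two-threaded tests: verify that the requested
 * UMEM mode and HW ring size changes are actually supported before
 * attaching the XDP programs and starting the RX/TX workers.
 */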
static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_rx = test->ifobj_rx;
	struct ifobject *ifobj_tx = test->ifobj_tx;

	if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
	    (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
		ksft_print_msg("No huge pages present.\n");
		return TEST_SKIP;
	}

	if (test->set_ring) {
		if (ifobj_tx->hw_ring_size_supp) {
			if (set_ring_size(ifobj_tx)) {
				ksft_print_msg("Failed to change HW ring size.\n");
				return TEST_FAILURE;
			}
		} else {
			ksft_print_msg("Changing HW ring size not supported.\n");
			return TEST_SKIP;
		}
	}

	if (xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx))
		return TEST_FAILURE;
	return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
}

static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
{
	return __testapp_validate_traffic(test, ifobj, NULL);
}

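/* Run the same traffic pass MAX_TEARDOWN_ITER times with a test spec
 * reset in between, exercising repeated socket setup and teardown.
 */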
int testapp_teardown(struct test_spec *test)
{
	int i;

	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
		if (testapp_validate_traffic(test))
			return TEST_FAILURE;
		test_spec_reset(test);
	}

	return TEST_PASS;
}

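/* Exchange the worker functions of the two interface objects and then
 * swap the pointers themselves, turning the RX object into the TX object
 * and vice versa.
 */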
static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
{
	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
	struct ifobject *tmp_ifobj = (*ifobj1);

	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
	(*ifobj2)->func_ptr = tmp_func_ptr;

	*ifobj1 = *ifobj2;
	*ifobj2 = tmp_ifobj;
}

int testapp_bidirectional(struct test_spec *test)
{
	int res;

	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	print_verbose("Switching Tx/Rx direction\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	return res;
}

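/* Hand both ends over to the second socket in each xsk_arr and repopulate
 * the xskmap with the new RX socket, so the next validation step runs on
 * a fresh socket pair.
 */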
static int swap_xsk_resources(struct test_spec *test)
{
	int ret;

	test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
	test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
	test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
	test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];

	ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
	if (ret)
		return TEST_FAILURE;

	return TEST_PASS;
}

int testapp_xdp_prog_cleanup(struct test_spec *test)
{
	test->total_steps = 2;
	test->nb_sockets = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	if (swap_xsk_resources(test)) {
		clean_sockets(test, test->ifobj_rx);
		clean_sockets(test, test->ifobj_tx);
		clean_umem(test, test->ifobj_rx, test->ifobj_tx);
		return TEST_FAILURE;
	}

	return testapp_validate_traffic(test);
}

int testapp_headroom(struct test_spec *test)
{
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	return testapp_validate_traffic(test);
}

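/* Provoke RX drops: the headroom is sized so that only MIN_PKT_SIZE * 3
 * bytes of each frame remain usable, and half the stream is replaced with
 * MIN_PKT_SIZE * 4 sized packets that cannot fit. validate_rx_dropped()
 * then checks the socket statistics.
 */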
int testapp_stats_rx_dropped(struct test_spec *test)
{
	if (test->mode == TEST_MODE_ZC) {
		ksft_print_msg("Cannot run RX_DROPPED test for ZC mode\n");
		return TEST_SKIP;
	}

	if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
		return TEST_FAILURE;
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	if (pkt_stream_receive_half(test))
		return TEST_FAILURE;
	test->ifobj_rx->validation_func = validate_rx_dropped;
	return testapp_validate_traffic(test);
}

int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
		return TEST_FAILURE;
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	return testapp_validate_traffic(test);
}

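/* Overrun the RX ring: send half again as many packets as there are UMEM
 * buffers while the RX queue holds only DEFAULT_UMEM_BUFFERS entries and
 * the RX side does not release consumed buffers, then let
 * validate_rx_full() check the ring-full statistic.
 */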
int testapp_stats_rx_full(struct test_spec *test)
{
	struct pkt_stream *tmp;

	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	if (!tmp)
		return TEST_FAILURE;
	test->ifobj_tx->xsk->pkt_stream = tmp;

	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
	if (!tmp)
		return TEST_FAILURE;
	test->ifobj_rx->xsk->pkt_stream = tmp;

	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	return testapp_validate_traffic(test);
}

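/* Starve the fill ring: the RX side never populates it, so incoming
 * packets hit an empty fill ring and validate_fill_empty() checks the
 * corresponding statistic.
 */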
int testapp_stats_fill_empty(struct test_spec *test)
{
	struct pkt_stream *tmp;

	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	if (!tmp)
		return TEST_FAILURE;
	test->ifobj_tx->xsk->pkt_stream = tmp;

	tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
	if (!tmp)
		return TEST_FAILURE;
	test->ifobj_rx->xsk->pkt_stream = tmp;

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	return testapp_validate_traffic(test);
}

int testapp_send_receive_unaligned(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a 4K buffer boundary */
	if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
		return TEST_FAILURE;

	return testapp_validate_traffic(test);
}

int testapp_send_receive_unaligned_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

int testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};

	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

int testapp_send_receive_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
		return TEST_FAILURE;

	return testapp_validate_traffic(test);
}

int testapp_invalid_desc_mb(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Valid packet for sync to start with */
		{0, MIN_PKT_SIZE, 0, true, 0},
		/* Zero frame len is not legal */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, 0, 0, false, 0},
		/* Invalid address in the second frame */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid len in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid options in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
		/* Transmit 2 frags, receive 3 */
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
		/* Middle frame crosses chunk boundary with small length */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
		/* Valid packet for sync so that something is received */
		{0, MIN_PKT_SIZE, 0, true, 0}};

	if (umem->unaligned_mode) {
		/* Crossing a chunk boundary allowed */
		pkts[12].valid = true;
		pkts[13].valid = true;
	}

	test->mtu = MAX_ETH_JUMBO_SIZE;
	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

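/* Feed the TX ring a table of mostly invalid descriptors (bad addresses,
 * bad lengths, boundary straddling) and check that only the entries
 * marked valid make it through. The expectations are adjusted below for
 * unaligned mode, 2K frames and shared UMEMs.
 */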
int testapp_invalid_desc(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, MIN_PKT_SIZE, 0, true},
		/* Allowed packet */
		{0, MIN_PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, MIN_PKT_SIZE, 0, false},
		/* Packet too large */
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* Up to end of umem allowed */
		{umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
		/* After umem ends */
		{umem_size, MIN_PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 4K boundary */
		{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
		/* Valid packet for sync so that something is received */
		{0, MIN_PKT_SIZE, 0, true}};

	if (umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[7].valid = true;
	}
	if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[8].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		pkts[4].offset += umem_size;
		pkts[5].offset += umem_size;
		pkts[6].offset += umem_size;
	}

	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

int testapp_xdp_drop(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	if (pkt_stream_receive_half(test))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

int testapp_xdp_metadata_copy(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
			       skel_tx->progs.xsk_xdp_populate_metadata,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);
	test->ifobj_rx->use_metadata = true;

	skel_rx->bss->count = 0;

	return testapp_validate_traffic(test);
}

int testapp_xdp_shared_umem(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
	int ret;

	test->total_steps = 1;
	test->nb_sockets = 2;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
			       skel_tx->progs.xsk_xdp_shared_umem,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	if (pkt_stream_even_odd_sequence(test))
		return TEST_FAILURE;

	ret = testapp_validate_traffic(test);

	release_even_odd_sequence(test);

	return ret;
}

int testapp_poll_txq_tmout(struct test_spec *test)
{
	test->ifobj_tx->use_poll = true;
	/* Create invalid frames by setting the UMEM frame_size and the packet length both to 2048 */
	test->ifobj_tx->umem->frame_size = 2048;
	if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
		return TEST_FAILURE;
	return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
}

int testapp_poll_rxq_tmout(struct test_spec *test)
{
	test->ifobj_rx->use_poll = true;
	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}

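/* Build one packet with exactly the maximum number of frags the data path
 * accepts, followed by one that keeps XDP_PKT_CONTD set on its final frag
 * and must therefore be rejected, bracketed by valid sync packets.
 */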
int testapp_too_many_frags(struct test_spec *test)
{
	struct pkt *pkts;
	u32 max_frags, i;
	int ret = TEST_FAILURE;

	if (test->mode == TEST_MODE_ZC) {
		max_frags = test->ifobj_tx->xdp_zc_max_segs;
	} else {
		max_frags = get_max_skb_frags();
		if (!max_frags) {
			ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
			max_frags = 17;
		}
		max_frags += 1;
	}

	pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
	if (!pkts)
		return TEST_FAILURE;

	test->mtu = MAX_ETH_JUMBO_SIZE;

	/* Valid packet for sync */
	pkts[0].len = MIN_PKT_SIZE;
	pkts[0].valid = true;

	/* One valid packet with the max amount of frags */
	for (i = 1; i < max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = true;
	}
	pkts[max_frags].options = 0;

	/* An invalid packet with the max amount of frags but signals packet
	 * continues on the last frag
	 */
	for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = false;
	}

	/* Valid packet for sync */
	pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
	pkts[2 * max_frags + 1].valid = true;

	if (pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2)) {
		free(pkts);
		return TEST_FAILURE;
	}

	ret = testapp_validate_traffic(test);
	free(pkts);
	return ret;
}

static int xsk_load_xdp_programs(struct ifobject *ifobj)
{
	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
	if (libbpf_get_error(ifobj->xdp_progs))
		return libbpf_get_error(ifobj->xdp_progs);

	return 0;
}

/* Probe for huge page support by trying a 2MB-backed anonymous mapping */
static bool hugepages_present(void)
{
	size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	/* MAP_HUGE_2MB is a mapping flag, not an offset, so request the
	 * 2MB page size in the flags argument and keep the offset at 0.
	 */
	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
	if (bufs == MAP_FAILED)
		return false;

	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	munmap(bufs, mmap_sz);
	return true;
}

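/* Per-interface init: load the XDP test programs, probe for huge page
 * support (needed for unaligned mode) and query the driver's XDP feature
 * flags to record multi-buffer and zero-copy capabilities.
 */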
int init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
	int err;

	ifobj->func_ptr = func_ptr;

	err = xsk_load_xdp_programs(ifobj);
	if (err) {
		ksft_print_msg("Error loading XDP program\n");
		return err;
	}

	if (hugepages_present())
		ifobj->unaligned_supp = true;

	err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
	if (err) {
		ksft_print_msg("Error querying XDP capabilities\n");
		return err;
	}
	if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
		ifobj->multi_buff_supp = true;
	if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (query_opts.xdp_zc_max_segs > 1) {
			ifobj->multi_buff_zc_supp = true;
			ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
		} else {
			ifobj->xdp_zc_max_segs = 0;
		}
	}

	return 0;
}

int testapp_send_receive(struct test_spec *test)
{
	return testapp_validate_traffic(test);
}

int testapp_send_receive_2k_frame(struct test_spec *test)
{
	test->ifobj_tx->umem->frame_size = 2048;
	test->ifobj_rx->umem->frame_size = 2048;
	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

int testapp_poll_rx(struct test_spec *test)
{
	test->ifobj_rx->use_poll = true;
	return testapp_validate_traffic(test);
}

int testapp_poll_tx(struct test_spec *test)
{
	test->ifobj_tx->use_poll = true;
	return testapp_validate_traffic(test);
}

int testapp_aligned_inv_desc(struct test_spec *test)
{
	return testapp_invalid_desc(test);
}

int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
{
	test->ifobj_tx->umem->frame_size = 2048;
	test->ifobj_rx->umem->frame_size = 2048;
	return testapp_invalid_desc(test);
}

int testapp_unaligned_inv_desc(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	return testapp_invalid_desc(test);
}

int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
{
	u64 page_size, umem_size;

	/* Odd frame size so the UMEM doesn't end near a page boundary. */
	test->ifobj_tx->umem->frame_size = 4001;
	test->ifobj_rx->umem->frame_size = 4001;
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* This test exists to test descriptors that straddle the end of
	 * the UMEM but not a page.
	 */
	page_size = sysconf(_SC_PAGESIZE);
	umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
	assert(umem_size % page_size > MIN_PKT_SIZE);
	assert(umem_size % page_size < page_size - MIN_PKT_SIZE);

	return testapp_invalid_desc(test);
}

int testapp_aligned_inv_desc_mb(struct test_spec *test)
{
	return testapp_invalid_desc_mb(test);
}

int testapp_unaligned_inv_desc_mb(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	return testapp_invalid_desc_mb(test);
}

int testapp_xdp_metadata(struct test_spec *test)
{
	return testapp_xdp_metadata_copy(test);
}

int testapp_xdp_metadata_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	return testapp_xdp_metadata_copy(test);
}

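/* Shrink the HW TX ring to the batch size and run first with a batch of
 * one, then with a batch just below the ring size, exercising both
 * extremes of the ring/batch relationship.
 */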
int testapp_hw_sw_min_ring_size(struct test_spec *test)
{
	int ret;

	test->set_ring = true;
	test->total_steps = 2;
	test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
	test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
	test->ifobj_tx->xsk->batch_size = 1;
	test->ifobj_rx->xsk->batch_size = 1;
	ret = testapp_validate_traffic(test);
	if (ret)
		return ret;

	/* Set batch size to hw_ring_size - 1 */
	test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
	test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
	return testapp_validate_traffic(test);
}

int testapp_hw_sw_max_ring_size(struct test_spec *test)
{
	u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
	int ret;

	test->set_ring = true;
	test->total_steps = 2;
	test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
	test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
	test->ifobj_rx->umem->num_frames = max_descs;
	test->ifobj_rx->umem->fill_size = max_descs;
	test->ifobj_rx->umem->comp_size = max_descs;
	test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;

	ret = testapp_validate_traffic(test);
	if (ret)
		return ret;

	/* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
	 * updating the Rx HW tail register.
	 */
	test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
	test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
	if (pkt_stream_replace(test, max_descs, MIN_PKT_SIZE)) {
		clean_sockets(test, test->ifobj_tx);
		clean_sockets(test, test->ifobj_rx);
		clean_umem(test, test->ifobj_rx, test->ifobj_tx);
		return TEST_FAILURE;
	}

	return testapp_validate_traffic(test);
}

static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
			       skel_tx->progs.xsk_xdp_adjust_tail,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	skel_rx->bss->adjust_value = adjust_value;

	return testapp_validate_traffic(test);
}

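/* Common driver for the adjust_tail tests: send fixed-size packets and
 * expect them back resized by @value via the xsk_xdp_adjust_tail program,
 * skipping gracefully when the data path cannot resize packets.
 */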
static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
{
	int ret;

	test->adjust_tail_support = true;
	test->adjust_tail = true;
	test->total_steps = 1;

	ret = pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
	if (ret)
		return TEST_FAILURE;

	ret = pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
	if (ret)
		return TEST_FAILURE;

	ret = testapp_xdp_adjust_tail(test, value);
	if (ret)
		return ret;

	if (!test->adjust_tail_support) {
		ksft_print_msg("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
			       mode_string(test), busy_poll_string(test));
		return TEST_SKIP;
	}

	return 0;
}

int testapp_adjust_tail_shrink(struct test_spec *test)
{
	/* Shrink by 4 bytes for testing purposes */
	return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
}

int testapp_adjust_tail_shrink_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	/* Shrink by the frag size */
	return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
}

int testapp_adjust_tail_grow(struct test_spec *test)
{
	/* Grow by 4 bytes for testing purposes */
	return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
}

int testapp_adjust_tail_grow_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	/* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
	return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
				   XSK_UMEM__LARGE_FRAME_SIZE * 2);
}

int testapp_tx_queue_consumer(struct test_spec *test)
{
	int nr_packets;

	if (test->mode == TEST_MODE_ZC) {
		ksft_print_msg("Cannot run TX_QUEUE_CONSUMER test for ZC mode\n");
		return TEST_SKIP;
	}

	nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
	if (pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE))
		return TEST_FAILURE;
	test->ifobj_tx->xsk->batch_size = nr_packets;
	test->ifobj_tx->xsk->check_consumer = true;

	return testapp_validate_traffic(test);
}

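/* Allocate an ifobject together with its socket array and UMEM
 * bookkeeping; everything is freed again by ifobject_delete().
 */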
struct ifobject *ifobject_create(void)
{
	struct ifobject *ifobj;

	ifobj = calloc(1, sizeof(struct ifobject));
	if (!ifobj)
		return NULL;

	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
	if (!ifobj->xsk_arr)
		goto out_xsk_arr;

	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
	if (!ifobj->umem)
		goto out_umem;

	return ifobj;

out_umem:
	free(ifobj->xsk_arr);
out_xsk_arr:
	free(ifobj);
	return NULL;
}

void ifobject_delete(struct ifobject *ifobj)
{
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}