Lines matching full:xsk

46  *       then remove xsk sockets from queue 0 on both veth interfaces and
103 #include "xsk.h"
168 static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr) in gen_eth_hdr() argument
170 memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN); in gen_eth_hdr()
171 memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN); in gen_eth_hdr()
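
The two memcpy() calls at lines 170-171 are the whole of the test's Ethernet-header generation: each socket carries a source and destination MAC that get written straight into the frame. A minimal sketch; the protocol value is an assumption, since the hits above do not show it:

    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <string.h>

    /* Sketch of gen_eth_hdr(); ETH_P_LOOPBACK is illustrative. */
    static void fill_eth_hdr(struct ethhdr *eth, const unsigned char *dst,
                             const unsigned char *src)
    {
        memcpy(eth->h_dest, dst, ETH_ALEN);
        memcpy(eth->h_source, src, ETH_ALEN);
        eth->h_proto = htons(ETH_P_LOOPBACK);
    }
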
242 static void enable_busy_poll(struct xsk_socket_info *xsk) in enable_busy_poll() argument
247 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, in enable_busy_poll()
252 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, in enable_busy_poll()
256 sock_opt = xsk->batch_size; in enable_busy_poll()
257 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, in enable_busy_poll()
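
enable_busy_poll() at lines 242-257 is the standard three-step preferred-busy-poll setup for AF_XDP (Linux 5.11+): prefer busy polling, set the spin time, cap the per-poll budget. A self-contained sketch; the timeout value is the caller's choice, and line 256 shows the test uses its batch size as the budget:

    #include <sys/socket.h>

    /* Older libcs may lack the two newer options; the values match
     * include/uapi/asm-generic/socket.h. */
    #ifndef SO_PREFER_BUSY_POLL
    #define SO_PREFER_BUSY_POLL 69
    #endif
    #ifndef SO_BUSY_POLL_BUDGET
    #define SO_BUSY_POLL_BUDGET 70
    #endif

    static int xsk_enable_busy_poll(int fd, int poll_usecs, int budget)
    {
        int on = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on)))
            return -1;
        /* How long one busy-poll attempt spins, in microseconds. */
        if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &poll_usecs,
                       sizeof(poll_usecs)))
            return -1;
        /* Max descriptors handled per poll cycle. */
        return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget,
                          sizeof(budget));
    }

The fd is the AF_XDP socket's, obtained with xsk_socket__fd() as in line 247.
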
262 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, in __xsk_configure_socket() argument
269 xsk->umem = umem; in __xsk_configure_socket()
270 cfg.rx_size = xsk->rxqsize; in __xsk_configure_socket()
282 txr = ifobject->tx_on ? &xsk->tx : NULL; in __xsk_configure_socket()
283 rxr = ifobject->rx_on ? &xsk->rx : NULL; in __xsk_configure_socket()
284 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); in __xsk_configure_socket()
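
Line 284 is the actual socket creation. Note this is the selftests' private copy of xsk.h, whose xsk_socket__create() takes an ifindex; the libxdp variant takes an interface name instead. A sketch under that assumption, with default ring sizes and an illustrative bind flag:

    /* Assumes the selftests' "xsk.h" plus <linux/if_xdp.h>, and a umem
     * already set up with xsk_umem__create(). */
    static int make_socket(struct xsk_socket **sock, int ifindex,
                           struct xsk_umem *umem, struct xsk_ring_cons *rx,
                           struct xsk_ring_prod *tx)
    {
        struct xsk_socket_config cfg = {
            .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
            .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
            .bind_flags = XDP_USE_NEED_WAKEUP, /* illustrative */
        };

        /* Pass NULL for rx or tx to get a one-directional socket, as
         * lines 282-283 do via tx_on/rx_on. */
        return xsk_socket__create(sock, ifindex, 0 /* queue 0 */, umem,
                                  rx, tx, &cfg);
    }
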
291 struct xsk_socket_info *xsk; in ifobj_zc_avail() local
311 xsk = calloc(1, sizeof(struct xsk_socket_info)); in ifobj_zc_avail()
312 if (!xsk) in ifobj_zc_avail()
316 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; in ifobj_zc_avail()
317 ret = __xsk_configure_socket(xsk, umem, ifobject, false); in ifobj_zc_avail()
321 xsk_socket__delete(xsk->xsk); in ifobj_zc_avail()
322 free(xsk); in ifobj_zc_avail()
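
ifobj_zc_avail() turns that create/delete pair into a capability probe: bind a throwaway socket in zero-copy mode and record whether it succeeded. The shape, with the zero-copy bind flag assumed (the flag itself is not in the hits above):

    /* A successful bind with XDP_ZEROCOPY means the driver supports
     * zero copy; the socket exists only to be torn down again. */
    cfg.bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
    zc_avail = !xsk_socket__create(&sock, ifindex, 0, umem, &rx, &tx, &cfg);
    if (zc_avail)
        xsk_socket__delete(sock);
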
484 ifobj->xsk = &ifobj->xsk_arr[0]; in __test_spec_init()
533 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; in __test_spec_init()
535 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; in __test_spec_init()
638 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream; in pkt_stream_restore_default()
639 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream; in pkt_stream_restore_default()
642 pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream); in pkt_stream_restore_default()
643 test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default; in pkt_stream_restore_default()
647 pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream); in pkt_stream_restore_default()
648 test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default; in pkt_stream_restore_default()
766 ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len); in pkt_stream_replace_ifobject()
781 pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream); in __pkt_stream_replace_half()
782 for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2) in __pkt_stream_replace_half()
785 ifobj->xsk->pkt_stream = pkt_stream; in __pkt_stream_replace_half()
796 struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream; in pkt_stream_receive_half()
799 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts, in pkt_stream_receive_half()
801 pkt_stream = test->ifobj_rx->xsk->pkt_stream; in pkt_stream_receive_half()
838 static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len, in pkt_generate() argument
847 gen_eth_hdr(xsk, data); in pkt_generate()
914 test->ifobj_tx->xsk->pkt_stream = pkt_stream; in pkt_stream_generate_custom()
917 test->ifobj_rx->xsk->pkt_stream = pkt_stream; in pkt_stream_generate_custom()
1101 static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret) in kick_tx_with_check() argument
1107 cons = load_value(xsk->tx.consumer); in kick_tx_with_check()
1108 ready_to_send = load_value(xsk->tx.producer) - cons; in kick_tx_with_check()
1109 *ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); in kick_tx_with_check()
1111 delta = load_value(xsk->tx.consumer) - cons; in kick_tx_with_check()
1112 /* By default, xsk should consume exactly @max_budget descs at one in kick_tx_with_check()
1125 static int kick_tx(struct xsk_socket_info *xsk) in kick_tx() argument
1129 if (xsk->check_consumer) { in kick_tx()
1130 if (!kick_tx_with_check(xsk, &ret)) in kick_tx()
1133 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); in kick_tx()
1144 static int kick_rx(struct xsk_socket_info *xsk) in kick_rx() argument
1148 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); in kick_rx()
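
kick_tx() and kick_rx() wrap the canonical AF_XDP wakeup idiom: a zero-length non-blocking sendto() drives the kernel's Tx path, a zero-length recvfrom() the Rx path. A combined sketch; the set of errnos treated as retryable is typical AF_XDP practice and may not match the test's exactly:

    #include <errno.h>
    #include <stdbool.h>
    #include <sys/socket.h>

    static int xsk_kick(int fd, bool tx)
    {
        int ret;

        if (tx)
            ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
        else
            ret = recvfrom(fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);

        /* "Nothing to do right now" outcomes are not failures. */
        if (ret >= 0 || errno == EAGAIN || errno == EBUSY ||
            errno == ENOBUFS || errno == ENETDOWN)
            return 0;
        return -errno;
    }

kick_tx_with_check() (lines 1101-1112) additionally snapshots tx.consumer before and after the syscall to verify how many descriptors a single send consumed.
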
1155 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) in complete_pkts() argument
1161 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) { in complete_pkts()
1162 ret = kick_tx(xsk); in complete_pkts()
1167 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); in complete_pkts()
1169 if (rcvd > xsk->outstanding_tx) { in complete_pkts()
1170 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); in complete_pkts()
1178 xsk_ring_cons__release(&xsk->umem->cq, rcvd); in complete_pkts()
1179 xsk->outstanding_tx -= rcvd; in complete_pkts()
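
complete_pkts() is the completion-ring half of Tx accounting: wake the kernel if the ring asks for it, peek up to batch_size finished addresses off the umem's completion queue, release them, and shrink the outstanding counter. A sketch of the core; the struct fields (umem->cq, outstanding_tx) are taken from the hits above, and the test flags rcvd > outstanding_tx as an error (lines 1169-1170):

    /* Reap Tx completions; assumes the selftests' xsk.h and structs. */
    static void reap_completions(struct xsk_socket_info *xsk, __u32 batch_size)
    {
        __u32 idx = 0;
        __u32 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);

        for (__u32 i = 0; i < rcvd; i++) {
            /* Each entry is the umem address of a frame the kernel has
             * finished sending; a real pool would recycle it here. */
            __u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + i);
            (void)addr;
        }
        if (rcvd) {
            xsk_ring_cons__release(&xsk->umem->cq, rcvd);
            xsk->outstanding_tx -= rcvd;
        }
    }
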
1185 static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk) in __receive_pkts() argument
1189 struct pkt_stream *pkt_stream = xsk->pkt_stream; in __receive_pkts()
1191 struct xsk_umem_info *umem = xsk->umem; in __receive_pkts()
1197 fds.fd = xsk_socket__fd(xsk->xsk); in __receive_pkts()
1200 ret = kick_rx(xsk); in __receive_pkts()
1221 rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx); in __receive_pkts()
1238 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); in __receive_pkts()
1284 xsk_ring_cons__cancel(&xsk->rx, nb_frags); in __receive_pkts()
1295 xsk_ring_cons__release(&xsk->rx, frags_processed); in __receive_pkts()
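
__receive_pkts() is the Rx consumer loop: kick the socket, poll if nothing is ready, then peek a batch from the Rx ring, walk each descriptor (cancelling a partially received multi-frag packet, line 1284), and release what was processed. The single-frag core, assuming the umem's mapped area is available as umem_area:

    /* Drain one batch from the Rx ring. */
    static void drain_rx(struct xsk_socket_info *xsk, void *umem_area)
    {
        __u32 idx_rx = 0;
        __u32 rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);

        for (__u32 i = 0; i < rcvd; i++) {
            const struct xdp_desc *desc =
                xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
            void *data = xsk_umem__get_data(umem_area, desc->addr);

            /* The test validates desc->len bytes at data against its
             * expected packet stream here. */
            (void)data;
        }
        if (rcvd)
            xsk_ring_cons__release(&xsk->rx, rcvd);
    }
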
1305 bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num, in all_packets_received() argument
1308 struct pkt_stream *pkt_stream = xsk->pkt_stream; in all_packets_received()
1328 struct xsk_socket_info *xsk; in receive_pkts() local
1339 xsk = &test->ifobj_rx->xsk_arr[sock_num]; in receive_pkts()
1341 if ((all_packets_received(test, xsk, sock_num, bitmap))) in receive_pkts()
1344 res = __receive_pkts(test, xsk); in receive_pkts()
1362 static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout) in __send_pkts() argument
1365 struct pkt_stream *pkt_stream = xsk->pkt_stream; in __send_pkts()
1373 if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) / in __send_pkts()
1375 ret = kick_tx(xsk); in __send_pkts()
1381 fds.fd = xsk_socket__fd(xsk->xsk); in __send_pkts()
1384 while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) { in __send_pkts()
1404 complete_pkts(xsk, xsk->batch_size); in __send_pkts()
1407 for (i = 0; i < xsk->batch_size; i++) { in __send_pkts()
1415 if (nb_frags > xsk->batch_size - i) { in __send_pkts()
1417 xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i); in __send_pkts()
1423 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); in __send_pkts()
1437 pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb, in __send_pkts()
1461 xsk_ring_prod__submit(&xsk->tx, i); in __send_pkts()
1462 xsk->outstanding_tx += valid_frags; in __send_pkts()
1476 if (complete_pkts(xsk, i)) in __send_pkts()
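
__send_pkts() is the mirror-image producer loop: reserve a batch of Tx slots (reaping completions until the ring has room, lines 1384-1404), fill one descriptor per frame, submit, bump outstanding_tx, and kick if the ring requests a wakeup. A single-frag sketch reusing reap_completions() and xsk_kick() from above; next_frame_addr() and pkt_len are hypothetical stand-ins for the test's packet-stream bookkeeping:

    static void produce_tx(struct xsk_socket_info *xsk, __u32 pkt_len)
    {
        __u32 idx = 0;

        /* No room? Reap completions until the reservation succeeds. */
        while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) <
               xsk->batch_size)
            reap_completions(xsk, xsk->batch_size);

        for (__u32 i = 0; i < xsk->batch_size; i++) {
            struct xdp_desc *desc =
                xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

            desc->addr = next_frame_addr(); /* hypothetical allocator */
            desc->len = pkt_len;
        }
        xsk_ring_prod__submit(&xsk->tx, xsk->batch_size);
        xsk->outstanding_tx += xsk->batch_size;

        if (xsk_ring_prod__needs_wakeup(&xsk->tx))
            xsk_kick(xsk_socket__fd(xsk->xsk), true);
    }
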
1486 static int wait_for_tx_completion(struct xsk_socket_info *xsk) in wait_for_tx_completion() argument
1496 while (xsk->outstanding_tx) { in wait_for_tx_completion()
1505 complete_pkts(xsk, xsk->batch_size); in wait_for_tx_completion()
1550 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) in get_xsk_stats() argument
1552 int fd = xsk_socket__fd(xsk), err; in get_xsk_stats()
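
get_xsk_stats() is a thin getsockopt(SOL_XDP, XDP_STATISTICS) wrapper; the validators below pick individual counters out of the result. Essentially:

    #include <errno.h>
    #include <linux/if_xdp.h>
    #include <sys/socket.h>

    /* Fetch the kernel's per-socket AF_XDP counters (rx_dropped,
     * rx_ring_full, rx_fill_ring_empty_descs, tx_invalid_descs, ...). */
    static int xsk_get_stats(int fd, struct xdp_statistics *stats)
    {
        socklen_t optlen = sizeof(*stats);

        if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen))
            return -errno;
        if (optlen != sizeof(*stats))
            return -EINVAL;
        return 0;
    }
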
1575 struct xsk_socket *xsk = ifobject->xsk->xsk; in validate_rx_dropped() local
1579 err = kick_rx(ifobject->xsk); in validate_rx_dropped()
1583 err = get_xsk_stats(xsk, &stats); in validate_rx_dropped()
1593 if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 || in validate_rx_dropped()
1594 stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1) in validate_rx_dropped()
1602 struct xsk_socket *xsk = ifobject->xsk->xsk; in validate_rx_full() local
1607 err = kick_rx(ifobject->xsk); in validate_rx_full()
1611 err = get_xsk_stats(xsk, &stats); in validate_rx_full()
1623 struct xsk_socket *xsk = ifobject->xsk->xsk; in validate_fill_empty() local
1628 err = kick_rx(ifobject->xsk); in validate_fill_empty()
1632 err = get_xsk_stats(xsk, &stats); in validate_fill_empty()
1644 struct xsk_socket *xsk = ifobject->xsk->xsk; in validate_tx_invalid_descs() local
1645 int fd = xsk_socket__fd(xsk); in validate_tx_invalid_descs()
1658 if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) { in validate_tx_invalid_descs()
1662 ifobject->xsk->pkt_stream->nb_pkts); in validate_tx_invalid_descs()
1697 ifobject->xsk = &ifobject->xsk_arr[0]; in thread_common_ops_tx()
1772 ifobject->xsk = &ifobject->xsk_arr[0]; in thread_common_ops()
1777 xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring); in thread_common_ops()
1780 ifobject->xsk = &ifobject->xsk_arr[i]; in thread_common_ops()
1781 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i); in thread_common_ops()
1820 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0); in worker_testapp_validate_rx()
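
xsk_update_xskmap() (lines 1781, 1820, 2075) is what wires a socket to the XDP program: it stores the socket fd at a queue index in a BPF_MAP_TYPE_XSKMAP so the program's bpf_redirect_map() can steer packets to it. Reduced to the raw libbpf call:

    #include <bpf/bpf.h>

    /* Equivalent of xsk_update_xskmap(): key is the queue index, value
     * is the AF_XDP socket's fd. */
    static int xskmap_add(int map_fd, __u32 index, int sock_fd)
    {
        return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
    }

swap_xsk_resources() (lines 2072-2075) uses the same call to repoint index 0 at the second socket mid-test.
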
1944 pkt_stream_reset(ifobj2->xsk->pkt_stream); in __testapp_validate_traffic()
1948 pkt_stream_reset(ifobj1->xsk->pkt_stream); in __testapp_validate_traffic()
1976 xsk_socket__delete(ifobj2->xsk_arr[i].xsk); in __testapp_validate_traffic()
1979 xsk_socket__delete(ifobj1->xsk_arr[i].xsk); in __testapp_validate_traffic()
2072 test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1]; in swap_xsk_resources()
2073 test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1]; in swap_xsk_resources()
2075 ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0); in swap_xsk_resources()
2125 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); in testapp_stats_rx_full()
2127 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; in testapp_stats_rx_full()
2136 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); in testapp_stats_fill_empty()
2269 skel_rx->maps.xsk, skel_tx->maps.xsk); in testapp_xdp_drop()
2285 skel_rx->maps.xsk, skel_tx->maps.xsk); in testapp_xdp_metadata_copy()
2312 skel_rx->maps.xsk, skel_tx->maps.xsk); in testapp_xdp_shared_umem()
2547 test->ifobj_tx->xsk->batch_size = 1; in testapp_hw_sw_min_ring_size()
2548 test->ifobj_rx->xsk->batch_size = 1; in testapp_hw_sw_min_ring_size()
2554 test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1; in testapp_hw_sw_min_ring_size()
2555 test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1; in testapp_hw_sw_min_ring_size()
2571 test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; in testapp_hw_sw_max_ring_size()
2572 test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; in testapp_hw_sw_max_ring_size()
2581 test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8; in testapp_hw_sw_max_ring_size()
2582 test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8; in testapp_hw_sw_max_ring_size()
2594 skel_rx->maps.xsk, skel_tx->maps.xsk); in testapp_xdp_adjust_tail()
2663 test->ifobj_tx->xsk->batch_size = nr_packets; in testapp_tx_queue_consumer()
2664 test->ifobj_tx->xsk->check_consumer = true; in testapp_tx_queue_consumer()