Lines Matching +full:num +full:- +full:rxq
1 /* SPDX-License-Identifier: GPL-2.0-only */
28 * pick maximum pointer-compatible alignment.
36 * struct libeth_xdp_buff - libeth extension over &xdp_buff
40 * @priv: driver-private scratchspace
43 * to quickly get frame metadata from xdpmo and driver buff-to-xdp callbacks
45 * Pointer/layout-compatible with &xdp_buff and &xdp_buff_xsk.
65 * __LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
67 * @...: sizeof() of the driver-private data
72 * LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
74 * @...: type or variable name of the driver-private data
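A short usage sketch of the two declaration macros above, assuming a hypothetical driver-private scratch struct (my_xdp_scratch is not part of libeth):

	/* driver-private scratch stored in the buff's @priv area */
	struct my_xdp_scratch {
		u32	rx_qid;
	};

	/* in the Rx NAPI poll: an on-stack buff with room for the scratch data */
	LIBETH_XDP_ONSTACK_BUFF(xdp, struct my_xdp_scratch);

	/* same, when only the size is at hand */
	__LIBETH_XDP_ONSTACK_BUFF(xdp2, sizeof(struct my_xdp_scratch));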
105 * libeth_xdpsq_num - calculate optimal number of XDPSQs for this device + sys
106 * @rxq: current number of active Rx queues
116 static inline u32 libeth_xdpsq_num(u32 rxq, u32 txq, u32 max) in libeth_xdpsq_num() argument
118 return min(max(nr_cpu_ids, rxq), max - txq); in libeth_xdpsq_num()
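For example, a driver carving out its queue vectors at configure time might size the XDPSQ set like this (the priv fields are hypothetical, only the helper call is libeth API):

	/*
	 * Ideally one XDPSQ per CPU, but at least one per Rx queue, capped by
	 * the SQs left over after the regular Tx queues.
	 */
	priv->num_xdpsqs = libeth_xdpsq_num(priv->num_rxq, priv->num_txq,
					    priv->max_sq);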
122 * libeth_xdpsq_shared - whether XDPSQs can be shared between several CPUs
123 * @num: number of active XDPSQs
127 static inline bool libeth_xdpsq_shared(u32 num) in libeth_xdpsq_shared() argument
129 return num < nr_cpu_ids; in libeth_xdpsq_shared()
133 * libeth_xdpsq_id - get XDPSQ index corresponding to this CPU
134 * @num: number of active XDPSQs
140 static inline u32 libeth_xdpsq_id(u32 num) in libeth_xdpsq_id() argument
145 libeth_xdpsq_shared(num)) in libeth_xdpsq_id()
146 ret %= num; in libeth_xdpsq_id()
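Together with libeth_xdpsq_shared(), this gives the CPU-to-queue mapping used on the Tx side; a minimal sketch with hypothetical driver structures:

	struct my_xdpsq *sq;
	bool shared;

	/* pick the XDPSQ serving the current CPU */
	sq = &priv->xdpsqs[libeth_xdpsq_id(priv->num_xdpsqs)];

	/* true when several CPUs map to one XDPSQ and access must be serialized */
	shared = libeth_xdpsq_shared(priv->num_xdpsqs);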
157 * libeth_xdpsq_get - initialize &libeth_xdpsq_lock
174 * libeth_xdpsq_put - deinitialize &libeth_xdpsq_lock
184 if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share) in libeth_xdpsq_put()
192 * libeth_xdpsq_lock - grab &libeth_xdpsq_lock if needed
200 if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share) in libeth_xdpsq_lock()
205 * libeth_xdpsq_unlock - free &libeth_xdpsq_lock if needed
213 if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share) in libeth_xdpsq_unlock()
217 /* XDPSQ clean-up timers */
224 * libeth_xdpsq_deinit_timer - deinitialize &libeth_xdpsq_timer
231 cancel_delayed_work_sync(&timer->dwork); in libeth_xdpsq_deinit_timer()
235 * libeth_xdpsq_queue_timer - run &libeth_xdpsq_timer
240 * second (-> lazy cleaning won't happen).
247 &timer->dwork, HZ); in libeth_xdpsq_queue_timer()
251 * libeth_xdpsq_run_timer - wrapper to run a queue clean-up on a timer event
253 * @poll: driver-specific completion queue poll function
266 libeth_xdpsq_lock(timer->lock); in libeth_xdpsq_run_timer()
268 if (poll(timer->xdpsq, U32_MAX)) in libeth_xdpsq_run_timer()
271 libeth_xdpsq_unlock(timer->lock); in libeth_xdpsq_run_timer()
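A hedged sketch of how a driver wires the cleanup timer up, assuming the &libeth_xdpsq_timer is embedded in a hypothetical driver SQ struct: arm the lazy cleanup once frames have been queued, and cancel it when the queue goes down:

	/* after filling the XDPSQ and hitting the doorbell */
	libeth_xdpsq_queue_timer(&sq->timer);

	/* on queue teardown */
	libeth_xdpsq_deinit_timer(&sq->timer);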
277 * enum - libeth_xdp internal Tx flags
294 * enum - &libeth_xdp_tx_frame and &libeth_xdp_tx_desc flags
317 * struct libeth_xdp_tx_frame - represents one XDP Tx element
321 * @frag: one (non-head) frag for ``XDP_TX``
323 * @dma: DMA address of the non-head frag for .ndo_xdp_xmit()
368 * struct libeth_xdp_tx_bulk - XDP Tx frame bulk for bulk sending
371 * @xdpsq: shortcut to the corresponding driver-specific XDPSQ structure
395 * LIBETH_XDP_ONSTACK_BULK - declare &libeth_xdp_tx_bulk on the stack
406 * struct libeth_xdpsq - abstraction for an XDPSQ
412 * @pending: pointer to the number of sent-not-completed descs on that queue
413 * @xdp_tx: pointer to the above, but only for non-XSk-xmit frames
416 * Abstraction for driver-independent implementation of Tx. Placed on the stack
418 * functions can access and modify driver-specific resources.
434 * struct libeth_xdp_tx_desc - abstraction for an XDP Tx descriptor
440 * Filled by the generic functions and then passed to driver-specific functions
455 * libeth_xdp_ptr_to_priv - convert pointer to a libeth_xdp u64 priv
466 * libeth_xdp_priv_to_ptr - convert libeth_xdp u64 priv to a pointer
479 * On 64-bit systems, assigning one u64 is faster than two u32s. When ::len
494 * libeth_xdp_tx_xmit_bulk - main XDP Tx function
496 * @xdpsq: pointer to the driver-specific XDPSQ struct
499 * @priv: driver-specific private data
536 this = sq.count - ntu; in libeth_xdp_tx_xmit_bulk()
547 u32 base = ntu + i - off; in libeth_xdp_tx_xmit_bulk()
558 xmit(fill(bulk[i], ntu + i - off, &sq, priv), in libeth_xdp_tx_xmit_bulk()
559 ntu + i - off, &sq, priv); in libeth_xdp_tx_xmit_bulk()
569 this = n - i; in libeth_xdp_tx_xmit_bulk()
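The @fill and @xmit callbacks invoked in the loop above are supplied by the driver; @fill returns a &libeth_xdp_tx_desc which is handed, together with the ring index, SQ and u64 @priv, to @xmit. Below is a hedged sketch of one possible @xmit callback matching that call shape. The HW descriptor layout, the my_* names and the idea of carrying a driver queue pointer in @priv (recovered via the libeth_xdp_priv_to_ptr() converter documented earlier) are assumptions of this example, not libeth API:

	/* hypothetical HW Tx descriptor and driver queue, for illustration only */
	struct my_tx_desc {
		__le64	addr;
		__le32	len;
		__le32	cmd;
	};

	struct my_xdpq {
		struct my_tx_desc	*hw_ring;
	};

	static void my_xdp_xmit_desc(struct libeth_xdp_tx_desc desc, u32 i,
				     const struct libeth_xdpsq *sq, u64 priv)
	{
		struct my_xdpq *q = libeth_xdp_priv_to_ptr(priv);
		struct my_tx_desc *txd = &q->hw_ring[i];

		txd->addr = cpu_to_le64(desc.addr);
		txd->len = cpu_to_le32(desc.len);
		/* BIT(0) stands in for whatever "send one buffer" command the HW uses */
		txd->cmd = cpu_to_le32(BIT(0));
	}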
592 * libeth_xdp_tx_queue_head - internal helper for queueing one ``XDP_TX`` head
601 const struct xdp_buff *base = &xdp->base; in libeth_xdp_tx_queue_head()
603 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xdp_tx_queue_head()
604 .data = xdp->data, in libeth_xdp_tx_queue_head()
605 .len_fl = (base->data_end - xdp->data) | LIBETH_XDP_TX_FIRST, in libeth_xdp_tx_queue_head()
606 .soff = xdp_data_hard_end(base) - xdp->data, in libeth_xdp_tx_queue_head()
612 bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_MULTI; in libeth_xdp_tx_queue_head()
618 * libeth_xdp_tx_queue_frag - internal helper for queueing one ``XDP_TX`` frag
625 bq->bulk[bq->count++].frag = *frag; in libeth_xdp_tx_queue_frag()
629 * libeth_xdp_tx_queue_bulk - internal helper for queueing one ``XDP_TX`` frame
646 if (unlikely(bq->count == LIBETH_XDP_TX_BULK) && in libeth_xdp_tx_queue_bulk()
655 sinfo = xdp_get_shared_info_from_buff(&xdp->base); in libeth_xdp_tx_queue_bulk()
656 nr_frags = sinfo->nr_frags; in libeth_xdp_tx_queue_bulk()
659 if (unlikely(bq->count == LIBETH_XDP_TX_BULK) && in libeth_xdp_tx_queue_bulk()
665 libeth_xdp_tx_queue_frag(bq, &sinfo->frags[i]); in libeth_xdp_tx_queue_bulk()
669 bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_LAST; in libeth_xdp_tx_queue_bulk()
670 xdp->data = NULL; in libeth_xdp_tx_queue_bulk()
676 * libeth_xdp_tx_fill_stats - fill &libeth_sqe with ``XDP_TX`` frame stats
693 ue->nr_frags = 1; \
694 ue->bytes = ud->len; \
696 if (ud->flags & LIBETH_XDP_TX_MULTI) { \
698 ue->nr_frags += us->nr_frags; \
699 ue->bytes += us->xdp_frags_size; \
704 * libeth_xdp_tx_fill_buf - internal helper to fill one ``XDP_TX`` &libeth_sqe
740 dma_sync_single_for_device(__netmem_get_pp(netmem)->p.dev, desc.addr, in libeth_xdp_tx_fill_buf()
746 sqe = &sq->sqes[i]; in libeth_xdp_tx_fill_buf()
747 sqe->type = LIBETH_SQE_XDP_TX; in libeth_xdp_tx_fill_buf()
748 sqe->sinfo = sinfo; in libeth_xdp_tx_fill_buf()
758 * __libeth_xdp_tx_flush_bulk - internal helper to flush one XDP Tx bulk
761 * @prep: driver-specific callback to prepare the queue for sending
783 sent = libeth_xdp_tx_xmit_bulk(bq->bulk, bq->xdpsq, in __libeth_xdp_tx_flush_bulk()
784 min(bq->count, LIBETH_XDP_TX_BULK), in __libeth_xdp_tx_flush_bulk()
786 drops = bq->count - sent; in __libeth_xdp_tx_flush_bulk()
790 err = -ENXIO; in __libeth_xdp_tx_flush_bulk()
792 bq->count = 0; in __libeth_xdp_tx_flush_bulk()
795 trace_xdp_bulk_tx(bq->dev, sent, drops, err); in __libeth_xdp_tx_flush_bulk()
801 * libeth_xdp_tx_flush_bulk - wrapper to define flush of one ``XDP_TX`` bulk
817 * libeth_xdp_xmit_init_bulk - internal helper to initialize bulk for XDP xmit
820 * @xdpsqs: array of driver-specific XDPSQ structs
821 * @num: number of active XDPSQs (the above array length)
823 #define libeth_xdp_xmit_init_bulk(bq, dev, xdpsqs, num) \ argument
824 __libeth_xdp_xmit_init_bulk(bq, dev, (xdpsqs)[libeth_xdpsq_id(num)])
830 bq->dev = dev; in __libeth_xdp_xmit_init_bulk()
831 bq->xdpsq = xdpsq; in __libeth_xdp_xmit_init_bulk()
832 bq->count = 0; in __libeth_xdp_xmit_init_bulk()
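On the .ndo_xdp_xmit() side, the bulk is bound to whichever XDPSQ libeth_xdpsq_id() maps the current CPU to; a minimal sketch, with xdpsqs/num_xdpsqs being hypothetical driver fields:

	struct libeth_xdp_tx_bulk bq;

	/* bind an empty bulk to the XDPSQ serving this CPU */
	libeth_xdp_xmit_init_bulk(&bq, dev, priv->xdpsqs, priv->num_xdpsqs);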
836 * libeth_xdp_xmit_frame_dma - internal helper to access DMA of an &xdp_frame
865 * libeth_xdp_xmit_queue_head - internal helper for queueing one XDP xmit head
880 dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); in libeth_xdp_xmit_queue_head()
886 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xdp_xmit_queue_head()
888 __libeth_xdp_tx_len(xdpf->len, LIBETH_XDP_TX_FIRST), in libeth_xdp_xmit_queue_head()
894 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI; in libeth_xdp_xmit_queue_head()
900 * libeth_xdp_xmit_queue_frag - internal helper for queueing one XDP xmit frag
917 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xdp_xmit_queue_frag()
926 * libeth_xdp_xmit_queue_bulk - internal helper for queueing one XDP xmit frame
942 struct device *dev = bq->dev->dev.parent; in libeth_xdp_xmit_queue_bulk()
945 if (unlikely(bq->count == LIBETH_XDP_TX_BULK) && in libeth_xdp_xmit_queue_bulk()
956 nr_frags = sinfo->nr_frags; in libeth_xdp_xmit_queue_bulk()
959 if (unlikely(bq->count == LIBETH_XDP_TX_BULK) && in libeth_xdp_xmit_queue_bulk()
963 if (!libeth_xdp_xmit_queue_frag(bq, &sinfo->frags[i], dev)) in libeth_xdp_xmit_queue_bulk()
971 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST; in libeth_xdp_xmit_queue_bulk()
977 * libeth_xdp_xmit_fill_buf - internal helper to fill one XDP xmit &libeth_sqe
1003 sqe = &sq->sqes[i]; in libeth_xdp_xmit_fill_buf()
1008 sqe->type = LIBETH_SQE_XDP_XMIT_FRAG; in libeth_xdp_xmit_fill_buf()
1012 sqe->type = LIBETH_SQE_XDP_XMIT; in libeth_xdp_xmit_fill_buf()
1013 sqe->xdpf = xdpf; in libeth_xdp_xmit_fill_buf()
1021 * libeth_xdp_xmit_flush_bulk - wrapper to define flush of one XDP xmit bulk
1038 * __libeth_xdp_xmit_do_bulk - internal function to implement .ndo_xdp_xmit()
1049  * Return: number of frames sent or -errno on error.
1061 return -EINVAL; in __libeth_xdp_xmit_do_bulk()
1075 if (bq->count) { in __libeth_xdp_xmit_do_bulk()
1077 if (unlikely(bq->count)) in __libeth_xdp_xmit_do_bulk()
1078 nxmit -= libeth_xdp_xmit_return_bulk(bq->bulk, in __libeth_xdp_xmit_do_bulk()
1079 bq->count, in __libeth_xdp_xmit_do_bulk()
1080 bq->dev); in __libeth_xdp_xmit_do_bulk()
1083 finalize(bq->xdpsq, nxmit, flags & XDP_XMIT_FLUSH); in __libeth_xdp_xmit_do_bulk()
1089 * libeth_xdp_xmit_do_bulk - implement full .ndo_xdp_xmit() in driver
1102 * Return: number of frames sent or -errno on error.
1120 ur = -ENXIO; \
1129 * libeth_xdp_tx_init_bulk - initialize an XDP Tx bulk for Rx NAPI poll
1134 * @num: number of active XDPSQs, the above array length
1137 * Initializes all the needed fields to run libeth_xdp functions. If @num == 0,
1141 #define libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num) \ argument
1142 __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, false, \
1145 #define __libeth_xdp_tx_init_bulk(bq, pr, d, xdpsqs, num, xsk, ub, un) do { \ argument
1147 u32 un = (num); \
1152 ub->prog = rcu_dereference(pr); \
1153 ub->dev = (d); \
1154 ub->xdpsq = (xdpsqs)[libeth_xdpsq_id(un)]; \
1156 ub->prog = NULL; \
1159 ub->act_mask = 0; \
1160 ub->count = 0; \
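At the start of its Rx NAPI poll, a driver typically sets up one Tx bulk for the queue being polled; a minimal sketch, where xdp_prog, netdev, xdpsqs and num_xdpsqs are hypothetical driver fields (the RCU dereference above requires being inside the NAPI poll's RCU read section):

	struct libeth_xdp_tx_bulk bq;

	/* grab the prog and bind the bulk to this CPU's XDPSQ */
	libeth_xdp_tx_init_bulk(&bq, rq->xdp_prog, rq->netdev, priv->xdpsqs,
				priv->num_xdpsqs);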
1170 * libeth_xdp_init_buff - initialize a &libeth_xdp_buff for Rx NAPI poll
1173 * @rxq: registered &xdp_rxq_info corresponding to this queue
1182 struct xdp_rxq_info *rxq) in libeth_xdp_init_buff() argument
1184 if (likely(!src->data)) in libeth_xdp_init_buff()
1185 dst->data = NULL; in libeth_xdp_init_buff()
1189 dst->base.rxq = rxq; in libeth_xdp_init_buff()
1193 * libeth_xdp_save_buff - save a partially built buffer on a queue
1204 if (likely(!src->data)) in libeth_xdp_save_buff()
1205 dst->data = NULL; in libeth_xdp_save_buff()
1211 * libeth_xdp_return_stash - free an XDP buffer stash from a queue
1219 if (stash->data) in libeth_xdp_return_stash()
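Together, the three helpers above let a partially built multi-buffer frame survive across NAPI polls. A hedged sketch of the usual pattern, where xdp_stash and xdp_rxq are hypothetical fields on the driver's Rx queue struct:

	/* poll start: resume from the stash (or start clean) */
	libeth_xdp_init_buff(xdp, &rq->xdp_stash, &rq->xdp_rxq);

	/* ... receive loop ... */

	/* budget ran out mid-frame: park the buffer until the next poll */
	libeth_xdp_save_buff(&rq->xdp_stash, xdp);

	/* queue teardown: drop whatever is still stashed */
	libeth_xdp_return_stash(&rq->xdp_stash);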
1233 for (u32 i = 0; i < sinfo->nr_frags; i++) { in libeth_xdp_return_frags()
1234 netmem_ref netmem = skb_frag_netmem(&sinfo->frags[i]); in libeth_xdp_return_frags()
1241 * libeth_xdp_return_buff - free/recycle &libeth_xdp_buff
1245 * it's faster as it gets inlined and always assumes order-0 pages and safe
1246 * direct recycling. Zeroes @xdp->data to avoid UAFs.
1253 if (!xdp_buff_has_frags(&xdp->base)) in __libeth_xdp_return_buff()
1256 libeth_xdp_return_frags(xdp_get_shared_info_from_buff(&xdp->base), in __libeth_xdp_return_buff()
1260 libeth_xdp_return_va(xdp->data, napi); in __libeth_xdp_return_buff()
1261 xdp->data = NULL; in __libeth_xdp_return_buff()
1269 * libeth_xdp_prepare_buff - fill &libeth_xdp_buff with head FQE data
1282 const struct page *page = __netmem_to_page(fqe->netmem); in libeth_xdp_prepare_buff()
1284 xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset, in libeth_xdp_prepare_buff()
1285 pp_page_to_nmdesc(page)->pp->p.offset, len, true); in libeth_xdp_prepare_buff()
1286 xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq); in libeth_xdp_prepare_buff()
1290 * libeth_xdp_process_buff - attach Rx buffer to &libeth_xdp_buff
1297 * Already performs DMA sync-for-CPU and frame start prefetch
1310 if (xdp->data) in libeth_xdp_process_buff()
1315 prefetch(xdp->data); in libeth_xdp_process_buff()
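A tiny sketch of calling it from the Rx descriptor loop, assuming (the full prototype is not captured in this excerpt) that it takes the buff, the fill-queue element and the received length, like libeth_xdp_prepare_buff() above, and returns false when nothing could be attached; rq, fqes, ntc and len are hypothetical driver-side names:

	/* hook the buffer backing this Rx descriptor up to the onstack buff */
	if (!libeth_xdp_process_buff(xdp, &rq->fqes[ntc], len))
		break;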
1321 * libeth_xdp_buff_stats_frags - update onstack RQ stats with XDP frags info
1334 sinfo = xdp_get_shared_info_from_buff(&xdp->base); in libeth_xdp_buff_stats_frags()
1335 ss->bytes += sinfo->xdp_frags_size; in libeth_xdp_buff_stats_frags()
1336 ss->fragments += sinfo->nr_frags + 1; in libeth_xdp_buff_stats_frags()
1344 * __libeth_xdp_run_prog - run XDP program on an XDP buffer
1360 act = bpf_prog_run_xdp(bq->prog, &xdp->base); in __libeth_xdp_run_prog()
1374 if (unlikely(xdp_do_redirect(bq->dev, &xdp->base, bq->prog))) in __libeth_xdp_run_prog()
1377 xdp->data = NULL; in __libeth_xdp_run_prog()
1389 * __libeth_xdp_run_flush - run XDP program and handle ``XDP_TX`` verdict
1421 bq->act_mask |= act; in __libeth_xdp_run_flush()
1427 * libeth_xdp_run_prog - run XDP program (non-XSk path) and handle all verdicts
1445 * __libeth_xdp_run_pass - helper to run XDP program and handle the result
1455 * Inline abstraction that does the following (non-XSk path):
1482 rs->bytes += xdp->base.data_end - xdp->data; in __libeth_xdp_run_pass()
1483 rs->packets++; in __libeth_xdp_run_pass()
1485 if (xdp_buff_has_frags(&xdp->base)) in __libeth_xdp_run_pass()
1491 if (!bq || !run || !bq->prog) in __libeth_xdp_run_pass()
1498 skb = xdp_build_skb_from_buff(&xdp->base); in __libeth_xdp_run_pass()
1504 xdp->data = NULL; in __libeth_xdp_run_pass()
1517 xdp->desc = desc; in libeth_xdp_prep_desc()
1521 * libeth_xdp_run_pass - helper to run XDP program and handle the result
1531 * means just writing the pointer to the HW descriptor as @xdp->desc.
1538 * libeth_xdp_finalize_rx - finalize XDPSQ after a NAPI polling loop (non-XSk)
1555 if (bq->act_mask & LIBETH_XDP_TX) { in __libeth_xdp_finalize_rx()
1556 if (bq->count) in __libeth_xdp_finalize_rx()
1558 finalize(bq->xdpsq, true, true); in __libeth_xdp_finalize_rx()
1560 if (bq->act_mask & LIBETH_XDP_REDIRECT) in __libeth_xdp_finalize_rx()
1593 __diag_ignore(GCC, 8, "-Wold-style-declaration", \
1597 * LIBETH_XDP_DEFINE_TIMER - define a driver XDPSQ cleanup timer callback
1608 * LIBETH_XDP_DEFINE_FLUSH_TX - define a driver ``XDP_TX`` bulk flush function
1623 * LIBETH_XDP_DEFINE_FLUSH_XMIT - define a driver XDP xmit bulk flush function
1635 * LIBETH_XDP_DEFINE_RUN_PROG - define a driver XDP program run function
1649 * LIBETH_XDP_DEFINE_RUN_PASS - define a driver buffer process + pass function
1667 * LIBETH_XDP_DEFINE_RUN - define a driver buffer process, run + pass function
1681 * LIBETH_XDP_DEFINE_FINALIZE - define a driver Rx NAPI poll finalize function
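Drivers normally generate their XDP helpers from these wrappers instead of open-coding them. A rough, hedged sketch of the usual shape: the my_* callbacks are hypothetical, and the exact parameter list of each LIBETH_XDP_DEFINE_*() macro (as well as the START/END brackets that carry the old-style-declaration diag seen above) should be taken from the per-macro kernel-doc in this header rather than from this example:

	LIBETH_XDP_DEFINE_START();
	LIBETH_XDP_DEFINE_TIMER(static my_xdp_tx_timer, my_clean_xdpsq);
	LIBETH_XDP_DEFINE_FLUSH_TX(static my_xdp_tx_flush_bulk, my_xdp_tx_prep,
				   my_xdp_xmit_desc);
	LIBETH_XDP_DEFINE_END();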
1700 * libeth_xdp_buff_to_rq - get RQ pointer from an XDP buffer pointer
1711 container_of_const((xdp)->base.rxq, type, member)
1714 * libeth_xdpmo_rx_hash - convert &libeth_rx_pt to an XDP RSS hash metadata
1720 * Handle zeroed/non-available hash and convert libeth parsed packet type to
1727 * Return: 0 on success, -ENODATA when the hash is not available.
1734 return -ENODATA; in libeth_xdpmo_rx_hash()
1749 * __libeth_xdp_complete_tx - complete sent XDPSQE
1752 * @bulk: internal callback to bulk-free ``XDP_TX`` buffers
1755 * Use the non-underscored version in drivers instead. This one is shared
1765 enum libeth_sqe_type type = sqe->type; in __libeth_xdp_complete_tx()
1772 dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma), in __libeth_xdp_complete_tx()
1781 bulk(sqe->sinfo, cp->bq, sqe->nr_frags != 1); in __libeth_xdp_complete_tx()
1784 xdp_return_frame_bulk(sqe->xdpf, cp->bq); in __libeth_xdp_complete_tx()
1788 xsk(sqe->xsk); in __libeth_xdp_complete_tx()
1798 cp->xdp_tx -= sqe->nr_frags; in __libeth_xdp_complete_tx()
1800 cp->xss->packets++; in __libeth_xdp_complete_tx()
1801 cp->xss->bytes += sqe->bytes; in __libeth_xdp_complete_tx()
1807 sqe->type = LIBETH_SQE_EMPTY; in __libeth_xdp_complete_tx()
1828 * libeth_xdp_set_features - set XDP features for netdev
1850 * libeth_xdp_set_features_noredir - enable all libeth_xdp features w/o redir