Lines matching "count", "-" and "up"

1 // SPDX-License-Identifier: GPL-2.0-only
22 spin_lock_init(&lock->lock); in __libeth_xdpsq_get()
23 lock->share = true; in __libeth_xdpsq_get()
41 lock->share = false; in __libeth_xdpsq_put()
45 void __acquires(&lock->lock)
48 spin_lock(&lock->lock); in __libeth_xdpsq_lock()
52 void __releases(&lock->lock)
55 spin_unlock(&lock->lock); in __libeth_xdpsq_unlock()
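The share flag set in __libeth_xdpsq_get() is what arms this lock: when an XDPSQ is not shared between several CPUs, locking can be skipped entirely. Below is a minimal sketch of that conditional pairing; the wrapper names are hypothetical (the real inline wrappers may additionally use a static key), and only __libeth_xdpsq_lock()/__libeth_xdpsq_unlock() and the share field come from the lines above.

/* Hypothetical wrappers: take the spinlock only when the SQ is shared. */
static inline void my_xdpsq_lock(struct libeth_xdpsq_lock *lock)
{
	if (lock->share)		/* set by __libeth_xdpsq_get() */
		__libeth_xdpsq_lock(lock);
}

static inline void my_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
{
	if (lock->share)
		__libeth_xdpsq_unlock(lock);
}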
59 /* XDPSQ clean-up timers */
62 * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
68 XDPSQ clean-up timers must be set up at queue configuration time, before use
75 timer->xdpsq = xdpsq; in libeth_xdpsq_init_timer()
76 timer->lock = lock; in libeth_xdpsq_init_timer()
78 INIT_DELAYED_WORK(&timer->dwork, poll); in libeth_xdpsq_init_timer()
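A sketch of driver-side setup at queue configuration time. struct my_xdpsq and the work body are hypothetical; the argument order (timer, xdpsq, lock, poll) is inferred from the assignments above, and the dwork field comes from the INIT_DELAYED_WORK() line.

/* Hypothetical poll callback; it can recover the timer via dwork.work. */
static void my_xdpsq_clean_work(struct work_struct *work)
{
	struct libeth_xdpsq_timer *timer;

	timer = container_of(work, struct libeth_xdpsq_timer, dwork.work);
	/* take timer->lock and clean completed descriptors of timer->xdpsq */
}

static void my_xdpsq_config(struct my_xdpsq *sq)
{
	libeth_xdpsq_init_timer(sq->timer, sq, &sq->lock,
				my_xdpsq_clean_work);
}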
87 if (frm->len_fl & LIBETH_XDP_TX_MULTI) in libeth_xdp_tx_return_one()
88 libeth_xdp_return_frags(frm->data + frm->soff, true); in libeth_xdp_tx_return_one()
90 libeth_xdp_return_va(frm->data, true); in libeth_xdp_tx_return_one()
94 libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count) in libeth_xdp_tx_return_bulk()
96 for (u32 i = 0; i < count; i++) { in libeth_xdp_tx_return_bulk()
99 if (!(frm->len_fl & LIBETH_XDP_TX_FIRST)) in libeth_xdp_tx_return_bulk()
114 * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
126 const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent]; in libeth_xdp_tx_exception()
127 u32 left = bq->count - sent; in libeth_xdp_tx_exception()
130 libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX); in libeth_xdp_tx_exception()
133 memmove(bq->bulk, pos, left * sizeof(*bq->bulk)); in libeth_xdp_tx_exception()
134 bq->count = left; in libeth_xdp_tx_exception()
144 libeth_xdp_xmit_return_bulk(pos, left, bq->dev); in libeth_xdp_tx_exception()
146 bq->count = 0; in libeth_xdp_tx_exception()
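The memmove() above compacts the unsent tail of the bulk down to index 0, so the next flush retries exactly those frames. A standalone sketch of the same pattern under hypothetical types (not the libeth API):

#include <string.h>

struct frame { int id; };

/* Keep only the frames that were not sent, moved to the front. */
static unsigned int compact_unsent(struct frame *bulk, unsigned int count,
				   unsigned int sent)
{
	unsigned int left = count - sent;

	memmove(bulk, &bulk[sent], left * sizeof(*bulk));

	return left;	/* new bulk count to retry on the next flush */
}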
153 u32 count, const struct net_device *dev) in libeth_xdp_xmit_return_bulk()
157 for (u32 i = 0; i < count; i++) { in libeth_xdp_xmit_return_bulk()
161 if (frm->flags & LIBETH_XDP_TX_FIRST) in libeth_xdp_xmit_return_bulk()
162 dma = *libeth_xdp_xmit_frame_dma(frm->xdpf); in libeth_xdp_xmit_return_bulk()
166 dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len), in libeth_xdp_xmit_return_bulk()
170 n += !!(frm->flags & LIBETH_XDP_TX_FIRST); in libeth_xdp_xmit_return_bulk()
180 * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
192 dst->data = src->data; in libeth_xdp_load_stash()
193 dst->base.data_end = src->data + src->len; in libeth_xdp_load_stash()
194 dst->base.data_meta = src->data; in libeth_xdp_load_stash()
195 dst->base.data_hard_start = src->data - src->headroom; in libeth_xdp_load_stash()
197 dst->base.frame_sz = src->frame_sz; in libeth_xdp_load_stash()
198 dst->base.flags = src->flags; in libeth_xdp_load_stash()
203 * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
215 dst->data = src->data; in libeth_xdp_save_stash()
216 dst->headroom = src->data - src->base.data_hard_start; in libeth_xdp_save_stash()
217 dst->len = src->base.data_end - src->data; in libeth_xdp_save_stash()
219 dst->frame_sz = src->base.frame_sz; in libeth_xdp_save_stash()
220 dst->flags = src->base.flags; in libeth_xdp_save_stash()
222 WARN_ON_ONCE(dst->flags != src->base.flags); in libeth_xdp_save_stash()
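libeth_xdp_save_stash() and libeth_xdp_load_stash() are inverses: saving derives headroom as data - data_hard_start and len as data_end - data, and loading rebuilds the same three pointers (frame_sz and flags are copied through as well). A standalone illustration of the pointer arithmetic, using plain pointers rather than the kernel structs:

#include <assert.h>

struct stash { unsigned char *data; unsigned int headroom, len; };

int main(void)
{
	unsigned char buf[2048];
	unsigned char *hard_start = buf, *data = buf + 256, *end = buf + 1000;
	struct stash s;

	/* save: mirrors the field derivation in libeth_xdp_save_stash() */
	s.data = data;
	s.headroom = data - hard_start;
	s.len = end - data;

	/* load: mirrors the reconstruction in libeth_xdp_load_stash() */
	assert(s.data - s.headroom == hard_start);
	assert(s.data + s.len == end);

	return 0;
}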
233 stash->data = NULL; in __libeth_xdp_return_stash()
238 * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
242 * queue clean-ups etc., without unwanted inlining.
251 * libeth_xdp_buff_add_frag - add frag to XDP buffer
265 netmem_ref netmem = fqe->netmem; in libeth_xdp_buff_add_frag()
267 if (!xdp_buff_add_frag(&xdp->base, netmem, in libeth_xdp_buff_add_frag()
268 fqe->offset + netmem_get_pp(netmem)->p.offset, in libeth_xdp_buff_add_frag()
269 len, fqe->truesize)) in libeth_xdp_buff_add_frag()
283 * libeth_xdp_prog_exception - handle XDP prog exceptions
300 bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act); in libeth_xdp_prog_exception()
302 libeth_trace_xdp_exception(bq->dev, bq->prog, act); in libeth_xdp_prog_exception()
304 if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) in libeth_xdp_prog_exception()
318 if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE)) in libeth_xdp_put_netmem_bulk()
321 bq->q[bq->count++] = netmem; in libeth_xdp_put_netmem_bulk()
325 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
330 * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
340 for (u32 i = 0; i < sinfo->nr_frags; i++) in libeth_xdp_return_buff_bulk()
341 libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]), in libeth_xdp_return_buff_bulk()
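Both helpers above follow the same flush-when-full bulking idea: queue items until the array fills, flush once, then continue. A standalone sketch with illustrative types; BULK_SIZE and bulk_flush() are placeholders, not libeth symbols.

#define BULK_SIZE	16

struct bulk {
	void		*q[BULK_SIZE];
	unsigned int	count;
};

static void bulk_flush(struct bulk *bq)
{
	/* free bq->q[0..count) in one batch, then reset */
	bq->count = 0;
}

static void bulk_put(struct bulk *bq, void *item)
{
	if (bq->count == BULK_SIZE)
		bulk_flush(bq);

	bq->q[bq->count++] = item;
}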
352 * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
353 * @count: number of descriptors in the queue
356 * empty buffers exceeds it) and SQs get cleaned up (when the number of free
357 * descriptors goes below it). To speed up hotpath processing, threshold is
358 * always pow-2, closest to 1/4 of the queue length.
364 u32 libeth_xdp_queue_threshold(u32 count) in libeth_xdp_queue_threshold()
368 if (likely(is_power_of_2(count))) in libeth_xdp_queue_threshold()
369 return count >> 2; in libeth_xdp_queue_threshold()
371 quarter = DIV_ROUND_CLOSEST(count, 4); in libeth_xdp_queue_threshold()
375 return high - quarter <= quarter - low ? high : low; in libeth_xdp_queue_threshold()
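For a non-power-of-2 ring the helper returns whichever neighbouring power of two is closer to count / 4: e.g. count = 1000 gives quarter = 250, candidates 128 and 256, and 256 wins since it is only 6 away. A userspace re-creation for illustration; the kernel helpers (is_power_of_2(), DIV_ROUND_CLOSEST(), round{down,up}_pow_of_two()) are replaced with local stand-ins.

#include <stdio.h>

static unsigned int rounddown_p2(unsigned int x)
{
	unsigned int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

static unsigned int threshold(unsigned int count)
{
	unsigned int quarter, low, high;

	if (!(count & (count - 1)))	/* is_power_of_2(); count != 0 */
		return count >> 2;

	quarter = (count + 2) / 4;	/* DIV_ROUND_CLOSEST(count, 4) */
	low = rounddown_p2(quarter);
	high = low * 2;	/* if quarter is itself pow-2, low still wins below */

	return high - quarter <= quarter - low ? high : low;
}

int main(void)
{
	/* 1024 -> 256 (exact quarter); 1000 -> 256 (250 is closer to 256) */
	printf("%u %u\n", threshold(1024), threshold(1000));
	return 0;
}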
380 * __libeth_xdp_set_features - set XDP features for netdev
388 * Use the non-underscored versions in drivers instead.
402 dev->xdp_metadata_ops = xmo; in __libeth_xdp_set_features()
406 dev->xdp_zc_max_segs = zc_segs ? : 1; in __libeth_xdp_set_features()
407 dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL; in __libeth_xdp_set_features()
412 * libeth_xdp_set_redirect - toggle the XDP redirect feature
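A hedged probe-time sketch tying these together. The argument order (dev, metadata ops, ZC segment limit, XSk Tx metadata ops) is inferred from the assignments above; drivers should prefer the non-underscored wrappers per the comment, and the ops structures, the segment count and the bool form of libeth_xdp_set_redirect() are assumptions.

/* Hypothetical driver probe; the ops structures are driver-defined. */
__libeth_xdp_set_features(netdev, &my_xdp_metadata_ops, 8,
			  &my_xsk_tx_metadata_ops);

/* Passing zc_segs == 0 above would cap xdp_zc_max_segs at 1 and leave
 * xsk_tx_metadata_ops NULL, per the assignments in the listing.
 *
 * Later, when an XDP prog is attached or removed (assumed signature):
 */
libeth_xdp_set_redirect(netdev, prog_attached);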
449 MODULE_DESCRIPTION("Common Ethernet library - XDP infra");