Lines Matching +full:max +full:- +full:functions

/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure */
/* The rings are laid out as simple circular buffers, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion rings, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                          consumer
 *
 * if (LOAD ->consumer) {  (A)       LOAD.acq ->producer   (C)
 *    STORE $data                    LOAD $data
 *    STORE.rel ->producer  (B)      STORE.rel ->consumer  (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * The release in (B), paired with the acquire in (C), enforces ordering
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room left in the ring, the stores of $data are not executed.
 */
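To make the (A)/(B)/(C)/(D) pairing concrete, here is a minimal user-space sketch of the same single-producer/single-consumer scheme using C11 atomics. It is an illustration only, not kernel code; the toy_* names and the 8-entry size are invented. Note that portable C11 has no control-dependency ordering, so the producer's load of ->consumer is an acquire here, where the kernel instead relies on (A) plus the release in (D).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_RING_SIZE 8 /* power of two, like the XDP rings */

struct toy_ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint64_t desc[TOY_RING_SIZE];
};

static bool toy_produce(struct toy_ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	/* (A): the kernel uses a plain load plus the control dependency of
	 * the branch below; portable C11 needs acquire to pair with (D). */
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire);

	if (prod - cons == TOY_RING_SIZE)
		return false; /* full: no room, so no $data store happens */

	r->desc[prod & (TOY_RING_SIZE - 1)] = val; /* STORE $data */
	/* (B): release orders the data store before the producer update. */
	atomic_store_explicit(&r->producer, prod + 1, memory_order_release);
	return true;
}

static bool toy_consume(struct toy_ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	/* (C): acquire pairs with (B); the data load below cannot move
	 * before the producer index is observed. */
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);

	if (cons == prod)
		return false; /* empty */

	*val = r->desc[cons & (TOY_RING_SIZE - 1)]; /* LOAD $data */
	/* (D): release keeps the slot alive until the data load is done. */
	atomic_store_explicit(&r->consumer, cons + 1, memory_order_release);
	return true;
}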
/* Functions that read and validate content from consumer rings. */

In __xskq_cons_read_addr_unchecked():
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];

In xskq_cons_read_addr_unchecked():
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
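The cached_cons & q->ring_mask idiom works because the rings have power-of-two sizes, so the mask (nentries - 1) maps a free-running 32-bit cursor onto a slot, wrap included. A small standalone illustration (all values invented):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t nentries = 8;             /* ring size, a power of two */
	uint32_t ring_mask = nentries - 1; /* 0b111 */

	/* The cursor increments forever and wraps modulo 2^32; masking
	 * turns it into a slot index, so 10 lands on slot 2. */
	assert((10u & ring_mask) == 2u);
	assert(((UINT32_MAX - 1) & ring_mask) == 6u);
	return 0;
}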
In xp_aligned_validate_desc():
	u64 addr = desc->addr - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;
	u64 offset = addr & (pool->chunk_size - 1);

	if (!desc->len)
		return false;

	if (offset + len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
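The offset check above is what keeps an aligned-mode descriptor inside its chunk: offset is the position within the chunk, and offset + len > chunk_size means the buffer would straddle a chunk boundary. A hedged, standalone restatement of just that check (it leaves out the tx_metadata_len adjustment, and the helper name is invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* chunk_size must be a power of two, as in aligned mode */
static bool toy_fits_in_chunk(uint64_t addr, uint64_t len, uint64_t chunk_size)
{
	uint64_t offset = addr & (chunk_size - 1); /* position inside the chunk */

	return offset + len <= chunk_size;
}

int main(void)
{
	assert(!toy_fits_in_chunk(2000, 100, 2048)); /* 2000 + 100 crosses 2048 */
	assert(toy_fits_in_chunk(0, 2048, 2048));    /* exactly fills one chunk */
	return 0;
}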
In xp_unaligned_validate_desc():
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;

	if (!desc->len)
		return false;

	if (len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;

In xp_validate_desc():
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
	       xp_aligned_validate_desc(pool, desc);
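In unaligned mode the descriptor address is split in two: the low 48 bits carry the base address and the high 16 bits carry an offset (XSK_UNALIGNED_BUF_OFFSET_SHIFT in include/uapi/linux/if_xdp.h), which xp_unaligned_add_offset_to_addr() folds back into a plain address before the checks above run. A hedged standalone restatement of that decode (toy_* names invented):

#include <assert.h>
#include <stdint.h>

#define UNALIGNED_OFFSET_SHIFT 48 /* mirrors XSK_UNALIGNED_BUF_OFFSET_SHIFT */
#define UNALIGNED_ADDR_MASK ((1ULL << UNALIGNED_OFFSET_SHIFT) - 1)

static uint64_t toy_add_offset_to_addr(uint64_t addr)
{
	return (addr & UNALIGNED_ADDR_MASK) + (addr >> UNALIGNED_OFFSET_SHIFT);
}

int main(void)
{
	/* base 0x1000 with an offset of 0x20 packed into the upper bits */
	uint64_t addr = 0x1000ULL | (0x20ULL << UNALIGNED_OFFSET_SHIFT);

	assert(toy_add_offset_to_addr(addr) == 0x1020);
	return 0;
}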
In xskq_has_descs():
	return q->cached_cons != q->cached_prod;

In xskq_cons_is_valid_desc():
	q->invalid_descs++;

In xskq_cons_read_desc():
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;

In xskq_cons_release_n():
	q->cached_cons += cnt;

In parse_desc():
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);

In xskq_cons_read_desc_batch():
	static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
						    struct xsk_buff_pool *pool,
						    u32 max)

	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];

		if (nr_frags == pool->netdev->xdp_zc_max_segs) {

	cached_cons -= nr_frags;
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
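The batch reader copies descriptors out while counting the frags of the current multi-buffer packet; if it stops mid-packet (ring exhausted, max reached, or the packet exceeds xdp_zc_max_segs), it rewinds cached_cons by nr_frags so the partial packet stays in the ring for the next pass. A hedged toy analogue of that rollback, reusing struct toy_ring from the first sketch and inventing an end-of-packet flag in the descriptor's low bit:

static uint32_t toy_consume_batch(struct toy_ring *r, uint64_t *out, uint32_t max)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);
	uint32_t n = 0, frags = 0;

	while (cons != prod && n < max) {
		uint64_t d = r->desc[cons & (TOY_RING_SIZE - 1)];

		out[n++] = d;
		cons++;
		frags = (d & 1) ? 0 : frags + 1; /* low bit set == end of packet */
	}

	if (frags) { /* stopped mid-packet: rewind so it is re-read later */
		cons -= frags;
		n -= frags;
	}

	atomic_store_explicit(&r->consumer, cons, memory_order_release);
	return n;
}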
/* Functions for consumers */

In __xskq_cons_release():
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */

In __xskq_cons_peek():
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */

In xskq_cons_nb_entries():
	static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)

	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;

In xskq_cons_peek_addr_unchecked():
	if (q->cached_prod == q->cached_cons)

In xskq_cons_peek_desc():
	if (q->cached_prod == q->cached_cons)

/* To improve performance in the xskq_cons_release functions, only update
 * local state here. */

In xskq_cons_release():
	q->cached_cons++;

In xskq_cons_cancel_n():
	q->cached_cons -= cnt;

In xskq_cons_present_entries():
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
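Peek and release are deliberately split: peeking only advances the consumer's local (cached) cursor, and the smp_store_release() in __xskq_cons_release() publishes it once the entries are fully processed, so the producer can never recycle a slot that is still in use. A hedged toy analogue of that split, again reusing struct toy_ring from the first sketch (the ctx struct is invented for illustration):

struct toy_cons_ctx {
	uint32_t cached_cons; /* local cursor, advanced on every peek */
	uint32_t cached_prod; /* local snapshot of the producer index */
};

static bool toy_peek(struct toy_ring *r, struct toy_cons_ctx *c, uint64_t *val)
{
	if (c->cached_cons == c->cached_prod) {
		/* like __xskq_cons_peek(): refresh our producer snapshot */
		c->cached_prod = atomic_load_explicit(&r->producer,
						      memory_order_acquire);
		if (c->cached_cons == c->cached_prod)
			return false; /* really empty */
	}

	*val = r->desc[c->cached_cons & (TOY_RING_SIZE - 1)];
	c->cached_cons++; /* local only, like xskq_cons_release() */
	return true;
}

static void toy_release(struct toy_ring *r, struct toy_cons_ctx *c)
{
	/* like __xskq_cons_release(): publish all peeked entries at once */
	atomic_store_explicit(&r->consumer, c->cached_cons, memory_order_release);
}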
/* Functions for producers */

In xskq_prod_nb_free():
	static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)

	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;

In xskq_prod_cancel_n():
	q->cached_prod -= cnt;

In xskq_prod_reserve():
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	q->cached_prod++;

In xskq_prod_reserve_addr():
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	ring->desc[q->cached_prod++ & q->ring_mask] = addr;

In xskq_prod_write_addr_batch():
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;

In xskq_prod_reserve_desc():
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

In __xskq_prod_submit():
	smp_store_release(&q->ring->producer, idx); /* B, matches C */

In xskq_prod_submit():
	__xskq_prod_submit(q, q->cached_prod);

In xskq_prod_submit_n():
	__xskq_prod_submit(q, q->ring->producer + nb_entries);

In xskq_prod_is_empty():
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
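The producer side mirrors the consumer split: reserve writes the descriptor and bumps only the local cached_prod (checking against a cached consumer index that is refreshed only when the ring looks full), and submit publishes everything with the release store in __xskq_prod_submit(). A hedged toy analogue, reusing struct toy_ring from the first sketch (the ctx struct is invented; the kernel's READ_ONCE refresh is an acquire here, consistent with the first sketch's C11 caveat):

struct toy_prod_ctx {
	uint32_t cached_prod; /* local cursor, advanced on every reserve */
	uint32_t cached_cons; /* stale consumer snapshot, refreshed on demand */
};

static int toy_reserve(struct toy_ring *r, struct toy_prod_ctx *p, uint64_t val)
{
	if (p->cached_prod - p->cached_cons == TOY_RING_SIZE) {
		/* looks full: refresh the consumer snapshot and retry */
		p->cached_cons = atomic_load_explicit(&r->consumer,
						      memory_order_acquire);
		if (p->cached_prod - p->cached_cons == TOY_RING_SIZE)
			return -1; /* really full, like -ENOSPC/-ENOBUFS above */
	}

	r->desc[p->cached_prod++ & (TOY_RING_SIZE - 1)] = val; /* STORE $data */
	return 0;
}

static void toy_submit(struct toy_ring *r, struct toy_prod_ctx *p)
{
	/* like __xskq_prod_submit(): (B), matches the consumer's acquire (C) */
	atomic_store_explicit(&r->producer, p->cached_prod, memory_order_release);
}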
In xskq_nb_invalid_descs():
	return q ? q->invalid_descs : 0;

In xskq_nb_queue_empty_descs():
	return q ? q->queue_empty_descs : 0;