/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer if the producer pointer is touched and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

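/* Per-queue state. Note that cached_prod and cached_cons are local copies
 * of the shared producer/consumer pointers; they are only synchronized
 * with the shared ring through the peek/release/submit helpers below.
 */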
struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

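/* Per-descriptor result from the batched Tx read path: valid is set when
 * the descriptor passed xskq_cons_is_valid_desc(), mb when it is part of
 * a multi-buffer packet (xp_mb_desc()).
 */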
struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, we do not store it. The dependency
 * orders both of the stores after the loads, so no barrier is needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
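/* In the helpers below, (B) and (D) correspond to the smp_store_release()
 * calls on ->producer and ->consumer, (C) to the smp_load_acquire() of
 * ->producer, and (A) to the plain read of ->consumer on the producer side
 * followed by the control dependency in the free-entries check.
 */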

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
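/* A minimal usage sketch of the helpers below (illustrative only; cq/fq
 * and addr are placeholders, error handling and the surrounding
 * NAPI/locking context are omitted):
 *
 *	// kernel as producer, e.g. a completion ring
 *	if (!xskq_prod_reserve_addr(cq, addr))
 *		xskq_prod_submit(cq);
 *
 *	// kernel as consumer, e.g. a fill ring
 *	u64 addr;
 *
 *	if (xskq_cons_peek_addr_unchecked(fq, &addr)) {
 *		// ... use addr ...
 *		xskq_cons_release(fq);
 *	}
 */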

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA);
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 addr = desc->addr - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;
	u64 offset = addr & (pool->chunk_size - 1);

	if (!desc->len)
		return false;

	if (offset + len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}
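/* As an illustration (numbers assumed): with a 2K chunk_size and no Tx
 * metadata, a descriptor with addr = 2040 and len = 16 is rejected above,
 * since offset (2040) + len (16) would cross the 2K chunk boundary.
 */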

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;

	if (!desc->len)
		return false;

	if (len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
	       xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* Track the first entry; if we stumble upon *any* invalid descriptor,
	 * rewind the current packet (all of its frags) and stop processing.
	 */
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->netdev->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}
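/* Note that xskq_prod_write_addr_batch() does not itself check for ring
 * space; the caller is expected to bound nb_entries beforehand, e.g. via
 * xskq_prod_nb_free().
 */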

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */