/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched, and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The shared state of the rings is a simple circular buffer, as
 * outlined in Documentation/core-api/circular-buffers.rst. For the Rx
 * and completion rings, the kernel is the producer and user space is
 * the consumer. For the Tx and fill rings, the kernel is the consumer
 * and user space is the producer.
 *
 *                  producer                      consumer
 *
 *  if (LOAD ->consumer) {  (A)           LOAD.acq ->producer  (C)
 *     STORE $data                        LOAD $data
 *     STORE.rel ->producer  (B)          STORE.rel ->consumer  (D)
 *  }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates that there is no
 * room in the buffer to store $data, we do not store it. The control
 * dependency orders the stores after the load, so no explicit barrier
 * is needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
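
/* An illustrative sketch (not part of this header) of the pairing above,
 * written with the kernel's barrier helpers for a hypothetical ring with a
 * data[] array. Only the placement of the barriers matters; the variable
 * names are made up for the example.
 *
 *	producer:
 *		if (prod - READ_ONCE(ring->consumer) < size) {		(A)
 *			ring->data[prod & mask] = value;
 *			smp_store_release(&ring->producer, prod + 1);	(B)
 *		}
 *
 *	consumer:
 *		prod = smp_load_acquire(&ring->producer);		(C)
 *		if (cons != prod) {
 *			value = ring->data[cons & mask];
 *			smp_store_release(&ring->consumer, cons + 1);	(D)
 *		}
 */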

/* The operations on the rings are the following:
 *
 *                  producer                      consumer
 *
 * RESERVE entries                        PEEK in the ring for entries
 * WRITE data into the ring               READ data from the ring
 * SUBMIT entries                         RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
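
/* An illustrative sketch (not kernel code) of how these operations map onto
 * the helpers defined later in this file. The addr, len and desc variables,
 * and the error handling, are assumptions made only for the example.
 *
 *	kernel as producer (e.g. the Rx ring):
 *		if (!xskq_prod_reserve_desc(q, addr, len, 0))	RESERVE + WRITE
 *			xskq_prod_submit(q);			SUBMIT
 *
 *	kernel as consumer (e.g. the Tx ring):
 *		while (xskq_cons_peek_desc(q, &desc, pool)) {	PEEK + READ
 *			... use desc ...
 *			xskq_cons_release(q);			RELEASE (local only)
 *		}
 *		__xskq_cons_release(q);				publish ->consumer
 */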

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA);
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 len = desc->len;
	u64 addr, offset;

	if (!len)
		return false;

	/* Can overflow if desc->addr < pool->tx_metadata_len */
	if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
		return false;

	offset = addr & (pool->chunk_size - 1);

	/*
	 * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
	 * (pool->chunk_size is ``u32``), @len is guaranteed
	 * to be <= ``U32_MAX``.
	 */
	if (offset + len + pool->tx_metadata_len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;

	return true;
}
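
/* A worked example with illustrative numbers (not taken from the kernel):
 * with pool->chunk_size = 2048, pool->tx_metadata_len = 8 and
 * pool->addrs_cnt = 1 << 20, a descriptor with addr = 2056 and len = 2000
 * passes the checks above: addr - tx_metadata_len = 2048,
 * offset = 2048 & 2047 = 0, and 0 + 2000 + 8 = 2008 <= 2048, so the
 * metadata and frame fit in a single chunk that lies inside the umem.
 */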

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 len = desc->len;
	u64 addr, end;

	if (!len)
		return false;

	/* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
	len += pool->tx_metadata_len;
	if (len > pool->chunk_size)
		return false;

	/* Can overflow if desc->addr is close to 0 */
	if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
			       pool->tx_metadata_len, &addr))
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	/* Can overflow if pool->addrs_cnt is high enough */
	if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
		return false;

	if (xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;

	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* Track the first entry; if we stumble upon *any* invalid descriptor,
	 * rewind the current packet (which consists of frags) and stop
	 * processing.
	 */
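	/* For example (illustrative, not from the kernel sources): if the
	 * producer has published a complete two-frag packet followed by only
	 * the first frag of the next packet, the loop below copies all three
	 * descriptors (assuming max and pool->xdp_zc_max_segs allow it), but
	 * cached_cons is then rewound so the lone frag is picked up again on
	 * the next call; only the two descriptors of the complete packet are
	 * released and counted in the return value.
	 */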
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update
 * local state here. Reflect this to global state when we get new entries
 * from the ring in xskq_cons_get_entries() and whenever Rx or Tx
 * processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_get_prod(struct xsk_queue *q)
{
	return READ_ONCE(q->ring->producer);
}

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[idx & q->ring_mask] = addr;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */