/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef __LIBETH_XSK_H
#define __LIBETH_XSK_H

#include <net/libeth/xdp.h>
#include <net/xdp_sock_drv.h>

/* ``XDP_TXMD_FLAGS_VALID`` is defined only under ``CONFIG_XDP_SOCKETS`` */
#ifdef XDP_TXMD_FLAGS_VALID
static_assert(XDP_TXMD_FLAGS_VALID <= LIBETH_XDP_TX_XSKMD);
#endif

/* ``XDP_TX`` bulking */

/**
 * libeth_xsk_tx_queue_head - internal helper for queueing XSk ``XDP_TX`` head
 * @bq: XDP Tx bulk to queue the head frag to
 * @xdp: XSk buffer with the head to queue
 *
 * Return: false if it's the only frag of the frame, true if it's an S/G frame.
 */
static inline bool libeth_xsk_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
					    struct libeth_xdp_buff *xdp)
{
	bq->bulk[bq->count++] = (typeof(*bq->bulk)){
		.xsk	= xdp,
		__libeth_xdp_tx_len(xdp->base.data_end - xdp->data,
				    LIBETH_XDP_TX_FIRST),
	};

	if (likely(!xdp_buff_has_frags(&xdp->base)))
		return false;

	bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;

	return true;
}

/**
 * libeth_xsk_tx_queue_frag - internal helper for queueing XSk ``XDP_TX`` frag
 * @bq: XDP Tx bulk to queue the frag to
 * @frag: XSk frag to queue
 */
static inline void libeth_xsk_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
					    struct libeth_xdp_buff *frag)
{
	bq->bulk[bq->count++] = (typeof(*bq->bulk)){
		.xsk	= frag,
		__libeth_xdp_tx_len(frag->base.data_end - frag->data),
	};
}

/**
 * libeth_xsk_tx_queue_bulk - internal helper for queueing XSk ``XDP_TX`` frame
 * @bq: XDP Tx bulk to queue the frame to
 * @xdp: XSk buffer to queue
 * @flush_bulk: driver callback to flush the bulk to the HW queue
 *
 * Return: true on success, false on flush error.
 */
static __always_inline bool
libeth_xsk_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
			 struct libeth_xdp_buff *xdp,
			 bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
					    u32 flags))
{
	bool ret = true;

	if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
	    unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
		libeth_xsk_buff_free_slow(xdp);
		return false;
	}

	if (!libeth_xsk_tx_queue_head(bq, xdp))
		goto out;

	for (const struct libeth_xdp_buff *head = xdp; ; ) {
		xdp = container_of(xsk_buff_get_frag(&head->base),
				   typeof(*xdp), base);
		if (!xdp)
			break;

		if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
		    unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
			ret = false;
			break;
		}

		libeth_xsk_tx_queue_frag(bq, xdp);
	}

out:
	bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;

	return ret;
}

/**
 * libeth_xsk_tx_fill_buf - internal helper to fill XSk ``XDP_TX`` &libeth_sqe
 * @frm: XDP Tx frame from the bulk
 * @i: index on the HW queue
 * @sq: XDPSQ abstraction for the queue
 * @priv: private data
 *
 * Return: XDP Tx descriptor with the synced DMA and other info to pass to
 * the driver callback.
 */
static inline struct libeth_xdp_tx_desc
libeth_xsk_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
		       const struct libeth_xdpsq *sq, u64 priv)
{
	struct libeth_xdp_buff *xdp = frm.xsk;
	struct libeth_xdp_tx_desc desc = {
		.addr	= xsk_buff_xdp_get_dma(&xdp->base),
		.opts	= frm.opts,
	};
	struct libeth_sqe *sqe;

	xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);

	sqe = &sq->sqes[i];
	sqe->xsk = xdp;

	if (!(desc.flags & LIBETH_XDP_TX_FIRST)) {
		sqe->type = LIBETH_SQE_XSK_TX_FRAG;
		return desc;
	}

	sqe->type = LIBETH_SQE_XSK_TX;
	libeth_xdp_tx_fill_stats(sqe, &desc,
				 xdp_get_shared_info_from_buff(&xdp->base));

	return desc;
}

/**
 * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
 * @bq: bulk to flush
 * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
 * @prep: driver callback to prepare the queue
 * @xmit: driver callback to fill a HW descriptor
 *
 * Use via LIBETH_XSK_DEFINE_FLUSH_TX() to define an XSk ``XDP_TX`` driver
 * callback.
 */
#define libeth_xsk_tx_flush_bulk(bq, flags, prep, xmit)			     \
	__libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_XSK, prep,    \
				   libeth_xsk_tx_fill_buf, xmit)

/* XSk TMO */

/**
 * libeth_xsktmo_req_csum - XSk Tx metadata op to request checksum offload
 * @csum_start: unused
 * @csum_offset: unused
 * @priv: &libeth_xdp_tx_desc from the filling helper
 *
 * Generic implementation of ::tmo_request_checksum. Works only when the HW
 * doesn't require filling checksum offsets or any other parameters besides
 * the checksum request bit.
 * Consider using within @libeth_xsktmo unless the driver requires HW-specific
 * callbacks.
 */
static inline void libeth_xsktmo_req_csum(u16 csum_start, u16 csum_offset,
					  void *priv)
{
	((struct libeth_xdp_tx_desc *)priv)->flags |= LIBETH_XDP_TX_CSUM;
}

/* Only to inline the callbacks below, use @libeth_xsktmo in drivers instead */
static const struct xsk_tx_metadata_ops __libeth_xsktmo = {
	.tmo_request_checksum	= libeth_xsktmo_req_csum,
};
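
/*
 * Usage sketch, not taken from this file: a driver that is happy with the
 * generic checksum-request op would typically publish @libeth_xsktmo on its
 * netdev and later hand the same pointer to the xmit helpers below. The
 * &net_device::xsk_tx_metadata_ops hook is assumed here for illustration.
 *
 *	netdev->xsk_tx_metadata_ops = libeth_xsktmo;
 */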

/**
 * __libeth_xsk_xmit_fill_buf_md - internal helper to prepare XSk xmit w/meta
 * @xdesc: &xdp_desc from the XSk buffer pool
 * @sq: XDPSQ abstraction for the queue
 * @priv: XSk Tx metadata ops
 *
 * Same as __libeth_xsk_xmit_fill_buf(), but requests metadata pointer and
 * fills additional fields in &libeth_xdp_tx_desc to ask for metadata offload.
 *
 * Return: XDP Tx descriptor with the DMA, metadata request bits, and other
 * info to pass to the driver callback.
 */
static __always_inline struct libeth_xdp_tx_desc
__libeth_xsk_xmit_fill_buf_md(const struct xdp_desc *xdesc,
			      const struct libeth_xdpsq *sq,
			      u64 priv)
{
	const struct xsk_tx_metadata_ops *tmo = libeth_xdp_priv_to_ptr(priv);
	struct libeth_xdp_tx_desc desc;
	struct xdp_desc_ctx ctx;

	ctx = xsk_buff_raw_get_ctx(sq->pool, xdesc->addr);
	desc = (typeof(desc)){
		.addr	= ctx.dma,
		__libeth_xdp_tx_len(xdesc->len),
	};

	BUILD_BUG_ON(!__builtin_constant_p(tmo == libeth_xsktmo));
	tmo = tmo == libeth_xsktmo ? &__libeth_xsktmo : tmo;

	xsk_tx_metadata_request(ctx.meta, tmo, &desc);

	return desc;
}

/* XSk xmit implementation */

/**
 * __libeth_xsk_xmit_fill_buf - internal helper to prepare XSk xmit w/o meta
 * @xdesc: &xdp_desc from the XSk buffer pool
 * @sq: XDPSQ abstraction for the queue
 *
 * Return: XDP Tx descriptor with the DMA and other info to pass to
 * the driver callback.
 */
static inline struct libeth_xdp_tx_desc
__libeth_xsk_xmit_fill_buf(const struct xdp_desc *xdesc,
			   const struct libeth_xdpsq *sq)
{
	return (struct libeth_xdp_tx_desc){
		.addr	= xsk_buff_raw_get_dma(sq->pool, xdesc->addr),
		__libeth_xdp_tx_len(xdesc->len),
	};
}

/**
 * libeth_xsk_xmit_fill_buf - internal helper to prepare an XSk xmit
 * @frm: &xdp_desc from the XSk buffer pool
 * @i: index on the HW queue
 * @sq: XDPSQ abstraction for the queue
 * @priv: XSk Tx metadata ops
 *
 * Depending on the metadata ops presence (determined at compile time), calls
 * the quickest helper to build a libeth XDP Tx descriptor.
 *
 * Return: XDP Tx descriptor with the synced DMA, metadata request bits,
 * and other info to pass to the driver callback.
 */
static __always_inline struct libeth_xdp_tx_desc
libeth_xsk_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
			 const struct libeth_xdpsq *sq, u64 priv)
{
	struct libeth_xdp_tx_desc desc;

	if (priv)
		desc = __libeth_xsk_xmit_fill_buf_md(&frm.desc, sq, priv);
	else
		desc = __libeth_xsk_xmit_fill_buf(&frm.desc, sq);

	desc.flags |= xsk_is_eop_desc(&frm.desc) ? LIBETH_XDP_TX_LAST : 0;

	xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);

	return desc;
}

/**
 * libeth_xsk_xmit_do_bulk - send XSk xmit frames
 * @pool: XSk buffer pool containing the frames to send
 * @xdpsq: opaque pointer to driver's XDPSQ struct
 * @budget: maximum number of frames that can be sent
 * @tmo: optional XSk Tx metadata ops
 * @prep: driver callback to build a &libeth_xdpsq
 * @xmit: driver callback to put frames to a HW queue
 * @finalize: driver callback to start a transmission
 *
 * Implements generic XSk xmit. Always turns on XSk Tx wakeup as it's assumed
 * lazy cleaning is used and interrupts are disabled for the queue.
 * HW descriptor filling is unrolled by ``LIBETH_XDP_TX_BATCH`` to optimize
 * writes.
 * Note that unlike other XDP Tx ops, the queue must be locked and cleaned
 * prior to calling this function to already know the available @budget.
 * @prep must only build a &libeth_xdpsq and return ``U32_MAX``.
 *
 * Return: false if @budget was exhausted, true otherwise.
 */
static __always_inline bool
libeth_xsk_xmit_do_bulk(struct xsk_buff_pool *pool, void *xdpsq, u32 budget,
			const struct xsk_tx_metadata_ops *tmo,
			u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
			void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
				     const struct libeth_xdpsq *sq, u64 priv),
			void (*finalize)(void *xdpsq, bool sent, bool flush))
{
	const struct libeth_xdp_tx_frame *bulk;
	bool wake;
	u32 n;

	wake = xsk_uses_need_wakeup(pool);
	if (wake)
		xsk_clear_tx_need_wakeup(pool);

	n = xsk_tx_peek_release_desc_batch(pool, budget);
	bulk = container_of(&pool->tx_descs[0], typeof(*bulk), desc);

	libeth_xdp_tx_xmit_bulk(bulk, xdpsq, n, true,
				libeth_xdp_ptr_to_priv(tmo), prep,
				libeth_xsk_xmit_fill_buf, xmit);
	finalize(xdpsq, n, true);

	if (wake)
		xsk_set_tx_need_wakeup(pool);

	return n < budget;
}
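
/*
 * A minimal usage sketch (the driver_* names and the ->pool field are
 * hypothetical, not part of libeth): the SQ is assumed already locked and
 * cleaned so that @budget is known, then the whole batch is sent in one call.
 *
 *	bool driver_xmit_zc(struct driver_xdpsq *sq, u32 budget)
 *	{
 *		return libeth_xsk_xmit_do_bulk(sq->pool, sq, budget,
 *					       libeth_xsktmo,
 *					       driver_xdpsq_prep,
 *					       driver_xmit_fill_desc,
 *					       driver_xdpsq_finalize);
 *	}
 */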
313 
314 /* Rx polling path */
315 
316 /**
317  * libeth_xsk_tx_init_bulk - initialize XDP Tx bulk for an XSk Rx NAPI poll
318  * @bq: bulk to initialize
319  * @prog: RCU pointer to the XDP program (never %NULL)
320  * @dev: target &net_device
321  * @xdpsqs: array of driver XDPSQ structs
322  * @num: number of active XDPSQs, the above array length
323  *
324  * Should be called on an onstack XDP Tx bulk before the XSk NAPI polling loop.
325  * Initializes all the needed fields to run libeth_xdp functions.
326  * Never checks if @prog is %NULL or @num == 0 as XDP must always be enabled
327  * when hitting this path.
328  */
329 #define libeth_xsk_tx_init_bulk(bq, prog, dev, xdpsqs, num)		     \
330 	__libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, true,	     \
331 				  __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
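
/*
 * For illustration only (the rxq fields below are hypothetical): the bulk
 * lives on the stack of the NAPI poll function and is initialized once
 * before the Rx loop.
 *
 *	struct libeth_xdp_tx_bulk bq;
 *
 *	libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->netdev,
 *				rxq->xdpsqs, rxq->num_xdpsqs);
 */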

struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
						 struct libeth_xdp_buff *xdp);

/**
 * libeth_xsk_process_buff - attach XSk Rx buffer to &libeth_xdp_buff
 * @head: head XSk buffer to attach the XSk buffer to (or %NULL)
 * @xdp: XSk buffer to process
 * @len: received data length from the descriptor
 *
 * If @head == %NULL, treats the XSk buffer as head and initializes
 * the required fields. Otherwise, attaches the buffer as a frag.
 * Already performs DMA sync-for-CPU and frame start prefetch
 * (for head buffers only).
 *
 * Return: head XSk buffer on success or if the descriptor must be skipped
 * (empty), %NULL if there is no space for a new frag.
 */
static inline struct libeth_xdp_buff *
libeth_xsk_process_buff(struct libeth_xdp_buff *head,
			struct libeth_xdp_buff *xdp, u32 len)
{
	if (unlikely(!len)) {
		libeth_xsk_buff_free_slow(xdp);
		return head;
	}

	xsk_buff_set_size(&xdp->base, len);
	xsk_buff_dma_sync_for_cpu(&xdp->base);

	if (head)
		return libeth_xsk_buff_add_frag(head, xdp);

	prefetch(xdp->data);

	return xdp;
}
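
/*
 * A rough sketch of the per-descriptor part of an Rx loop (the rxq/fqes
 * names and the EOP check are hypothetical): buffers are accumulated onto
 * @head until the end-of-packet descriptor, then the complete frame is
 * handed to the run/pass helpers below.
 *
 *	xdp = libeth_xsk_process_buff(head, rxq->fqes[idx], len);
 *	if (!xdp)
 *		break;		// no space for a new frag, leave the loop
 *
 *	head = xdp;
 *	if (!driver_desc_is_eop(desc))
 *		continue;	// wait for the remaining frags
 *
 *	// run the XDP prog / build an skb for the complete frame here
 *	head = NULL;
 */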

void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
				 const struct libeth_xdp_buff *xdp);

u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
			       const struct libeth_xdp_tx_bulk *bq,
			       enum xdp_action act, int ret);

/**
 * __libeth_xsk_run_prog - run XDP program on XSk buffer
 * @xdp: XSk buffer to run the prog on
 * @bq: buffer bulk for ``XDP_TX`` queueing
 *
 * Internal inline abstraction to run XDP program on XSk Rx path. Handles
 * only the most common ``XDP_REDIRECT`` inline, the rest is processed
 * externally.
 * Reports an XDP prog exception on errors.
 *
 * Return: libeth_xdp prog verdict depending on the prog's verdict.
 */
static __always_inline u32
__libeth_xsk_run_prog(struct libeth_xdp_buff *xdp,
		      const struct libeth_xdp_tx_bulk *bq)
{
	enum xdp_action act;
	int ret = 0;

	act = bpf_prog_run_xdp(bq->prog, &xdp->base);
	if (unlikely(act != XDP_REDIRECT))
rest:
		return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);

	ret = xdp_do_redirect(bq->dev, &xdp->base, bq->prog);
	if (unlikely(ret))
		goto rest;

	return LIBETH_XDP_REDIRECT;
}

/**
 * libeth_xsk_run_prog - run XDP program on XSk path and handle all verdicts
 * @xdp: XSk buffer to process
 * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
 * @fl: driver ``XDP_TX`` bulk flush callback
 *
 * Run the attached XDP program and handle all possible verdicts.
 * Prefer using it via LIBETH_XSK_DEFINE_RUN{,_PASS,_PROG}().
 *
 * Return: libeth_xdp prog verdict depending on the prog's verdict.
 */
#define libeth_xsk_run_prog(xdp, bq, fl)				     \
	__libeth_xdp_run_flush(xdp, bq, __libeth_xsk_run_prog,		     \
			       libeth_xsk_tx_queue_bulk, fl)

/**
 * __libeth_xsk_run_pass - helper to run XDP program and handle the result
 * @xdp: XSk buffer to process
 * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
 * @napi: NAPI to build an skb and pass it up the stack
 * @rs: onstack libeth RQ stats
 * @md: metadata that should be filled to the XSk buffer
 * @prep: callback for filling the metadata
 * @run: driver wrapper to run XDP program
 * @populate: driver callback to populate an skb with the HW descriptor data
 *
 * Inline abstraction, XSk's counterpart of __libeth_xdp_run_pass(), see its
 * doc for details.
 *
 * Return: false if the polling loop must be exited due to lack of free
 * buffers, true otherwise.
 */
static __always_inline bool
__libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
		      struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
		      struct libeth_rq_napi_stats *rs, const void *md,
		      void (*prep)(struct libeth_xdp_buff *xdp,
				   const void *md),
		      u32 (*run)(struct libeth_xdp_buff *xdp,
				 struct libeth_xdp_tx_bulk *bq),
		      bool (*populate)(struct sk_buff *skb,
				       const struct libeth_xdp_buff *xdp,
				       struct libeth_rq_napi_stats *rs))
{
	struct sk_buff *skb;
	u32 act;

	rs->bytes += xdp->base.data_end - xdp->data;
	rs->packets++;

	if (unlikely(xdp_buff_has_frags(&xdp->base)))
		libeth_xsk_buff_stats_frags(rs, xdp);

	if (prep && (!__builtin_constant_p(!!md) || md))
		prep(xdp, md);

	act = run(xdp, bq);
	if (likely(act == LIBETH_XDP_REDIRECT))
		return true;

	if (act != LIBETH_XDP_PASS)
		return act != LIBETH_XDP_ABORTED;

	skb = xdp_build_skb_from_zc(&xdp->base);
	if (unlikely(!skb)) {
		libeth_xsk_buff_free_slow(xdp);
		return true;
	}

	if (unlikely(!populate(skb, xdp, rs))) {
		napi_consume_skb(skb, true);
		return true;
	}

	napi_gro_receive(napi, skb);

	return true;
}

/**
 * libeth_xsk_run_pass - helper to run XDP program and handle the result
 * @xdp: XSk buffer to process
 * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
 * @napi: NAPI to build an skb and pass it up the stack
 * @rs: onstack libeth RQ stats
 * @desc: pointer to the HW descriptor for that frame
 * @run: driver wrapper to run XDP program
 * @populate: driver callback to populate an skb with the HW descriptor data
 *
 * Wrapper around the underscored version when "fill the descriptor metadata"
 * means just writing the pointer to the HW descriptor as @xdp->desc.
 */
#define libeth_xsk_run_pass(xdp, bq, napi, rs, desc, run, populate)	     \
	__libeth_xsk_run_pass(xdp, bq, napi, rs, desc, libeth_xdp_prep_desc, \
			      run, populate)

/**
 * libeth_xsk_finalize_rx - finalize XDPSQ after an XSk NAPI polling loop
 * @bq: ``XDP_TX`` frame bulk
 * @flush: driver callback to flush the bulk
 * @finalize: driver callback to start sending the frames and run the timer
 *
 * Flush the bulk if there are frames left to send, kick the queue and flush
 * the XDP maps.
 */
#define libeth_xsk_finalize_rx(bq, flush, finalize)			     \
	__libeth_xdp_finalize_rx(bq, LIBETH_XDP_TX_XSK, flush, finalize)

/*
 * Helpers to reduce boilerplate code in drivers.
 *
 * Typical driver XSk Rx flow would be (excl. bulk and buff init, frag attach):
 *
 * LIBETH_XDP_DEFINE_START();
 * LIBETH_XSK_DEFINE_FLUSH_TX(static driver_xsk_flush_tx, driver_xsk_tx_prep,
 *			      driver_xdp_xmit);
 * LIBETH_XSK_DEFINE_RUN(static driver_xsk_run, driver_xsk_run_prog,
 *			 driver_xsk_flush_tx, driver_populate_skb);
 * LIBETH_XSK_DEFINE_FINALIZE(static driver_xsk_finalize_rx,
 *			      driver_xsk_flush_tx, driver_xdp_finalize_sq);
 * LIBETH_XDP_DEFINE_END();
 *
 * This will build a set of 4 static functions. The compiler is free to decide
 * whether to inline them.
 * Then, in the NAPI polling function:
 *
 *	while (packets < budget) {
 *		// ...
 *		if (!driver_xsk_run(xdp, &bq, napi, &rs, desc))
 *			break;
 *	}
 *	driver_xsk_finalize_rx(&bq);
 */

/**
 * LIBETH_XSK_DEFINE_FLUSH_TX - define a driver XSk ``XDP_TX`` flush function
 * @name: name of the function to define
 * @prep: driver callback to clean an XDPSQ
 * @xmit: driver callback to write a HW Tx descriptor
 */
#define LIBETH_XSK_DEFINE_FLUSH_TX(name, prep, xmit)			     \
	__LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xsk)

/**
 * LIBETH_XSK_DEFINE_RUN_PROG - define a driver XDP program run function
 * @name: name of the function to define
 * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
 */
#define LIBETH_XSK_DEFINE_RUN_PROG(name, flush)				     \
	u32 __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xsk)

/**
 * LIBETH_XSK_DEFINE_RUN_PASS - define a driver buffer process + pass function
 * @name: name of the function to define
 * @run: driver callback to run XDP program (above)
 * @populate: driver callback to fill an skb with HW descriptor info
 */
#define LIBETH_XSK_DEFINE_RUN_PASS(name, run, populate)			     \
	bool __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xsk)

/**
 * LIBETH_XSK_DEFINE_RUN - define a driver buffer process, run + pass function
 * @name: name of the function to define
 * @run: name of the XDP prog run function to define
 * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
 * @populate: driver callback to fill an skb with HW descriptor info
 */
#define LIBETH_XSK_DEFINE_RUN(name, run, flush, populate)		     \
	__LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XSK)

/**
 * LIBETH_XSK_DEFINE_FINALIZE - define a driver XSk NAPI poll finalize function
 * @name: name of the function to define
 * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
 * @finalize: driver callback to finalize an XDPSQ and run the timer
 */
#define LIBETH_XSK_DEFINE_FINALIZE(name, flush, finalize)		     \
	__LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xsk)

/* Refilling */

/**
 * struct libeth_xskfq - structure representing an XSk buffer (fill) queue
 * @fp: hotpath part of the structure
 * @pool: &xsk_buff_pool for buffer management
 * @fqes: array of XSk buffer pointers
 * @descs: opaque pointer to the HW descriptor array
 * @ntu: index of the next buffer to poll
 * @count: number of descriptors/buffers the queue has
 * @pending: current number of XSkFQEs to refill
 * @thresh: threshold below which the queue is refilled
 * @buf_len: HW-writeable length per buffer
 * @nid: ID of the closest NUMA node with memory
 */
struct libeth_xskfq {
	struct_group_tagged(libeth_xskfq_fp, fp,
		struct xsk_buff_pool	*pool;
		struct libeth_xdp_buff	**fqes;
		void			*descs;

		u32			ntu;
		u32			count;
	);

	/* Cold fields */
	u32			pending;
	u32			thresh;

	u32			buf_len;
	int			nid;
};

int libeth_xskfq_create(struct libeth_xskfq *fq);
void libeth_xskfq_destroy(struct libeth_xskfq *fq);
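
/*
 * Rough lifecycle sketch. Which fields the driver must fill before
 * libeth_xskfq_create() is an assumption here, and the rxq/pool names are
 * hypothetical; treat this as an illustration of the call order only:
 *
 *	rxq->xskfq.pool	 = pool;		// from the XSk setup path
 *	rxq->xskfq.count = rxq->desc_count;
 *	rxq->xskfq.nid	 = rxq->numa_node;	// preferred NUMA node
 *
 *	err = libeth_xskfq_create(&rxq->xskfq);
 *	...
 *	libeth_xskfq_destroy(&rxq->xskfq);
 */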

/**
 * libeth_xsk_buff_xdp_get_dma - get DMA address of XSk &libeth_xdp_buff
 * @xdp: buffer to get the DMA addr for
 */
#define libeth_xsk_buff_xdp_get_dma(xdp)				     \
	xsk_buff_xdp_get_dma(&(xdp)->base)

/**
 * libeth_xskfqe_alloc - allocate @n XSk Rx buffers
 * @fq: hotpath part of the XSkFQ, usually onstack
 * @n: number of buffers to allocate
 * @fill: driver callback to write DMA addresses to HW descriptors
 *
 * Note that @fq->ntu gets updated, but ::pending must be recalculated
 * by the caller.
 *
 * Return: number of buffers refilled.
 */
static __always_inline u32
libeth_xskfqe_alloc(struct libeth_xskfq_fp *fq, u32 n,
		    void (*fill)(const struct libeth_xskfq_fp *fq, u32 i))
{
	u32 this, ret, done = 0;
	struct xdp_buff **xskb;

	this = fq->count - fq->ntu;
	if (likely(this > n))
		this = n;

again:
	xskb = (typeof(xskb))&fq->fqes[fq->ntu];
	ret = xsk_buff_alloc_batch(fq->pool, xskb, this);

	for (u32 i = 0, ntu = fq->ntu; likely(i < ret); i++)
		fill(fq, ntu + i);

	done += ret;
	fq->ntu += ret;

	if (likely(fq->ntu < fq->count) || unlikely(ret < this))
		goto out;

	fq->ntu = 0;

	if (this < n) {
		this = n - this;
		goto again;
	}

out:
	return done;
}
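
/*
 * An illustrative refill sketch (the rxq field names are made up): the helper
 * takes only the hotpath part of the queue and, as noted above, ::pending has
 * to be recomputed by the caller from the returned count.
 *
 *	done = libeth_xskfqe_alloc(&rxq->xskfq.fp, rxq->xskfq.pending,
 *				   driver_xsk_fill_fqe);
 *	rxq->xskfq.pending -= done;
 */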

/* .ndo_xsk_wakeup */

void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi);
void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid);
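
/*
 * Intended wiring, as a sketch only (the queue lookup and field names are
 * hypothetical): the CSD is initialized once next to the queue's NAPI, and
 * the driver's .ndo_xsk_wakeup then just kicks it.
 *
 *	libeth_xsk_init_wakeup(&q->csd, &q->napi);	// at queue init time
 *
 *	int driver_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 *	{
 *		struct driver_queue *q = driver_get_xsk_queue(dev, qid);
 *
 *		libeth_xsk_wakeup(&q->csd, qid);
 *
 *		return 0;
 *	}
 */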

/* Pool setup */

int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable);
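
/*
 * For the XDP_SETUP_XSK_POOL branch of a driver's .ndo_bpf, something along
 * these lines is the expected shape. This is only a sketch: deriving @enable
 * from the pool pointer and the internal pool lookup by @qid are assumptions.
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return libeth_xsk_setup_pool(dev, xdp->xsk.queue_id,
 *					     !!xdp->xsk.pool);
 */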

#endif /* __LIBETH_XSK_H */