xref: /linux/drivers/net/ethernet/intel/i40e/i40e_xsk.c (revision 385f186aba3d2f7122b71d6d4c7e236b9d4e8003)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2018 Intel Corporation. */
3 
4 #include <linux/bpf_trace.h>
5 #include <net/xdp_sock_drv.h>
6 #include "i40e_txrx_common.h"
7 #include "i40e_xsk.h"
8 
9 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
10 {
11 	memset(rx_ring->rx_bi_zc, 0,
12 	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
13 }
14 
15 static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
16 {
17 	return &rx_ring->rx_bi_zc[idx];
18 }
19 
20 /**
21  * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
22  * @rx_ring: Current rx ring
23  * @pool_present: whether an XSK buffer pool is present
24  *
25  * Tries to allocate a new SW ring and returns -ENOMEM if the allocation fails.
26  * On success, the old SW ring is freed and replaced with the newly allocated one.
27  * Returns 0 on success, negative on failure
28  */
29 static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
30 {
31 	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
32 					  sizeof(*rx_ring->rx_bi);
33 	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
34 
35 	if (!sw_ring)
36 		return -ENOMEM;
37 
38 	if (pool_present) {
39 		kfree(rx_ring->rx_bi);
40 		rx_ring->rx_bi = NULL;
41 		rx_ring->rx_bi_zc = sw_ring;
42 	} else {
43 		kfree(rx_ring->rx_bi_zc);
44 		rx_ring->rx_bi_zc = NULL;
45 		rx_ring->rx_bi = sw_ring;
46 	}
47 	return 0;
48 }
49 
50 /**
51  * i40e_realloc_rx_bi_zc - reallocate rx SW rings
52  * @vsi: Current VSI
53  * @zc: whether zero-copy (XSK) is being enabled
54  *
55  * Reallocate the SW ring for every rx_ring that might be used by XSK.
56  * The XSK and non-XSK paths use different SW ring element sizes.
57  * Returns 0 on success, negative on failure
58  */
59 int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
60 {
61 	struct i40e_ring *rx_ring;
62 	unsigned long q;
63 
64 	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
65 		rx_ring = vsi->rx_rings[q];
66 		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
67 			return -ENOMEM;
68 	}
69 	return 0;
70 }
71 
72 /**
73  * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool with a
74  * certain ring/qid
75  * @vsi: Current VSI
76  * @pool: buffer pool
77  * @qid: Rx ring to associate buffer pool with
78  *
79  * Returns 0 on success, <0 on failure
80  **/
81 static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
82 				struct xsk_buff_pool *pool,
83 				u16 qid)
84 {
85 	struct net_device *netdev = vsi->netdev;
86 	bool if_running;
87 	int err;
88 
89 	if (vsi->type != I40E_VSI_MAIN)
90 		return -EINVAL;
91 
92 	if (qid >= vsi->num_queue_pairs)
93 		return -EINVAL;
94 
95 	if (qid >= netdev->real_num_rx_queues ||
96 	    qid >= netdev->real_num_tx_queues)
97 		return -EINVAL;
98 
99 	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
100 	if (err)
101 		return err;
102 
103 	set_bit(qid, vsi->af_xdp_zc_qps);
104 
105 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
106 
107 	if (if_running) {
108 		err = i40e_queue_pair_disable(vsi, qid);
109 		if (err)
110 			return err;
111 
112 		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
113 		if (err)
114 			return err;
115 
116 		err = i40e_queue_pair_enable(vsi, qid);
117 		if (err)
118 			return err;
119 
120 		/* Kick-start the NAPI context so that receiving will start */
121 		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
122 		if (err)
123 			return err;
124 	}
125 
126 	return 0;
127 }
128 
129 /**
130  * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
131  * certain ring/qid
132  * @vsi: Current VSI
133  * @qid: Rx ring to disassociate the buffer pool from
134  *
135  * Returns 0 on success, <0 on failure
136  **/
137 static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
138 {
139 	struct net_device *netdev = vsi->netdev;
140 	struct xsk_buff_pool *pool;
141 	bool if_running;
142 	int err;
143 
144 	pool = xsk_get_pool_from_qid(netdev, qid);
145 	if (!pool)
146 		return -EINVAL;
147 
148 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
149 
150 	if (if_running) {
151 		err = i40e_queue_pair_disable(vsi, qid);
152 		if (err)
153 			return err;
154 	}
155 
156 	clear_bit(qid, vsi->af_xdp_zc_qps);
157 	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
158 
159 	if (if_running) {
160 		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
161 		if (err)
162 			return err;
163 		err = i40e_queue_pair_enable(vsi, qid);
164 		if (err)
165 			return err;
166 	}
167 
168 	return 0;
169 }
170 
171 /**
172  * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
173  * a ring/qid
174  * @vsi: Current VSI
175  * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
176  * @qid: Rx ring to associate the buffer pool with, or to disassociate it from
177  *
178  * This function attaches a buffer pool to, or detaches it from, the given ring.
179  *
180  * Returns 0 on success, <0 on failure
181  **/
182 int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
183 			u16 qid)
184 {
185 	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
186 		i40e_xsk_pool_disable(vsi, qid);
187 }
188 
189 /**
190  * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
191  * @rx_ring: Rx ring
192  * @xdp: xdp_buff used as input to the XDP program
193  * @xdp_prog: XDP program to run
194  *
195  * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
196  **/
197 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
198 			   struct bpf_prog *xdp_prog)
199 {
200 	int err, result = I40E_XDP_PASS;
201 	struct i40e_ring *xdp_ring;
202 	u32 act;
203 
204 	act = bpf_prog_run_xdp(xdp_prog, xdp);
205 
206 	if (likely(act == XDP_REDIRECT)) {
207 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
208 		if (!err)
209 			return I40E_XDP_REDIR;
210 		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
211 			result = I40E_XDP_EXIT;
212 		else
213 			result = I40E_XDP_CONSUMED;
214 		goto out_failure;
215 	}
216 
217 	switch (act) {
218 	case XDP_PASS:
219 		break;
220 	case XDP_TX:
221 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
222 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
223 		if (result == I40E_XDP_CONSUMED)
224 			goto out_failure;
225 		break;
226 	case XDP_DROP:
227 		result = I40E_XDP_CONSUMED;
228 		break;
229 	default:
230 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
231 		fallthrough;
232 	case XDP_ABORTED:
233 		result = I40E_XDP_CONSUMED;
234 out_failure:
235 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
236 	}
237 	return result;
238 }
239 
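/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the XSK buffer
 * pool and places them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/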
240 bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
241 {
242 	u16 ntu = rx_ring->next_to_use;
243 	union i40e_rx_desc *rx_desc;
244 	struct xdp_buff **xdp;
245 	u32 nb_buffs, i;
246 	dma_addr_t dma;
247 
248 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
249 	xdp = i40e_rx_bi(rx_ring, ntu);
250 
251 	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
252 	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
253 	if (!nb_buffs)
254 		return false;
255 
256 	i = nb_buffs;
257 	while (i--) {
258 		dma = xsk_buff_xdp_get_dma(*xdp);
259 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
260 		rx_desc->read.hdr_addr = 0;
261 
262 		rx_desc++;
263 		xdp++;
264 	}
265 
266 	ntu += nb_buffs;
267 	if (ntu == rx_ring->count) {
268 		rx_desc = I40E_RX_DESC(rx_ring, 0);
269 		ntu = 0;
270 	}
271 
272 	/* clear the status bits for the next_to_use descriptor */
273 	rx_desc->wb.qword1.status_error_len = 0;
274 	i40e_release_rx_desc(rx_ring, ntu);
275 
276 	return count == nb_buffs;
277 }
278 
279 /**
280  * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
281  * @rx_ring: Rx ring
282  * @xdp: xdp_buff
283  *
284  * This function allocates a new skb and copies the zero-copy Rx buffer into it.
285  *
286  * Returns the skb, or NULL on failure.
287  **/
288 static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
289 					     struct xdp_buff *xdp)
290 {
291 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
292 	unsigned int metasize = xdp->data - xdp->data_meta;
293 	struct skb_shared_info *sinfo = NULL;
294 	struct sk_buff *skb;
295 	u32 nr_frags = 0;
296 
297 	if (unlikely(xdp_buff_has_frags(xdp))) {
298 		sinfo = xdp_get_shared_info_from_buff(xdp);
299 		nr_frags = sinfo->nr_frags;
300 	}
301 	net_prefetch(xdp->data_meta);
302 
303 	/* allocate a skb to store the frags */
304 	/* allocate an skb to copy the packet contents into */
305 	if (unlikely(!skb))
306 		goto out;
307 
308 	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
309 	       ALIGN(totalsize, sizeof(long)));
310 
311 	if (metasize) {
312 		skb_metadata_set(skb, metasize);
313 		__skb_pull(skb, metasize);
314 	}
315 
316 	if (likely(!xdp_buff_has_frags(xdp)))
317 		goto out;
318 
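	/* The frag memory belongs to the XSK buffer pool and is returned to
	 * it below, so copy each frag into a freshly allocated page before
	 * attaching it to the skb.
	 */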
319 	for (int i = 0; i < nr_frags; i++) {
320 		struct skb_shared_info *skinfo = skb_shinfo(skb);
321 		skb_frag_t *frag = &sinfo->frags[i];
322 		struct page *page;
323 		void *addr;
324 
325 		page = dev_alloc_page();
326 		if (!page) {
327 			dev_kfree_skb(skb);
328 			return NULL;
329 		}
330 		addr = page_to_virt(page);
331 
332 		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));
333 
334 		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
335 					   page, 0, skb_frag_size(frag));
336 	}
337 
338 out:
339 	xsk_buff_free(xdp);
340 	return skb;
341 }
342 
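/**
 * i40e_handle_xdp_result_zc - Act on the verdict from the XDP program
 * @rx_ring: Rx ring
 * @xdp_buff: xdp_buff for the received frame
 * @rx_desc: Rx descriptor for the frame
 * @rx_packets: packet count for this frame (output)
 * @rx_bytes: byte count for this frame (output)
 * @xdp_res: verdict returned by i40e_run_xdp_zc()
 * @failure: set to true on I40E_XDP_EXIT so the caller can leave the NAPI loop
 *
 * TX and REDIRECT buffers have already been handed off, CONSUMED buffers are
 * freed, and for PASS an skb is constructed and sent up the stack.
 **/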
343 static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
344 				      struct xdp_buff *xdp_buff,
345 				      union i40e_rx_desc *rx_desc,
346 				      unsigned int *rx_packets,
347 				      unsigned int *rx_bytes,
348 				      unsigned int xdp_res,
349 				      bool *failure)
350 {
351 	struct sk_buff *skb;
352 
353 	*rx_packets = 1;
354 	*rx_bytes = xdp_get_buff_len(xdp_buff);
355 
356 	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
357 		return;
358 
359 	if (xdp_res == I40E_XDP_EXIT) {
360 		*failure = true;
361 		return;
362 	}
363 
364 	if (xdp_res == I40E_XDP_CONSUMED) {
365 		xsk_buff_free(xdp_buff);
366 		return;
367 	}
368 	if (xdp_res == I40E_XDP_PASS) {
369 		/* NB! We are not checking for errors using
370 		 * i40e_test_staterr with
371 		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
372 		 * SBP is *not* set in PRT_SBPVSI (it is not set by default).
373 		 */
374 		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
375 		if (!skb) {
376 			rx_ring->rx_stats.alloc_buff_failed++;
377 			*rx_packets = 0;
378 			*rx_bytes = 0;
379 			return;
380 		}
381 
382 		if (eth_skb_pad(skb)) {
383 			*rx_packets = 0;
384 			*rx_bytes = 0;
385 			return;
386 		}
387 
388 		i40e_process_skb_fields(rx_ring, rx_desc, skb);
389 		napi_gro_receive(&rx_ring->q_vector->napi, skb);
390 		return;
391 	}
392 
393 	/* Should never get here, as all valid cases have been handled already.
394 	 */
395 	WARN_ON_ONCE(1);
396 }
397 
398 /**
399  * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
400  * @rx_ring: Rx ring
401  * @budget: NAPI budget
402  *
403  * Returns the amount of work completed
404  **/
405 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
406 {
407 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
408 	u16 next_to_process = rx_ring->next_to_process;
409 	u16 next_to_clean = rx_ring->next_to_clean;
410 	unsigned int xdp_res, xdp_xmit = 0;
411 	struct xdp_buff *first = NULL;
412 	u32 count = rx_ring->count;
413 	struct bpf_prog *xdp_prog;
414 	u32 entries_to_alloc;
415 	bool failure = false;
416 
417 	if (next_to_process != next_to_clean)
418 		first = *i40e_rx_bi(rx_ring, next_to_clean);
419 
420 	/* NB! xdp_prog will always be !NULL, because this path can
421 	 * only be enabled by installing an XDP program.
422 	 */
423 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
424 
425 	while (likely(total_rx_packets < (unsigned int)budget)) {
426 		union i40e_rx_desc *rx_desc;
427 		unsigned int rx_packets;
428 		unsigned int rx_bytes;
429 		struct xdp_buff *bi;
430 		unsigned int size;
431 		u64 qword;
432 
433 		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
434 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
435 
436 		/* This memory barrier is needed to keep us from reading
437 		 * any other fields out of the rx_desc until we have
438 		 * verified the descriptor has been written back.
439 		 */
440 		dma_rmb();
441 
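		/* Programming status descriptors (e.g. flow director filter
		 * results) carry no packet data, so free the associated XSK
		 * buffer and move on to the next descriptor.
		 */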
442 		if (i40e_rx_is_programming_status(qword)) {
443 			i40e_clean_programming_status(rx_ring,
444 						      rx_desc->raw.qword[0],
445 						      qword);
446 			bi = *i40e_rx_bi(rx_ring, next_to_process);
447 			xsk_buff_free(bi);
448 			if (++next_to_process == count)
449 				next_to_process = 0;
450 			continue;
451 		}
452 
453 		size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
454 		if (!size)
455 			break;
456 
457 		bi = *i40e_rx_bi(rx_ring, next_to_process);
458 		xsk_buff_set_size(bi, size);
459 		xsk_buff_dma_sync_for_cpu(bi);
460 
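		/* The first buffer of a frame becomes the head xdp_buff;
		 * any subsequent buffers are attached to it as frags
		 * (XDP multi-buffer support).
		 */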
461 		if (!first)
462 			first = bi;
463 		else if (!xsk_buff_add_frag(first, bi)) {
464 			xsk_buff_free(first);
465 			break;
466 		}
467 
468 		if (++next_to_process == count)
469 			next_to_process = 0;
470 
471 		if (i40e_is_non_eop(rx_ring, rx_desc))
472 			continue;
473 
474 		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
475 		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
476 					  &rx_bytes, xdp_res, &failure);
477 		next_to_clean = next_to_process;
478 		if (failure)
479 			break;
480 		total_rx_packets += rx_packets;
481 		total_rx_bytes += rx_bytes;
482 		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
483 		first = NULL;
484 	}
485 
486 	rx_ring->next_to_clean = next_to_clean;
487 	rx_ring->next_to_process = next_to_process;
488 
489 	entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
490 	if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
491 		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
492 
493 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
494 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
495 
496 	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
497 		if (failure || next_to_clean == rx_ring->next_to_use)
498 			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
499 		else
500 			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
501 
502 		return (int)total_rx_packets;
503 	}
504 	return failure ? budget : (int)total_rx_packets;
505 }
506 
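/**
 * i40e_xmit_pkt - Posts a single AF_XDP Tx descriptor
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor to transmit
 * @total_bytes: running byte counter, updated with the length of this frame
 *
 * Relies on I40E_TX_DESC_CMD_EOP being bit 0, so that OR-ing in the boolean
 * result of xsk_is_eop_desc() sets EOP only on the last descriptor of a frame.
 **/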
507 static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
508 			  unsigned int *total_bytes)
509 {
510 	u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
511 	struct i40e_tx_desc *tx_desc;
512 	dma_addr_t dma;
513 
514 	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
515 	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
516 
517 	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
518 	tx_desc->buffer_addr = cpu_to_le64(dma);
519 	tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);
520 
521 	*total_bytes += desc->len;
522 }
523 
524 static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
525 				unsigned int *total_bytes)
526 {
527 	u16 ntu = xdp_ring->next_to_use;
528 	struct i40e_tx_desc *tx_desc;
529 	dma_addr_t dma;
530 	u32 i;
531 
532 	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
533 		u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
534 
535 		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
536 		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
537 
538 		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
539 		tx_desc->buffer_addr = cpu_to_le64(dma);
540 		tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);
541 
542 		*total_bytes += desc[i].len;
543 	}
544 
545 	xdp_ring->next_to_use = ntu;
546 }
547 
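/**
 * i40e_fill_tx_hw_ring - Posts a batch of AF_XDP Tx descriptors
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP descriptors to transmit
 * @nb_pkts: number of descriptors in @descs
 * @total_bytes: running byte counter, updated with the bytes posted
 *
 * Descriptors are written in chunks of PKTS_PER_BATCH, with any remainder
 * handled one at a time.
 **/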
548 static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
549 				 unsigned int *total_bytes)
550 {
551 	u32 batched, leftover, i;
552 
553 	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
554 	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
555 	for (i = 0; i < batched; i += PKTS_PER_BATCH)
556 		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
557 	for (i = batched; i < batched + leftover; i++)
558 		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
559 }
560 
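/**
 * i40e_set_rs_bit - Request a completion report for the last filled descriptor
 * @xdp_ring: XDP Tx ring
 *
 * Setting the RS (Report Status) bit on the last descriptor written asks the
 * hardware to report completion once the whole burst has been transmitted.
 **/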
561 static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
562 {
563 	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
564 	struct i40e_tx_desc *tx_desc;
565 
566 	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
567 	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
568 }
569 
570 /**
571  * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
572  * @xdp_ring: XDP Tx ring
573  * @budget: NAPI budget
574  *
575  * Returns true if the work is finished.
576  **/
577 static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
578 {
579 	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
580 	u32 nb_pkts, nb_processed = 0;
581 	unsigned int total_bytes = 0;
582 
583 	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
584 	if (!nb_pkts)
585 		return true;
586 
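	/* If the descriptors would wrap past the end of the ring, fill up
	 * to the end first, reset next_to_use, and then fill the remainder
	 * from the start of the ring.
	 */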
587 	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
588 		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
589 		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
590 		xdp_ring->next_to_use = 0;
591 	}
592 
593 	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
594 			     &total_bytes);
595 
596 	/* Request an interrupt for the last frame and bump tail ptr. */
597 	i40e_set_rs_bit(xdp_ring);
598 	i40e_xdp_ring_update_tail(xdp_ring);
599 
600 	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
601 
602 	return nb_pkts < budget;
603 }
604 
605 /**
606  * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
607  * @tx_ring: XDP Tx ring
608  * @tx_bi: Tx buffer info to clean
609  **/
610 static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
611 				     struct i40e_tx_buffer *tx_bi)
612 {
613 	xdp_return_frame(tx_bi->xdpf);
614 	tx_ring->xdp_tx_active--;
615 	dma_unmap_single(tx_ring->dev,
616 			 dma_unmap_addr(tx_bi, dma),
617 			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
618 	dma_unmap_len_set(tx_bi, len, 0);
619 }
620 
621 /**
622  * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
623  * @vsi: Current VSI
624  * @tx_ring: XDP Tx ring
625  *
626  * Returns true if cleanup/transmission is done.
627  **/
628 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
629 {
630 	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
631 	u32 i, completed_frames, xsk_frames = 0;
632 	u32 head_idx = i40e_get_head(tx_ring);
633 	struct i40e_tx_buffer *tx_bi;
634 	unsigned int ntc;
635 
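	/* The head index reported by the hardware may have wrapped around
	 * the ring; normalize it so the number of completed frames can be
	 * computed as a simple difference.
	 */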
636 	if (head_idx < tx_ring->next_to_clean)
637 		head_idx += tx_ring->count;
638 	completed_frames = head_idx - tx_ring->next_to_clean;
639 
640 	if (completed_frames == 0)
641 		goto out_xmit;
642 
643 	if (likely(!tx_ring->xdp_tx_active)) {
644 		xsk_frames = completed_frames;
645 		goto skip;
646 	}
647 
648 	ntc = tx_ring->next_to_clean;
649 
650 	for (i = 0; i < completed_frames; i++) {
651 		tx_bi = &tx_ring->tx_bi[ntc];
652 
653 		if (tx_bi->xdpf) {
654 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
655 			tx_bi->xdpf = NULL;
656 		} else {
657 			xsk_frames++;
658 		}
659 
660 		if (++ntc >= tx_ring->count)
661 			ntc = 0;
662 	}
663 
664 skip:
665 	tx_ring->next_to_clean += completed_frames;
666 	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
667 		tx_ring->next_to_clean -= tx_ring->count;
668 
669 	if (xsk_frames)
670 		xsk_tx_completed(bp, xsk_frames);
671 
672 	i40e_arm_wb(tx_ring, vsi, completed_frames);
673 
674 out_xmit:
675 	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
676 		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
677 
678 	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
679 }
680 
681 /**
682  * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
683  * @dev: the netdevice
684  * @queue_id: queue id to wake up
685  * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
686  *
687  * Returns <0 for errors, 0 otherwise.
688  **/
689 int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
690 {
691 	struct i40e_netdev_priv *np = netdev_priv(dev);
692 	struct i40e_vsi *vsi = np->vsi;
693 	struct i40e_pf *pf = vsi->back;
694 	struct i40e_ring *ring;
695 
696 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
697 		return -EAGAIN;
698 
699 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
700 		return -ENETDOWN;
701 
702 	if (!i40e_enabled_xdp_vsi(vsi))
703 		return -EINVAL;
704 
705 	if (queue_id >= vsi->num_queue_pairs)
706 		return -EINVAL;
707 
708 	if (!vsi->xdp_rings[queue_id]->xsk_pool)
709 		return -EINVAL;
710 
711 	ring = vsi->xdp_rings[queue_id];
712 
713 	/* The idea here is that if NAPI is running, mark a miss, so
714 	 * it will run again. If not, trigger an interrupt and
715 	 * schedule the NAPI from interrupt context. If NAPI would be
716 	 * scheduled here, the interrupt affinity would not be
717 	 * honored.
718 	 */
719 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
720 		i40e_force_wb(vsi, ring->q_vector);
721 
722 	return 0;
723 }
724 
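/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring to be cleaned
 *
 * Frees all XSK buffers that are still outstanding on the ring.
 **/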
725 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
726 {
727 	u16 ntc = rx_ring->next_to_clean;
728 	u16 ntu = rx_ring->next_to_use;
729 
730 	while (ntc != ntu) {
731 		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
732 
733 		xsk_buff_free(rx_bi);
734 		ntc++;
735 		if (ntc >= rx_ring->count)
736 			ntc = 0;
737 	}
738 }
739 
740 /**
741  * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
742  * @tx_ring: XDP Tx ring
743  **/
744 void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
745 {
746 	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
747 	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
748 	struct i40e_tx_buffer *tx_bi;
749 	u32 xsk_frames = 0;
750 
751 	while (ntc != ntu) {
752 		tx_bi = &tx_ring->tx_bi[ntc];
753 
754 		if (tx_bi->xdpf)
755 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
756 		else
757 			xsk_frames++;
758 
759 		tx_bi->xdpf = NULL;
760 
761 		ntc++;
762 		if (ntc >= tx_ring->count)
763 			ntc = 0;
764 	}
765 
766 	if (xsk_frames)
767 		xsk_tx_completed(bp, xsk_frames);
768 }
769 
770 /**
771  * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
772  * buffer pool attached
773  * @vsi: vsi
774  *
775  * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
776  **/
777 bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
778 {
779 	struct net_device *netdev = vsi->netdev;
780 	int i;
781 
782 	for (i = 0; i < vsi->num_queue_pairs; i++) {
783 		if (xsk_get_pool_from_qid(netdev, i))
784 			return true;
785 	}
786 
787 	return false;
788 }
789