// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
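		/* Make sure any in-flight XDP_TX/XDP_REDIRECT producers that
		 * reference this XDP ring have finished before its buffers
		 * are freed.
		 */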
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on a given ring
 * @vsi: the VSI that contains the queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring(), so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X interrupt settings for a given queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for a given queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

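	/* Serialize with any other queue-pair reconfiguration; give up
	 * after roughly 50-100 ms (50 iterations of usleep_range(1000, 2000))
	 * instead of spinning forever.
	 */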
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	u16 size = __struct_size(qg_buf);
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		return err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			return err;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_rxq(rx_ring);
	if (err)
		return err;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		return err;

	clear_bit(ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	return 0;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate the SW ring for either XSK or normal buffers
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Allocates the new SW ring and returns -ENOMEM if the allocation fails.
 * On success, frees the old buffer ring and substitutes the newly
 * allocated one.
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}

/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffers for the rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps,
			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
		rx_ring = vsi->rx_rings[q];
		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

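	/* Swapping a pool on a live interface requires the queue pair to be
	 * quiesced first and re-enabled afterwards.
	 */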
	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by the caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
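		/* Clear the writeback status so a stale DD bit from a
		 * previous use of this slot is never seen.
		 */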
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		xdp++;
	}

	return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place @count descriptors onto the Rx ring. Handle the ring wrap for
 * the case where the space from next_to_use up to the end of the ring
 * is less than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

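	/* Fill up to the end of the ring first; if the pool cannot provide
	 * enough buffers to reach the wrap point, publish what was filled
	 * and bail out early.
	 */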
	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for the internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to the internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

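	/* Fragment pages belong to the XSK pool and cannot be handed to the
	 * stack; copy each fragment into a freshly allocated page instead.
	 */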
	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		/* copy the fragment's payload, not its struct page pointer */
		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to the XSK completion queue
 * @xdp_ring: XDP Tx ring
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

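	/* Tx descriptors complete in order. The RS bit is set on the most
	 * recently produced descriptor (next_to_use - 1), so if its DD bit
	 * is set, every descriptor from next_to_clean up to it is done.
	 */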
	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
			xdp_ring->xdp_tx_active--;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	return completed_frames;
}

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 *
 * Note that this function works directly on xdp_buff, there is no need to
 * convert it to xdp_frame. The xdp_buff pointer is stored to ice_tx_buf so
 * that the cleaning side can xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for a successfully produced desc, ICE_XDP_CONSUMED if
 * there was not enough space on the XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_buf = &xdp_ring->tx_buf[ntu];
	head = xdp;

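	/* Produce one Tx descriptor per buffer: the head xdp_buff first,
	 * then one per fragment.
	 */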
	for (;;) {
		dma_addr_t dma;

		dma = xsk_buff_xdp_get_dma(xdp);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);

		tx_buf->xdp = xdp;
		tx_buf->type = ICE_TX_BUF_XSK_TX;
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		/* account for each xdp_buff from xsk_buff_pool */
		xdp_ring->xdp_tx_active++;

		if (++ntu == xdp_ring->count)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		xdp = xsk_buff_get_frag(head);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	return ICE_XDP_TX;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

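	/* XDP_REDIRECT is checked ahead of the switch as the expected hot
	 * path for AF_XDP. -ENOBUFS with need_wakeup in use means the
	 * destination queue is full, so exit NAPI early (ICE_XDP_EXIT)
	 * rather than drop.
	 */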
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}

static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

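	/* No room left in the shared info; drop the whole multi-buffer
	 * frame, frags included.
	 */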
	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start), 0, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* ZC path is enabled only when the XDP program is set,
	 * so here it cannot be NULL
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

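	/* ntc != first_desc means the previous NAPI poll ended in the middle
	 * of a multi-buffer frame; pick up its head buffer again and keep
	 * appending fragments to it.
	 */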
	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

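		/* If we have caught up with next_to_use, the slot at ntc has
		 * no buffer posted behind it yet; stop processing here.
		 */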
		if (unlikely(ntc == ntu))
			break;

		xdp = *ice_xdp_buf(rx_ring, ntc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);

		if (!first) {
			first = xdp;
			xdp_buff_clear_frags_flag(first);
		} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
			break;
		}

		if (++ntc == cnt)
			ntc = 0;

		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			first = NULL;
			rx_ring->first_desc = ntc;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(first);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += xdp_get_buff_len(first);
		total_rx_packets++;

		first = NULL;
		rx_ring->first_desc = ntc;
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, first);
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			break;
		}

		first = NULL;
		rx_ring->first_desc = ntc;

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
				       ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

	rx_ring->next_to_clean = ntc;
	entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

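	/* loop_unrolled_for is a pragma-based hint asking the compiler to
	 * fully unroll this fixed-count (PKTS_PER_BATCH) loop.
	 */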
	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

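	/* PKTS_PER_BATCH is a power of two, so the mask below is equivalent
	 * to nb_pkts % PKTS_PER_BATCH.
	 */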
	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

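	/* If the descriptors would cross the end of the ring, produce them
	 * in two chunks: up to the wrap point first, then from index 0.
	 */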
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
1199