xref: /linux/drivers/net/ethernet/intel/ice/ice_txrx.c (revision fc3a2810412c163b5df1b377d332e048860f45db)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/mm.h>
7 #include <linux/netdevice.h>
8 #include <linux/prefetch.h>
9 #include <linux/bpf_trace.h>
10 #include <net/dsfield.h>
11 #include <net/mpls.h>
12 #include <net/xdp.h>
13 #include "ice_txrx_lib.h"
14 #include "ice_lib.h"
15 #include "ice.h"
16 #include "ice_trace.h"
17 #include "ice_dcb_lib.h"
18 #include "ice_xsk.h"
19 #include "ice_eswitch.h"
20 
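/* max header bytes copied into the skb linear area by ice_construct_skb() */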
21 #define ICE_RX_HDR_SIZE		256
22 
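/* number of 1 ms waits ice_prgm_fdir_fltr() allows for two free Tx descriptors */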
23 #define ICE_FDIR_CLEAN_DELAY 10
24 
25 /**
26  * ice_prgm_fdir_fltr - Program a Flow Director filter
27  * @vsi: VSI to send dummy packet
28  * @fdir_desc: flow director descriptor
29  * @raw_packet: allocated buffer for flow director
30  */
31 int
32 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
33 		   u8 *raw_packet)
34 {
35 	struct ice_tx_buf *tx_buf, *first;
36 	struct ice_fltr_desc *f_desc;
37 	struct ice_tx_desc *tx_desc;
38 	struct ice_tx_ring *tx_ring;
39 	struct device *dev;
40 	dma_addr_t dma;
41 	u32 td_cmd;
42 	u16 i;
43 
44 	/* VSI and Tx ring */
45 	if (!vsi)
46 		return -ENOENT;
47 	tx_ring = vsi->tx_rings[0];
48 	if (!tx_ring || !tx_ring->desc)
49 		return -ENOENT;
50 	dev = tx_ring->dev;
51 
52 	/* we are using two descriptors to add/del a filter and we can wait */
53 	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
54 		if (!i)
55 			return -EAGAIN;
56 		msleep_interruptible(1);
57 	}
58 
59 	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
60 			     DMA_TO_DEVICE);
61 
62 	if (dma_mapping_error(dev, dma))
63 		return -EINVAL;
64 
65 	/* grab the next descriptor */
66 	i = tx_ring->next_to_use;
67 	first = &tx_ring->tx_buf[i];
68 	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
69 	memcpy(f_desc, fdir_desc, sizeof(*f_desc));
70 
71 	i++;
72 	i = (i < tx_ring->count) ? i : 0;
73 	tx_desc = ICE_TX_DESC(tx_ring, i);
74 	tx_buf = &tx_ring->tx_buf[i];
75 
76 	i++;
77 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
78 
79 	memset(tx_buf, 0, sizeof(*tx_buf));
80 	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
81 	dma_unmap_addr_set(tx_buf, dma, dma);
82 
83 	tx_desc->buf_addr = cpu_to_le64(dma);
84 	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
85 		 ICE_TX_DESC_CMD_RE;
86 
87 	tx_buf->type = ICE_TX_BUF_DUMMY;
88 	tx_buf->raw_buf = raw_packet;
89 
90 	tx_desc->cmd_type_offset_bsz =
91 		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
92 
93 	/* Force memory write to complete before letting h/w know
94 	 * there are new descriptors to fetch.
95 	 */
96 	wmb();
97 
98 	/* mark the data descriptor to be watched */
99 	first->next_to_watch = tx_desc;
100 
101 	writel(tx_ring->next_to_use, tx_ring->tail);
102 
103 	return 0;
104 }
105 
106 /**
107  * ice_unmap_and_free_tx_buf - Release a Tx buffer
108  * @ring: the ring that owns the buffer
109  * @tx_buf: the buffer to free
110  */
111 static void
112 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
113 {
114 	if (dma_unmap_len(tx_buf, len))
115 		dma_unmap_page(ring->dev,
116 			       dma_unmap_addr(tx_buf, dma),
117 			       dma_unmap_len(tx_buf, len),
118 			       DMA_TO_DEVICE);
119 
120 	switch (tx_buf->type) {
121 	case ICE_TX_BUF_DUMMY:
122 		devm_kfree(ring->dev, tx_buf->raw_buf);
123 		break;
124 	case ICE_TX_BUF_SKB:
125 		dev_kfree_skb_any(tx_buf->skb);
126 		break;
127 	case ICE_TX_BUF_XDP_TX:
128 		page_frag_free(tx_buf->raw_buf);
129 		break;
130 	case ICE_TX_BUF_XDP_XMIT:
131 		xdp_return_frame(tx_buf->xdpf);
132 		break;
133 	}
134 
135 	tx_buf->next_to_watch = NULL;
136 	tx_buf->type = ICE_TX_BUF_EMPTY;
137 	dma_unmap_len_set(tx_buf, len, 0);
138 	/* tx_buf must be completely set up in the transmit path */
139 }
140 
141 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
142 {
143 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
144 }
145 
146 /**
147  * ice_clean_tx_ring - Free any pending Tx buffers
148  * @tx_ring: ring to be cleaned
149  */
150 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
151 {
152 	u32 size;
153 	u16 i;
154 
155 	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
156 		ice_xsk_clean_xdp_ring(tx_ring);
157 		goto tx_skip_free;
158 	}
159 
160 	/* ring already cleared, nothing to do */
161 	if (!tx_ring->tx_buf)
162 		return;
163 
164 	/* Free all the Tx ring sk_buffs */
165 	for (i = 0; i < tx_ring->count; i++)
166 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
167 
168 tx_skip_free:
169 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
170 
171 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
172 		     PAGE_SIZE);
173 	/* Zero out the descriptor ring */
174 	memset(tx_ring->desc, 0, size);
175 
176 	tx_ring->next_to_use = 0;
177 	tx_ring->next_to_clean = 0;
178 
179 	if (!tx_ring->netdev)
180 		return;
181 
182 	/* cleanup Tx queue statistics */
183 	netdev_tx_reset_queue(txring_txq(tx_ring));
184 }
185 
186 /**
187  * ice_free_tx_ring - Free Tx resources per queue
188  * @tx_ring: Tx descriptor ring for a specific queue
189  *
190  * Free all transmit software resources
191  */
192 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
193 {
194 	u32 size;
195 
196 	ice_clean_tx_ring(tx_ring);
197 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
198 	tx_ring->tx_buf = NULL;
199 
200 	if (tx_ring->desc) {
201 		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
202 			     PAGE_SIZE);
203 		dmam_free_coherent(tx_ring->dev, size,
204 				   tx_ring->desc, tx_ring->dma);
205 		tx_ring->desc = NULL;
206 	}
207 }
208 
209 /**
210  * ice_clean_tx_irq - Reclaim resources after transmit completes
211  * @tx_ring: Tx ring to clean
212  * @napi_budget: Used to determine if we are in netpoll
213  *
214  * Returns true if there's any budget left (e.g. the clean is finished)
215  * Returns true if there's any budget left (i.e. the clean is finished)
216 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
217 {
218 	unsigned int total_bytes = 0, total_pkts = 0;
219 	unsigned int budget = ICE_DFLT_IRQ_WORK;
220 	struct ice_vsi *vsi = tx_ring->vsi;
221 	s16 i = tx_ring->next_to_clean;
222 	struct ice_tx_desc *tx_desc;
223 	struct ice_tx_buf *tx_buf;
224 
225 	/* get the bql data ready */
226 	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
227 
228 	tx_buf = &tx_ring->tx_buf[i];
229 	tx_desc = ICE_TX_DESC(tx_ring, i);
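	/* i is tracked as a negative offset from the end of the ring: it runs
	 * from -count up towards zero, so a wrap is detected by i reaching
	 * zero rather than by comparing against tx_ring->count
	 */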
230 	i -= tx_ring->count;
231 
232 	prefetch(&vsi->state);
233 
234 	do {
235 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
236 
237 		/* if next_to_watch is not set then there is no work pending */
238 		if (!eop_desc)
239 			break;
240 
241 		/* follow the guidelines of other drivers */
242 		prefetchw(&tx_buf->skb->users);
243 
244 		smp_rmb();	/* prevent any other reads prior to eop_desc */
245 
246 		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
247 		/* if the descriptor isn't done, no work yet to do */
248 		if (!(eop_desc->cmd_type_offset_bsz &
249 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
250 			break;
251 
252 		/* clear next_to_watch to prevent false hangs */
253 		tx_buf->next_to_watch = NULL;
254 
255 		/* update the statistics for this packet */
256 		total_bytes += tx_buf->bytecount;
257 		total_pkts += tx_buf->gso_segs;
258 
259 		/* free the skb */
260 		napi_consume_skb(tx_buf->skb, napi_budget);
261 
262 		/* unmap skb header data */
263 		dma_unmap_single(tx_ring->dev,
264 				 dma_unmap_addr(tx_buf, dma),
265 				 dma_unmap_len(tx_buf, len),
266 				 DMA_TO_DEVICE);
267 
268 		/* clear tx_buf data */
269 		tx_buf->type = ICE_TX_BUF_EMPTY;
270 		dma_unmap_len_set(tx_buf, len, 0);
271 
272 		/* unmap remaining buffers */
273 		while (tx_desc != eop_desc) {
274 			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
275 			tx_buf++;
276 			tx_desc++;
277 			i++;
278 			if (unlikely(!i)) {
279 				i -= tx_ring->count;
280 				tx_buf = tx_ring->tx_buf;
281 				tx_desc = ICE_TX_DESC(tx_ring, 0);
282 			}
283 
284 			/* unmap any remaining paged data */
285 			if (dma_unmap_len(tx_buf, len)) {
286 				dma_unmap_page(tx_ring->dev,
287 					       dma_unmap_addr(tx_buf, dma),
288 					       dma_unmap_len(tx_buf, len),
289 					       DMA_TO_DEVICE);
290 				dma_unmap_len_set(tx_buf, len, 0);
291 			}
292 		}
293 		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
294 
295 		/* move us one more past the eop_desc for start of next pkt */
296 		tx_buf++;
297 		tx_desc++;
298 		i++;
299 		if (unlikely(!i)) {
300 			i -= tx_ring->count;
301 			tx_buf = tx_ring->tx_buf;
302 			tx_desc = ICE_TX_DESC(tx_ring, 0);
303 		}
304 
305 		prefetch(tx_desc);
306 
307 		/* update budget accounting */
308 		budget--;
309 	} while (likely(budget));
310 
311 	i += tx_ring->count;
312 	tx_ring->next_to_clean = i;
313 
314 	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
315 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
316 
317 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
318 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
319 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
320 		/* Make sure that anybody stopping the queue after this
321 		 * sees the new next_to_clean.
322 		 */
323 		smp_mb();
324 		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
325 		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
326 			netif_tx_wake_queue(txring_txq(tx_ring));
327 			++tx_ring->ring_stats->tx_stats.restart_q;
328 		}
329 	}
330 
331 	return !!budget;
332 }
333 
334 /**
335  * ice_setup_tx_ring - Allocate the Tx descriptors
336  * @tx_ring: the Tx ring to set up
337  *
338  * Return 0 on success, negative on error
339  */
340 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
341 {
342 	struct device *dev = tx_ring->dev;
343 	u32 size;
344 
345 	if (!dev)
346 		return -ENOMEM;
347 
348 	/* warn if we are about to overwrite the pointer */
349 	WARN_ON(tx_ring->tx_buf);
350 	tx_ring->tx_buf =
351 		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
352 			     GFP_KERNEL);
353 	if (!tx_ring->tx_buf)
354 		return -ENOMEM;
355 
356 	/* round up to nearest page */
357 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
358 		     PAGE_SIZE);
359 	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
360 					    GFP_KERNEL);
361 	if (!tx_ring->desc) {
362 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
363 			size);
364 		goto err;
365 	}
366 
367 	tx_ring->next_to_use = 0;
368 	tx_ring->next_to_clean = 0;
369 	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
370 	return 0;
371 
372 err:
373 	devm_kfree(dev, tx_ring->tx_buf);
374 	tx_ring->tx_buf = NULL;
375 	return -ENOMEM;
376 }
377 
378 /**
379  * ice_clean_rx_ring - Free Rx buffers
380  * @rx_ring: ring to be cleaned
381  */
382 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
383 {
384 	struct xdp_buff *xdp = &rx_ring->xdp;
385 	struct device *dev = rx_ring->dev;
386 	u32 size;
387 	u16 i;
388 
389 	/* ring already cleared, nothing to do */
390 	if (!rx_ring->rx_buf)
391 		return;
392 
393 	if (rx_ring->xsk_pool) {
394 		ice_xsk_clean_rx_ring(rx_ring);
395 		goto rx_skip_free;
396 	}
397 
398 	if (xdp->data) {
399 		xdp_return_buff(xdp);
400 		xdp->data = NULL;
401 	}
402 
403 	/* Free all the Rx ring sk_buffs */
404 	for (i = 0; i < rx_ring->count; i++) {
405 		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
406 
407 		if (!rx_buf->page)
408 			continue;
409 
410 		/* Invalidate cache lines that may have been written to by
411 		 * device so that we avoid corrupting memory.
412 		 */
413 		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
414 					      rx_buf->page_offset,
415 					      rx_ring->rx_buf_len,
416 					      DMA_FROM_DEVICE);
417 
418 		/* free resources associated with mapping */
419 		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
420 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
421 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
422 
423 		rx_buf->page = NULL;
424 		rx_buf->page_offset = 0;
425 	}
426 
427 rx_skip_free:
428 	if (rx_ring->xsk_pool)
429 		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
430 	else
431 		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
432 
433 	/* Zero out the descriptor ring */
434 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
435 		     PAGE_SIZE);
436 	memset(rx_ring->desc, 0, size);
437 
438 	rx_ring->next_to_alloc = 0;
439 	rx_ring->next_to_clean = 0;
440 	rx_ring->first_desc = 0;
441 	rx_ring->next_to_use = 0;
442 }
443 
444 /**
445  * ice_free_rx_ring - Free Rx resources
446  * @rx_ring: ring to clean the resources from
447  *
448  * Free all receive software resources
449  */
450 void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
451 {
452 	u32 size;
453 
454 	ice_clean_rx_ring(rx_ring);
455 	if (rx_ring->vsi->type == ICE_VSI_PF)
456 		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
457 			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
458 	WRITE_ONCE(rx_ring->xdp_prog, NULL);
459 	if (rx_ring->xsk_pool) {
460 		kfree(rx_ring->xdp_buf);
461 		rx_ring->xdp_buf = NULL;
462 	} else {
463 		kfree(rx_ring->rx_buf);
464 		rx_ring->rx_buf = NULL;
465 	}
466 
467 	if (rx_ring->desc) {
468 		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
469 			     PAGE_SIZE);
470 		dmam_free_coherent(rx_ring->dev, size,
471 				   rx_ring->desc, rx_ring->dma);
472 		rx_ring->desc = NULL;
473 	}
474 }
475 
476 /**
477  * ice_setup_rx_ring - Allocate the Rx descriptors
478  * @rx_ring: the Rx ring to set up
479  *
480  * Return 0 on success, negative on error
481  */
482 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
483 {
484 	struct device *dev = rx_ring->dev;
485 	u32 size;
486 
487 	if (!dev)
488 		return -ENOMEM;
489 
490 	/* warn if we are about to overwrite the pointer */
491 	WARN_ON(rx_ring->rx_buf);
492 	rx_ring->rx_buf =
493 		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
494 	if (!rx_ring->rx_buf)
495 		return -ENOMEM;
496 
497 	/* round up to nearest page */
498 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
499 		     PAGE_SIZE);
500 	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
501 					    GFP_KERNEL);
502 	if (!rx_ring->desc) {
503 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
504 			size);
505 		goto err;
506 	}
507 
508 	rx_ring->next_to_use = 0;
509 	rx_ring->next_to_clean = 0;
510 	rx_ring->first_desc = 0;
511 
512 	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
513 		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
514 
515 	return 0;
516 
517 err:
518 	kfree(rx_ring->rx_buf);
519 	rx_ring->rx_buf = NULL;
520 	return -ENOMEM;
521 }
522 
523 /**
524  * ice_run_xdp - Executes an XDP program on initialized xdp_buff
525  * @rx_ring: Rx ring
526  * @xdp: xdp_buff used as input to the XDP program
527  * @xdp_prog: XDP program to run
528  * @xdp_ring: ring to be used for XDP_TX action
529  * @eop_desc: Last descriptor in packet to read metadata from
530  *
531  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
532  */
533 static u32
534 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
535 	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
536 	    union ice_32b_rx_flex_desc *eop_desc)
537 {
538 	unsigned int ret = ICE_XDP_PASS;
539 	u32 act;
540 
541 	if (!xdp_prog)
542 		goto exit;
543 
544 	ice_xdp_meta_set_desc(xdp, eop_desc);
545 
546 	act = bpf_prog_run_xdp(xdp_prog, xdp);
547 	switch (act) {
548 	case XDP_PASS:
549 		break;
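	/* ice_xdp_locking_key appears to be enabled when XDP Tx rings are
	 * shared between CPUs (see the queue_index fallback in ice_xdp_xmit()
	 * below), hence the ring lock around the XDP_TX path
	 */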
550 	case XDP_TX:
551 		if (static_branch_unlikely(&ice_xdp_locking_key))
552 			spin_lock(&xdp_ring->tx_lock);
553 		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
554 		if (static_branch_unlikely(&ice_xdp_locking_key))
555 			spin_unlock(&xdp_ring->tx_lock);
556 		if (ret == ICE_XDP_CONSUMED)
557 			goto out_failure;
558 		break;
559 	case XDP_REDIRECT:
560 		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
561 			goto out_failure;
562 		ret = ICE_XDP_REDIR;
563 		break;
564 	default:
565 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
566 		fallthrough;
567 	case XDP_ABORTED:
568 out_failure:
569 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
570 		fallthrough;
571 	case XDP_DROP:
572 		ret = ICE_XDP_CONSUMED;
573 	}
574 exit:
575 	return ret;
576 }
577 
578 /**
579  * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
580  * @xdpf: XDP frame that will be converted to XDP buff
581  * @xdp_ring: XDP ring for transmission
582  */
583 static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
584 			     struct ice_tx_ring *xdp_ring)
585 {
586 	struct xdp_buff xdp;
587 
588 	xdp.data_hard_start = (void *)xdpf;
589 	xdp.data = xdpf->data;
590 	xdp.data_end = xdp.data + xdpf->len;
591 	xdp.frame_sz = xdpf->frame_sz;
592 	xdp.flags = xdpf->flags;
593 
594 	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
595 }
596 
597 /**
598  * ice_xdp_xmit - submit packets to XDP ring for transmission
599  * @dev: netdev
600  * @n: number of XDP frames to be transmitted
601  * @frames: XDP frames to be transmitted
602  * @flags: transmit flags
603  *
604  * Returns number of frames successfully sent. Failed frames
605  * will be freed by the XDP core.
606  * For error cases, a negative errno code is returned and no frames
607  * are transmitted (caller must handle freeing frames).
608  */
609 int
610 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
611 	     u32 flags)
612 {
613 	struct ice_netdev_priv *np = netdev_priv(dev);
614 	unsigned int queue_index = smp_processor_id();
615 	struct ice_vsi *vsi = np->vsi;
616 	struct ice_tx_ring *xdp_ring;
617 	struct ice_tx_buf *tx_buf;
618 	int nxmit = 0, i;
619 
620 	if (test_bit(ICE_VSI_DOWN, vsi->state))
621 		return -ENETDOWN;
622 
623 	if (!ice_is_xdp_ena_vsi(vsi))
624 		return -ENXIO;
625 
626 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
627 		return -EINVAL;
628 
629 	if (static_branch_unlikely(&ice_xdp_locking_key)) {
630 		queue_index %= vsi->num_xdp_txq;
631 		xdp_ring = vsi->xdp_rings[queue_index];
632 		spin_lock(&xdp_ring->tx_lock);
633 	} else {
634 		/* Generally, should not happen */
635 		if (unlikely(queue_index >= vsi->num_xdp_txq))
636 			return -ENXIO;
637 		xdp_ring = vsi->xdp_rings[queue_index];
638 	}
639 
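	/* remember the buffer at the pre-batch next_to_use; after the frames
	 * are queued, the RS-bit descriptor index from ice_set_rs_bit() is
	 * stored on it, presumably so Tx completion can find the batch
	 * boundary
	 */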
640 	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
641 	for (i = 0; i < n; i++) {
642 		const struct xdp_frame *xdpf = frames[i];
643 		int err;
644 
645 		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
646 		if (err != ICE_XDP_TX)
647 			break;
648 		nxmit++;
649 	}
650 
651 	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
652 	if (unlikely(flags & XDP_XMIT_FLUSH))
653 		ice_xdp_ring_update_tail(xdp_ring);
654 
655 	if (static_branch_unlikely(&ice_xdp_locking_key))
656 		spin_unlock(&xdp_ring->tx_lock);
657 
658 	return nxmit;
659 }
660 
661 /**
662  * ice_alloc_mapped_page - recycle or make a new page
663  * @rx_ring: ring to use
664  * @bi: rx_buf struct to modify
665  *
666  * Returns true if the page was successfully allocated or
667  * reused.
668  */
669 static bool
670 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
671 {
672 	struct page *page = bi->page;
673 	dma_addr_t dma;
674 
675 	/* since we are recycling buffers we should seldom need to alloc */
676 	if (likely(page))
677 		return true;
678 
679 	/* alloc new page for storage */
680 	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
681 	if (unlikely(!page)) {
682 		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
683 		return false;
684 	}
685 
686 	/* map page for use */
687 	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
688 				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
689 
690 	/* if mapping failed free memory back to system since
691 	 * there isn't much point in holding memory we can't use
692 	 */
693 	if (dma_mapping_error(rx_ring->dev, dma)) {
694 		__free_pages(page, ice_rx_pg_order(rx_ring));
695 		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
696 		return false;
697 	}
698 
699 	bi->dma = dma;
700 	bi->page = page;
701 	bi->page_offset = rx_ring->rx_offset;
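	/* take a large page refcount up front and track usage through
	 * pagecnt_bias so buffer reuse needs only local arithmetic; the bias
	 * is reconciled against page_count() in ice_can_reuse_rx_page()
	 */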
702 	page_ref_add(page, USHRT_MAX - 1);
703 	bi->pagecnt_bias = USHRT_MAX;
704 
705 	return true;
706 }
707 
708 /**
709  * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
710  * @rx_ring: ring to init descriptors on
711  * @count: number of descriptors to initialize
712  */
713 void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
714 {
715 	union ice_32b_rx_flex_desc *rx_desc;
716 	u32 ntu = rx_ring->next_to_use;
717 
718 	if (!count)
719 		return;
720 
721 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
722 
723 	do {
724 		rx_desc++;
725 		ntu++;
726 		if (unlikely(ntu == rx_ring->count)) {
727 			rx_desc = ICE_RX_DESC(rx_ring, 0);
728 			ntu = 0;
729 		}
730 
731 		rx_desc->wb.status_error0 = 0;
732 		count--;
733 	} while (count);
734 
735 	if (rx_ring->next_to_use != ntu)
736 		ice_release_rx_desc(rx_ring, ntu);
737 }
738 
739 /**
740  * ice_alloc_rx_bufs - Replace used receive buffers
741  * @rx_ring: ring to place buffers on
742  * @cleaned_count: number of buffers to replace
743  *
744  * Returns false if all allocations were successful, true if any fail. Returning
745  * true signals to the caller that we didn't replace cleaned_count buffers and
746  * there is more work to do.
747  *
748  * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
749  * buffers. Then bump tail at most one time. Grouping like this lets us avoid
750  * multiple tail writes per call.
751  */
752 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
753 {
754 	union ice_32b_rx_flex_desc *rx_desc;
755 	u16 ntu = rx_ring->next_to_use;
756 	struct ice_rx_buf *bi;
757 
758 	/* do nothing if no valid netdev defined */
759 	if (!rx_ring->netdev || !cleaned_count)
760 		return false;
761 
762 	/* get the Rx descriptor and buffer based on next_to_use */
763 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
764 	bi = &rx_ring->rx_buf[ntu];
765 
766 	do {
767 		/* if we fail here, we have work remaining */
768 		if (!ice_alloc_mapped_page(rx_ring, bi))
769 			break;
770 
771 		/* sync the buffer for use by the device */
772 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
773 						 bi->page_offset,
774 						 rx_ring->rx_buf_len,
775 						 DMA_FROM_DEVICE);
776 
777 		/* Refresh the desc even if buffer_addrs didn't change
778 		 * because each write-back erases this info.
779 		 */
780 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
781 
782 		rx_desc++;
783 		bi++;
784 		ntu++;
785 		if (unlikely(ntu == rx_ring->count)) {
786 			rx_desc = ICE_RX_DESC(rx_ring, 0);
787 			bi = rx_ring->rx_buf;
788 			ntu = 0;
789 		}
790 
791 		/* clear the status bits for the next_to_use descriptor */
792 		rx_desc->wb.status_error0 = 0;
793 
794 		cleaned_count--;
795 	} while (cleaned_count);
796 
797 	if (rx_ring->next_to_use != ntu)
798 		ice_release_rx_desc(rx_ring, ntu);
799 
800 	return !!cleaned_count;
801 }
802 
803 /**
804  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
805  * @rx_buf: Rx buffer to adjust
806  * @size: Size of adjustment
807  *
808  * Update the offset within page so that Rx buf will be ready to be reused.
809  * For systems with PAGE_SIZE < 8192 this function will flip the page offset
810  * so the second half of page assigned to Rx buffer will be used, otherwise
811  * the offset is moved by "size" bytes
812  */
813 static void
814 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
815 {
816 #if (PAGE_SIZE < 8192)
817 	/* flip page offset to other buffer */
818 	rx_buf->page_offset ^= size;
819 #else
820 	/* move offset up to the next cache line */
821 	rx_buf->page_offset += size;
822 #endif
823 }
824 
825 /**
826  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
827  * @rx_buf: buffer containing the page
828  *
829  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
830  * which will assign the current buffer to the buffer that next_to_alloc is
831  * pointing to; otherwise, the DMA mapping needs to be destroyed and
832  * page freed
833  */
834 static bool
835 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
836 {
837 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
838 	struct page *page = rx_buf->page;
839 
840 	/* avoid re-using remote and pfmemalloc pages */
841 	if (!dev_page_is_reusable(page))
842 		return false;
843 
844 	/* if we are only owner of page we can reuse it */
845 	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
846 		return false;
847 #if (PAGE_SIZE >= 8192)
848 #define ICE_LAST_OFFSET \
849 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
850 	if (rx_buf->page_offset > ICE_LAST_OFFSET)
851 		return false;
852 #endif /* PAGE_SIZE >= 8192 */
853 
854 	/* If we have drained the page fragment pool we need to update
855 	 * the pagecnt_bias and page count so that we fully restock the
856 	 * number of references the driver holds.
857 	 */
858 	if (unlikely(pagecnt_bias == 1)) {
859 		page_ref_add(page, USHRT_MAX - 1);
860 		rx_buf->pagecnt_bias = USHRT_MAX;
861 	}
862 
863 	return true;
864 }
865 
866 /**
867  * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
868  * @rx_ring: Rx descriptor ring to transact packets on
869  * @xdp: xdp buff to place the data into
870  * @rx_buf: buffer containing page to add
871  * @size: packet length from rx_desc
872  *
873  * This function will add the data contained in rx_buf->page to the xdp buf.
874  * It will just attach the page as a frag.
875  */
876 static int
877 ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
878 		 struct ice_rx_buf *rx_buf, const unsigned int size)
879 {
880 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
881 
882 	if (!size)
883 		return 0;
884 
885 	if (!xdp_buff_has_frags(xdp)) {
886 		sinfo->nr_frags = 0;
887 		sinfo->xdp_frags_size = 0;
888 		xdp_buff_set_frags_flag(xdp);
889 	}
890 
891 	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
892 		return -ENOMEM;
893 
894 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
895 				   rx_buf->page_offset, size);
896 	sinfo->xdp_frags_size += size;
897 	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
898 	 * can pop off frags but driver has to handle it on its own
899 	 */
900 	rx_ring->nr_frags = sinfo->nr_frags;
901 
902 	if (page_is_pfmemalloc(rx_buf->page))
903 		xdp_buff_set_frag_pfmemalloc(xdp);
904 
905 	return 0;
906 }
907 
908 /**
909  * ice_reuse_rx_page - page flip buffer and store it back on the ring
910  * @rx_ring: Rx descriptor ring to store buffers on
911  * @old_buf: donor buffer to have page reused
912  *
913  * Synchronizes page for reuse by the adapter
914  */
915 static void
916 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
917 {
918 	u16 nta = rx_ring->next_to_alloc;
919 	struct ice_rx_buf *new_buf;
920 
921 	new_buf = &rx_ring->rx_buf[nta];
922 
923 	/* update, and store next to alloc */
924 	nta++;
925 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
926 
927 	/* Transfer page from old buffer to new buffer.
928 	 * Move each member individually to avoid possible store
929 	 * forwarding stalls and unnecessary copy of skb.
930 	 */
931 	new_buf->dma = old_buf->dma;
932 	new_buf->page = old_buf->page;
933 	new_buf->page_offset = old_buf->page_offset;
934 	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
935 }
936 
937 /**
938  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
939  * @rx_ring: Rx descriptor ring to transact packets on
940  * @size: size of buffer to add to skb
941  * @ntc: index of next to clean element
942  *
943  * This function will pull an Rx buffer from the ring and synchronize it
944  * for use by the CPU.
945  */
946 static struct ice_rx_buf *
947 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
948 	       const unsigned int ntc)
949 {
950 	struct ice_rx_buf *rx_buf;
951 
952 	rx_buf = &rx_ring->rx_buf[ntc];
953 	prefetchw(rx_buf->page);
954 
955 	if (!size)
956 		return rx_buf;
957 	/* we are reusing so sync this buffer for CPU use */
958 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
959 				      rx_buf->page_offset, size,
960 				      DMA_FROM_DEVICE);
961 
962 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
963 	rx_buf->pagecnt_bias--;
964 
965 	return rx_buf;
966 }
967 
968 /**
969  * ice_get_pgcnts - grab page_count() for gathered fragments
970  * @rx_ring: Rx descriptor ring to store the page counts on
971  *
972  * This function is intended to be called right before running the XDP
973  * program so that the page recycling mechanism can make a correct
974  * decision about the underlying pages; this is needed because the XDP
975  * program can change the page refcount.
976  */
977 static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
978 {
979 	u32 nr_frags = rx_ring->nr_frags + 1;
980 	u32 idx = rx_ring->first_desc;
981 	struct ice_rx_buf *rx_buf;
982 	u32 cnt = rx_ring->count;
983 
984 	for (int i = 0; i < nr_frags; i++) {
985 		rx_buf = &rx_ring->rx_buf[idx];
986 		rx_buf->pgcnt = page_count(rx_buf->page);
987 
988 		if (++idx == cnt)
989 			idx = 0;
990 	}
991 }
992 
993 /**
994  * ice_build_skb - Build skb around an existing buffer
995  * @rx_ring: Rx descriptor ring to transact packets on
996  * @xdp: xdp_buff pointing to the data
997  *
998  * This function builds an skb around an existing XDP buffer, taking care
999  * to set up the skb correctly and avoid any memcpy overhead. Driver has
1000  * already combined frags (if any) to skb_shared_info.
1001  */
1002 static struct sk_buff *
1003 ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
1004 {
1005 	u8 metasize = xdp->data - xdp->data_meta;
1006 	struct skb_shared_info *sinfo = NULL;
1007 	unsigned int nr_frags;
1008 	struct sk_buff *skb;
1009 
1010 	if (unlikely(xdp_buff_has_frags(xdp))) {
1011 		sinfo = xdp_get_shared_info_from_buff(xdp);
1012 		nr_frags = sinfo->nr_frags;
1013 	}
1014 
1015 	/* Prefetch first cache line of first page. If xdp->data_meta
1016 	 * is unused, this points exactly at xdp->data, otherwise we
1017 	 * likely have a consumer accessing the first few bytes of
1018 	 * metadata, and then the actual data.
1019 	 */
1020 	net_prefetch(xdp->data_meta);
1021 	/* build an skb around the page buffer */
1022 	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
1023 	if (unlikely(!skb))
1024 		return NULL;
1025 
1026 	/* must record the Rx queue, otherwise OS features such as
1027 	 * symmetric queue won't work
1028 	 */
1029 	skb_record_rx_queue(skb, rx_ring->q_index);
1030 
1031 	/* update pointers within the skb to store the data */
1032 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
1033 	__skb_put(skb, xdp->data_end - xdp->data);
1034 	if (metasize)
1035 		skb_metadata_set(skb, metasize);
1036 
1037 	if (unlikely(xdp_buff_has_frags(xdp)))
1038 		xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
1039 					  nr_frags * xdp->frame_sz,
1040 					  xdp_buff_get_skb_flags(xdp));
1041 
1042 	return skb;
1043 }
1044 
1045 /**
1046  * ice_construct_skb - Allocate skb and populate it
1047  * @rx_ring: Rx descriptor ring to transact packets on
1048  * @xdp: xdp_buff pointing to the data
1049  *
1050  * This function allocates an skb. It then populates it with the page
1051  * data from the current receive descriptor, taking care to set up the
1052  * skb correctly.
1053  */
1054 static struct sk_buff *
1055 ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
1056 {
1057 	unsigned int size = xdp->data_end - xdp->data;
1058 	struct skb_shared_info *sinfo = NULL;
1059 	struct ice_rx_buf *rx_buf;
1060 	unsigned int nr_frags = 0;
1061 	unsigned int headlen;
1062 	struct sk_buff *skb;
1063 
1064 	/* prefetch first cache line of first page */
1065 	net_prefetch(xdp->data);
1066 
1067 	if (unlikely(xdp_buff_has_frags(xdp))) {
1068 		sinfo = xdp_get_shared_info_from_buff(xdp);
1069 		nr_frags = sinfo->nr_frags;
1070 	}
1071 
1072 	/* allocate a skb to store the frags */
1073 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
1074 	if (unlikely(!skb))
1075 		return NULL;
1076 
1077 	rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
1078 	skb_record_rx_queue(skb, rx_ring->q_index);
1079 	/* Determine available headroom for copy */
1080 	headlen = size;
1081 	if (headlen > ICE_RX_HDR_SIZE)
1082 		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1083 
1084 	/* align pull length to size of long to optimize memcpy performance */
1085 	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
1086 							 sizeof(long)));
1087 
1088 	/* if we exhaust the linear part then add what is left as a frag */
1089 	size -= headlen;
1090 	if (size) {
1091 		/* besides adding here a partial frag, we are going to add
1092 		 * frags from xdp_buff, make sure there is enough space for
1093 		 * them
1094 		 */
1095 		if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
1096 			dev_kfree_skb(skb);
1097 			return NULL;
1098 		}
1099 		skb_add_rx_frag(skb, 0, rx_buf->page,
1100 				rx_buf->page_offset + headlen, size,
1101 				xdp->frame_sz);
1102 	} else {
1103 		/* buffer is unused, restore biased page count in Rx buffer;
1104 		 * data was copied onto skb's linear part so there's no
1105 		 * need for adjusting page offset and we can reuse this buffer
1106 		 * as-is
1107 		 */
1108 		rx_buf->pagecnt_bias++;
1109 	}
1110 
1111 	if (unlikely(xdp_buff_has_frags(xdp))) {
1112 		struct skb_shared_info *skinfo = skb_shinfo(skb);
1113 
1114 		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
1115 		       sizeof(skb_frag_t) * nr_frags);
1116 
1117 		xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
1118 					  sinfo->xdp_frags_size,
1119 					  nr_frags * xdp->frame_sz,
1120 					  xdp_buff_get_skb_flags(xdp));
1121 	}
1122 
1123 	return skb;
1124 }
1125 
1126 /**
1127  * ice_put_rx_buf - Clean up used buffer and either recycle or free
1128  * @rx_ring: Rx descriptor ring to transact packets on
1129  * @rx_buf: Rx buffer to pull data from
1130  *
1131  * This function will clean up the contents of the rx_buf. It will either
1132  * recycle the buffer or unmap it and free the associated resources.
1133  */
1134 static void
1135 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
1136 {
1137 	if (!rx_buf)
1138 		return;
1139 
1140 	if (ice_can_reuse_rx_page(rx_buf)) {
1141 		/* hand second half of page back to the ring */
1142 		ice_reuse_rx_page(rx_ring, rx_buf);
1143 	} else {
1144 		/* we are not reusing the buffer so unmap it */
1145 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1146 				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1147 				     ICE_RX_DMA_ATTR);
1148 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1149 	}
1150 
1151 	/* clear contents of buffer_info */
1152 	rx_buf->page = NULL;
1153 }
1154 
1155 /**
1156  * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
1157  * @rx_ring: Rx ring with all the auxiliary data
1158  * @xdp: XDP buffer carrying linear + frags part
1159  * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
1160  * @ntc: a current next_to_clean value to be stored at rx_ring
1161  * @verdict: return code from XDP program execution
1162  *
1163  * Walk through the gathered fragments and satisfy the internal page
1164  * recycle mechanism; the action taken for each buffer depends on the
1165  * verdict returned by the XDP program.
1166  */
1167 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
1168 			    u32 *xdp_xmit, u32 ntc, u32 verdict)
1169 {
1170 	u32 nr_frags = rx_ring->nr_frags + 1;
1171 	u32 idx = rx_ring->first_desc;
1172 	u32 cnt = rx_ring->count;
1173 	u32 post_xdp_frags = 1;
1174 	struct ice_rx_buf *buf;
1175 	int i;
1176 
1177 	if (unlikely(xdp_buff_has_frags(xdp)))
1178 		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
1179 
1180 	for (i = 0; i < post_xdp_frags; i++) {
1181 		buf = &rx_ring->rx_buf[idx];
1182 
1183 		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1184 			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
1185 			*xdp_xmit |= verdict;
1186 		} else if (verdict & ICE_XDP_CONSUMED) {
1187 			buf->pagecnt_bias++;
1188 		} else if (verdict == ICE_XDP_PASS) {
1189 			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
1190 		}
1191 
1192 		ice_put_rx_buf(rx_ring, buf);
1193 
1194 		if (++idx == cnt)
1195 			idx = 0;
1196 	}
1197 	/* handle buffers that represented frags released by XDP prog;
1198 	 * for these we keep pagecnt_bias as-is; refcount from struct page
1199 	 * has been decremented within XDP prog and we do not have to increase
1200 	 * the biased refcnt
1201 	 */
1202 	for (; i < nr_frags; i++) {
1203 		buf = &rx_ring->rx_buf[idx];
1204 		ice_put_rx_buf(rx_ring, buf);
1205 		if (++idx == cnt)
1206 			idx = 0;
1207 	}
1208 
1209 	xdp->data = NULL;
1210 	rx_ring->first_desc = ntc;
1211 	rx_ring->nr_frags = 0;
1212 }
1213 
1214 /**
1215  * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
1216  * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
1217  *
1218  * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
1219  * to set flow director rules on VFs.
1220  */
1221 void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
1222 {
1223 	u32 ntc = rx_ring->next_to_clean;
1224 	unsigned int total_rx_pkts = 0;
1225 	u32 cnt = rx_ring->count;
1226 
1227 	while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
1228 		struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1229 		union ice_32b_rx_flex_desc *rx_desc;
1230 		u16 stat_err_bits;
1231 
1232 		rx_desc = ICE_RX_DESC(rx_ring, ntc);
1233 
1234 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1235 		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1236 			break;
1237 
1238 		dma_rmb();
1239 
1240 		if (ctrl_vsi->vf)
1241 			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1242 
1243 		if (++ntc == cnt)
1244 			ntc = 0;
1245 		total_rx_pkts++;
1246 	}
1247 
1248 	rx_ring->first_desc = ntc;
1249 	rx_ring->next_to_clean = ntc;
1250 	ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
1251 }
1252 
1253 /**
1254  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1255  * @rx_ring: Rx descriptor ring to transact packets on
1256  * @budget: Total limit on number of packets to process
1257  *
1258  * This function provides a "bounce buffer" approach to Rx interrupt
1259  * processing. The advantage to this is that on systems that have
1260  * expensive overhead for IOMMU access this provides a means of avoiding
1261  * it by maintaining the mapping of the page to the system.
1262  *
1263  * Returns amount of work completed
1264  */
1265 static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1266 {
1267 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1268 	unsigned int offset = rx_ring->rx_offset;
1269 	struct xdp_buff *xdp = &rx_ring->xdp;
1270 	struct ice_tx_ring *xdp_ring = NULL;
1271 	struct bpf_prog *xdp_prog = NULL;
1272 	u32 ntc = rx_ring->next_to_clean;
1273 	u32 cached_ntu, xdp_verdict;
1274 	u32 cnt = rx_ring->count;
1275 	u32 xdp_xmit = 0;
1276 	bool failure;
1277 
1278 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1279 	if (xdp_prog) {
1280 		xdp_ring = rx_ring->xdp_ring;
1281 		cached_ntu = xdp_ring->next_to_use;
1282 	}
1283 
1284 	/* start the loop to process Rx packets bounded by 'budget' */
1285 	while (likely(total_rx_pkts < (unsigned int)budget)) {
1286 		union ice_32b_rx_flex_desc *rx_desc;
1287 		struct ice_rx_buf *rx_buf;
1288 		struct sk_buff *skb;
1289 		unsigned int size;
1290 		u16 stat_err_bits;
1291 		u16 vlan_tci;
1292 
1293 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
1294 		rx_desc = ICE_RX_DESC(rx_ring, ntc);
1295 
1296 		/* status_error_len will always be zero for unused descriptors
1297 		 * because it's cleared in cleanup, and overlaps with hdr_addr
1298 		 * which is always zero because packet split isn't used. If the
1299 		 * hardware wrote DD then it will be non-zero.
1300 		 */
1301 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1302 		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1303 			break;
1304 
1305 		/* This memory barrier is needed to keep us from reading
1306 		 * any other fields out of the rx_desc until we know the
1307 		 * DD bit is set.
1308 		 */
1309 		dma_rmb();
1310 
1311 		ice_trace(clean_rx_irq, rx_ring, rx_desc);
1312 
1313 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
1314 			ICE_RX_FLX_DESC_PKT_LEN_M;
1315 
1316 		/* retrieve a buffer from the ring */
1317 		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
1318 
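		/* the first buffer of a frame initializes the xdp_buff; any
		 * further buffers up to the EOP descriptor are attached as
		 * frags
		 */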
1319 		if (!xdp->data) {
1320 			void *hard_start;
1321 
1322 			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1323 				     offset;
1324 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
1325 			xdp_buff_clear_frags_flag(xdp);
1326 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
1327 			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
1328 			break;
1329 		}
1330 		if (++ntc == cnt)
1331 			ntc = 0;
1332 
1333 		/* not yet the EOP descriptor, keep gathering frags */
1334 		if (ice_is_non_eop(rx_ring, rx_desc))
1335 			continue;
1336 
1337 		ice_get_pgcnts(rx_ring);
1338 		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
1339 		if (xdp_verdict == ICE_XDP_PASS)
1340 			goto construct_skb;
1341 		total_rx_bytes += xdp_get_buff_len(xdp);
1342 		total_rx_pkts++;
1343 
1344 		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
1345 
1346 		continue;
1347 construct_skb:
1348 		if (likely(ice_ring_uses_build_skb(rx_ring)))
1349 			skb = ice_build_skb(rx_ring, xdp);
1350 		else
1351 			skb = ice_construct_skb(rx_ring, xdp);
1352 		/* exit if we failed to retrieve a buffer */
1353 		if (!skb) {
1354 			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
1355 			xdp_verdict = ICE_XDP_CONSUMED;
1356 		}
1357 		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
1358 
1359 		if (!skb)
1360 			break;
1361 
1362 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1363 		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1364 					      stat_err_bits))) {
1365 			dev_kfree_skb_any(skb);
1366 			continue;
1367 		}
1368 
1369 		vlan_tci = ice_get_vlan_tci(rx_desc);
1370 
1371 		/* pad the skb if needed, to make a valid ethernet frame */
1372 		if (eth_skb_pad(skb))
1373 			continue;
1374 
1375 		/* probably a little skewed due to removing CRC */
1376 		total_rx_bytes += skb->len;
1377 
1378 		/* populate checksum, VLAN, and protocol */
1379 		ice_process_skb_fields(rx_ring, rx_desc, skb);
1380 
1381 		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1382 		/* send completed skb up the stack */
1383 		ice_receive_skb(rx_ring, skb, vlan_tci);
1384 
1385 		/* update budget accounting */
1386 		total_rx_pkts++;
1387 	}
1388 
1389 	rx_ring->next_to_clean = ntc;
1390 	/* return up to cleaned_count buffers to hardware */
1391 	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
1392 
1393 	if (xdp_xmit)
1394 		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
1395 
1396 	if (rx_ring->ring_stats)
1397 		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
1398 					 total_rx_bytes);
1399 
1400 	/* guarantee a trip back through this routine if there was a failure */
1401 	return failure ? budget : (int)total_rx_pkts;
1402 }
1403 
1404 static void __ice_update_sample(struct ice_q_vector *q_vector,
1405 				struct ice_ring_container *rc,
1406 				struct dim_sample *sample,
1407 				bool is_tx)
1408 {
1409 	u64 packets = 0, bytes = 0;
1410 
1411 	if (is_tx) {
1412 		struct ice_tx_ring *tx_ring;
1413 
1414 		ice_for_each_tx_ring(tx_ring, *rc) {
1415 			struct ice_ring_stats *ring_stats;
1416 
1417 			ring_stats = tx_ring->ring_stats;
1418 			if (!ring_stats)
1419 				continue;
1420 			packets += ring_stats->stats.pkts;
1421 			bytes += ring_stats->stats.bytes;
1422 		}
1423 	} else {
1424 		struct ice_rx_ring *rx_ring;
1425 
1426 		ice_for_each_rx_ring(rx_ring, *rc) {
1427 			struct ice_ring_stats *ring_stats;
1428 
1429 			ring_stats = rx_ring->ring_stats;
1430 			if (!ring_stats)
1431 				continue;
1432 			packets += ring_stats->stats.pkts;
1433 			bytes += ring_stats->stats.bytes;
1434 		}
1435 	}
1436 
1437 	dim_update_sample(q_vector->total_events, packets, bytes, sample);
1438 	sample->comp_ctr = 0;
1439 
1440 	/* if dim settings get stale, like when not updated for 1
1441 	 * second or longer, force it to start again. This addresses the
1442 	 * frequent case of an idle queue being switched to by the
1443 	 * scheduler. The 1,000 here means 1,000 milliseconds.
1444 	 */
1445 	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1446 		rc->dim.state = DIM_START_MEASURE;
1447 }
1448 
1449 /**
1450  * ice_net_dim - Update net DIM algorithm
1451  * @q_vector: the vector associated with the interrupt
1452  *
1453  * Create a DIM sample and notify net_dim() so that it can possibly decide
1454  * a new ITR value based on incoming packets, bytes, and interrupts.
1455  *
1456  * This function is a no-op if the ring is not configured to dynamic ITR.
1457  */
1458 static void ice_net_dim(struct ice_q_vector *q_vector)
1459 {
1460 	struct ice_ring_container *tx = &q_vector->tx;
1461 	struct ice_ring_container *rx = &q_vector->rx;
1462 
1463 	if (ITR_IS_DYNAMIC(tx)) {
1464 		struct dim_sample dim_sample;
1465 
1466 		__ice_update_sample(q_vector, tx, &dim_sample, true);
1467 		net_dim(&tx->dim, &dim_sample);
1468 	}
1469 
1470 	if (ITR_IS_DYNAMIC(rx)) {
1471 		struct dim_sample dim_sample;
1472 
1473 		__ice_update_sample(q_vector, rx, &dim_sample, false);
1474 		net_dim(&rx->dim, &dim_sample);
1475 	}
1476 }
1477 
1478 /**
1479  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1480  * @itr_idx: interrupt throttling index
1481  * @itr: interrupt throttling value in usecs
1482  */
1483 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1484 {
1485 	/* The ITR value is reported in microseconds, and the register value is
1486 	 * recorded in 2 microsecond units. For this reason we only need to
1487 	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1488 	 * granularity as a shift instead of division. The mask makes sure the
1489 	 * ITR value is never odd so we don't accidentally write into the field
1490 	 * prior to the ITR field.
1491 	 */
1492 	itr &= ICE_ITR_MASK;
1493 
1494 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1495 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1496 		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1497 }
1498 
1499 /**
1500  * ice_enable_interrupt - re-enable MSI-X interrupt
1501  * @q_vector: the vector associated with the interrupt to enable
1502  *
1503  * If the VSI is down, the interrupt will not be re-enabled. Also,
1504  * when enabling the interrupt always reset the wb_on_itr to false
1505  * and trigger a software interrupt to clean out internal state.
1506  */
1507 static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1508 {
1509 	struct ice_vsi *vsi = q_vector->vsi;
1510 	bool wb_en = q_vector->wb_on_itr;
1511 	u32 itr_val;
1512 
1513 	if (test_bit(ICE_DOWN, vsi->state))
1514 		return;
1515 
1516 	/* trigger an ITR delayed software interrupt when exiting busy poll, to
1517 	 * make sure to catch any pending cleanups that might have been missed
1518 	 * due to interrupt state transition. If busy poll or poll isn't
1519 	 * enabled, then don't update ITR, and just enable the interrupt.
1520 	 */
1521 	if (!wb_en) {
1522 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1523 	} else {
1524 		q_vector->wb_on_itr = false;
1525 
1526 		/* do two things here with a single write. Set up the third ITR
1527 		 * index to be used for software interrupt moderation, and then
1528 		 * trigger a software interrupt, with software interrupts rate
1529 		 * limited to 20K per second; this helps avoid high interrupt
1530 		 * loads due to frequently entering and exiting polling.
1531 		 */
1532 		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1533 		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1534 			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1535 			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1536 	}
1537 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1538 }
1539 
1540 /**
1541  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1542  * @q_vector: q_vector to set WB_ON_ITR on
1543  *
1544  * We need to tell hardware to write-back completed descriptors even when
1545  * interrupts are disabled. Descriptors will be written back on cache line
1546  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1547  * descriptors may not be written back if they don't fill a cache line until
1548  * the next interrupt.
1549  *
1550  * This sets the write-back frequency to whatever was set previously for the
1551  * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1552  * aren't meddling with the INTENA_M bit.
1553  */
1554 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1555 {
1556 	struct ice_vsi *vsi = q_vector->vsi;
1557 
1558 	/* already in wb_on_itr mode, no need to change it */
1559 	if (q_vector->wb_on_itr)
1560 		return;
1561 
1562 	/* use previously set ITR values for all of the ITR indices by
1563 	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1564 	 * be static in non-adaptive mode (user configured)
1565 	 */
1566 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1567 	     FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
1568 	     FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
1569 	     FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
1570 
1571 	q_vector->wb_on_itr = true;
1572 }
1573 
1574 /**
1575  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1576  * @napi: napi struct with our devices info in it
1577  * @budget: amount of work driver is allowed to do this pass, in packets
1578  *
1579  * This function will clean all queues associated with a q_vector.
1580  *
1581  * Returns the amount of work done
1582  */
1583 int ice_napi_poll(struct napi_struct *napi, int budget)
1584 {
1585 	struct ice_q_vector *q_vector =
1586 				container_of(napi, struct ice_q_vector, napi);
1587 	struct ice_tx_ring *tx_ring;
1588 	struct ice_rx_ring *rx_ring;
1589 	bool clean_complete = true;
1590 	int budget_per_ring;
1591 	int work_done = 0;
1592 
1593 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1594 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1595 	 */
1596 	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1597 		struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
1598 		bool wd;
1599 
1600 		if (xsk_pool)
1601 			wd = ice_xmit_zc(tx_ring, xsk_pool);
1602 		else if (ice_ring_is_xdp(tx_ring))
1603 			wd = true;
1604 		else
1605 			wd = ice_clean_tx_irq(tx_ring, budget);
1606 
1607 		if (!wd)
1608 			clean_complete = false;
1609 	}
1610 
1611 	/* Handle case where we are called by netpoll with a budget of 0 */
1612 	if (unlikely(budget <= 0))
1613 		return budget;
1614 
1615 	/* normally we have 1 Rx ring per q_vector */
1616 	if (unlikely(q_vector->num_ring_rx > 1))
1617 		/* We attempt to distribute budget to each Rx queue fairly, but
1618 		 * don't allow the budget to go below 1 because that would exit
1619 		 * polling early.
1620 		 */
1621 		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1622 	else
1623 		/* Max of 1 Rx ring in this q_vector so give it the budget */
1624 		budget_per_ring = budget;
1625 
1626 	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1627 		struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
1628 		int cleaned;
1629 
1630 		/* A dedicated path for zero-copy allows making a single
1631 		 * comparison in the irq context instead of many inside the
1632 		 * ice_clean_rx_irq function and makes the codebase cleaner.
1633 		 */
1634 		cleaned = rx_ring->xsk_pool ?
1635 			  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
1636 			  ice_clean_rx_irq(rx_ring, budget_per_ring);
1637 		work_done += cleaned;
1638 		/* if we clean as many as budgeted, we must not be done */
1639 		if (cleaned >= budget_per_ring)
1640 			clean_complete = false;
1641 	}
1642 
1643 	/* If work not completed, return budget and polling will return */
1644 	if (!clean_complete) {
1645 		/* Set the writeback on ITR so partial completions of
1646 		 * cache-lines will still continue even if we're polling.
1647 		 */
1648 		ice_set_wb_on_itr(q_vector);
1649 		return budget;
1650 	}
1651 
1652 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1653 	 * poll us due to busy-polling
1654 	 */
1655 	if (napi_complete_done(napi, work_done)) {
1656 		ice_net_dim(q_vector);
1657 		ice_enable_interrupt(q_vector);
1658 	} else {
1659 		ice_set_wb_on_itr(q_vector);
1660 	}
1661 
1662 	return min_t(int, work_done, budget - 1);
1663 }
1664 
1665 /**
1666  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1667  * @tx_ring: the ring to be checked
1668  * @size: the size buffer we want to assure is available
1669  *
1670  * Returns -EBUSY if a stop is needed, else 0
1671  */
1672 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1673 {
1674 	netif_tx_stop_queue(txring_txq(tx_ring));
1675 	/* Memory barrier before checking head and tail */
1676 	smp_mb();
1677 
1678 	/* Check again in a case another CPU has just made room available. */
1679 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1680 		return -EBUSY;
1681 
1682 	/* A reprieve! - use start_queue because it doesn't call schedule */
1683 	netif_tx_start_queue(txring_txq(tx_ring));
1684 	++tx_ring->ring_stats->tx_stats.restart_q;
1685 	return 0;
1686 }
1687 
1688 /**
1689  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1690  * @tx_ring: the ring to be checked
1691  * @size:    the size buffer we want to assure is available
1692  *
1693  * Returns 0 if stop is not needed
1694  */
1695 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1696 {
1697 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1698 		return 0;
1699 
1700 	return __ice_maybe_stop_tx(tx_ring, size);
1701 }
1702 
1703 /**
1704  * ice_tx_map - Build the Tx descriptor
1705  * @tx_ring: ring to send buffer on
1706  * @first: first buffer info buffer to use
1707  * @off: pointer to struct that holds offload parameters
1708  *
1709  * This function loops over the skb data pointed to by *first
1710  * and gets a physical address for each memory location and programs
1711  * it and the length into the transmit descriptor.
1712  */
1713 static void
1714 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1715 	   struct ice_tx_offload_params *off)
1716 {
1717 	u64 td_offset, td_tag, td_cmd;
1718 	u16 i = tx_ring->next_to_use;
1719 	unsigned int data_len, size;
1720 	struct ice_tx_desc *tx_desc;
1721 	struct ice_tx_buf *tx_buf;
1722 	struct sk_buff *skb;
1723 	skb_frag_t *frag;
1724 	dma_addr_t dma;
1725 	bool kick;
1726 
1727 	td_tag = off->td_l2tag1;
1728 	td_cmd = off->td_cmd;
1729 	td_offset = off->td_offset;
1730 	skb = first->skb;
1731 
1732 	data_len = skb->data_len;
1733 	size = skb_headlen(skb);
1734 
1735 	tx_desc = ICE_TX_DESC(tx_ring, i);
1736 
1737 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1738 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1739 		td_tag = first->vid;
1740 	}
1741 
1742 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1743 
1744 	tx_buf = first;
1745 
1746 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1747 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1748 
1749 		if (dma_mapping_error(tx_ring->dev, dma))
1750 			goto dma_error;
1751 
1752 		/* record length, and DMA address */
1753 		dma_unmap_len_set(tx_buf, len, size);
1754 		dma_unmap_addr_set(tx_buf, dma, dma);
1755 
1756 		/* extend the first chunk so later chunks start on an ICE_MAX_READ_REQ_SIZE boundary */
1757 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1758 		tx_desc->buf_addr = cpu_to_le64(dma);
1759 
1760 		/* account for data chunks larger than the hardware
1761 		 * can handle
1762 		 */
1763 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1764 			tx_desc->cmd_type_offset_bsz =
1765 				ice_build_ctob(td_cmd, td_offset, max_data,
1766 					       td_tag);
1767 
1768 			tx_desc++;
1769 			i++;
1770 
1771 			if (i == tx_ring->count) {
1772 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1773 				i = 0;
1774 			}
1775 
1776 			dma += max_data;
1777 			size -= max_data;
1778 
1779 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1780 			tx_desc->buf_addr = cpu_to_le64(dma);
1781 		}
1782 
1783 		if (likely(!data_len))
1784 			break;
1785 
1786 		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1787 							      size, td_tag);
1788 
1789 		tx_desc++;
1790 		i++;
1791 
1792 		if (i == tx_ring->count) {
1793 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1794 			i = 0;
1795 		}
1796 
1797 		size = skb_frag_size(frag);
1798 		data_len -= size;
1799 
1800 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1801 				       DMA_TO_DEVICE);
1802 
1803 		tx_buf = &tx_ring->tx_buf[i];
1804 		tx_buf->type = ICE_TX_BUF_FRAG;
1805 	}
1806 
1807 	/* record SW timestamp if HW timestamp is not available */
1808 	skb_tx_timestamp(first->skb);
1809 
1810 	i++;
1811 	if (i == tx_ring->count)
1812 		i = 0;
1813 
1814 	/* write last descriptor with RS and EOP bits */
1815 	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1816 	tx_desc->cmd_type_offset_bsz =
1817 			ice_build_ctob(td_cmd, td_offset, size, td_tag);
1818 
1819 	/* Force memory writes to complete before letting h/w know there
1820 	 * are new descriptors to fetch.
1821 	 *
1822 	 * We also use this memory barrier to make certain all of the
1823 	 * status bits have been updated before next_to_watch is written.
1824 	 */
1825 	wmb();
1826 
1827 	/* set next_to_watch value indicating a packet is present */
1828 	first->next_to_watch = tx_desc;
1829 
1830 	tx_ring->next_to_use = i;
1831 
1832 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1833 
1834 	/* account for BQL and learn whether a doorbell kick is needed */
1835 	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1836 				      netdev_xmit_more());
1837 	if (kick)
1838 		/* notify HW of packet */
1839 		writel(i, tx_ring->tail);
1840 
1841 	return;
1842 
1843 dma_error:
1844 	/* clear DMA mappings for failed tx_buf map */
1845 	for (;;) {
1846 		tx_buf = &tx_ring->tx_buf[i];
1847 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1848 		if (tx_buf == first)
1849 			break;
1850 		if (i == 0)
1851 			i = tx_ring->count;
1852 		i--;
1853 	}
1854 
1855 	tx_ring->next_to_use = i;
1856 }
1857 
1858 /**
1859  * ice_tx_csum - Enable Tx checksum offloads
1860  * @first: pointer to the first descriptor
1861  * @off: pointer to struct that holds offload parameters
1862  *
1863  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1864  */
1865 static
1866 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1867 {
1868 	const struct ice_tx_ring *tx_ring = off->tx_ring;
1869 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1870 	struct sk_buff *skb = first->skb;
1871 	union {
1872 		struct iphdr *v4;
1873 		struct ipv6hdr *v6;
1874 		unsigned char *hdr;
1875 	} ip;
1876 	union {
1877 		struct tcphdr *tcp;
1878 		unsigned char *hdr;
1879 	} l4;
1880 	__be16 frag_off, protocol;
1881 	unsigned char *exthdr;
1882 	u32 offset, cmd = 0;
1883 	u8 l4_proto = 0;
1884 
1885 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1886 		return 0;
1887 
1888 	protocol = vlan_get_protocol(skb);
1889 
1890 	if (eth_p_mpls(protocol)) {
1891 		ip.hdr = skb_inner_network_header(skb);
1892 		l4.hdr = skb_checksum_start(skb);
1893 	} else {
1894 		ip.hdr = skb_network_header(skb);
1895 		l4.hdr = skb_transport_header(skb);
1896 	}
1897 
1898 	/* compute outer L2 header size */
1899 	l2_len = ip.hdr - skb->data;
1900 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
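	/* As implied by the divides used here and below, MACLEN is
	 * programmed in 2-byte words while IPLEN and L4LEN are in 4-byte
	 * words, so a plain 14 byte Ethernet header yields a MACLEN of 7.
	 */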
1901 
1902 	/* set the tx_flags to indicate the IP protocol type; this is
1903 	 * required so that the checksum header computation below is accurate.
1904 	 */
1905 	if (ip.v4->version == 4)
1906 		first->tx_flags |= ICE_TX_FLAGS_IPV4;
1907 	else if (ip.v6->version == 6)
1908 		first->tx_flags |= ICE_TX_FLAGS_IPV6;
1909 
1910 	if (skb->encapsulation) {
1911 		bool gso_ena = false;
1912 		u32 tunnel = 0;
1913 
1914 		/* define outer network header type */
1915 		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1916 			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1917 				  ICE_TX_CTX_EIPT_IPV4 :
1918 				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1919 			l4_proto = ip.v4->protocol;
1920 		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1921 			int ret;
1922 
1923 			tunnel |= ICE_TX_CTX_EIPT_IPV6;
1924 			exthdr = ip.hdr + sizeof(*ip.v6);
1925 			l4_proto = ip.v6->nexthdr;
1926 			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1927 					       &l4_proto, &frag_off);
1928 			if (ret < 0)
1929 				return -1;
1930 		}
1931 
1932 		/* define outer transport */
1933 		switch (l4_proto) {
1934 		case IPPROTO_UDP:
1935 			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1936 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1937 			break;
1938 		case IPPROTO_GRE:
1939 			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1940 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1941 			break;
1942 		case IPPROTO_IPIP:
1943 		case IPPROTO_IPV6:
1944 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1945 			l4.hdr = skb_inner_network_header(skb);
1946 			break;
1947 		default:
1948 			if (first->tx_flags & ICE_TX_FLAGS_TSO)
1949 				return -1;
1950 
1951 			skb_checksum_help(skb);
1952 			return 0;
1953 		}
1954 
1955 		/* compute outer L3 header size */
1956 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1957 			  ICE_TXD_CTX_QW0_EIPLEN_S;
1958 
1959 		/* switch IP header pointer from outer to inner header */
1960 		ip.hdr = skb_inner_network_header(skb);
1961 
1962 		/* compute tunnel header size */
1963 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1964 			   ICE_TXD_CTX_QW0_NATLEN_S;
1965 
1966 		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1967 		/* indicate if we need to offload outer UDP header */
1968 		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1969 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1970 			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1971 
1972 		/* record tunnel offload values */
1973 		off->cd_tunnel_params |= tunnel;
1974 
1975 	/* set DTYP=1 to indicate that this is a Tx context descriptor
1976 	 * carrying the tunneling offload parameters in Quad Word 1
1977 		 */
1978 		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1979 
1980 		/* switch L4 header pointer from outer to inner */
1981 		l4.hdr = skb_inner_transport_header(skb);
1982 		l4_proto = 0;
1983 
1984 		/* reset type as we transition from outer to inner headers */
1985 		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1986 		if (ip.v4->version == 4)
1987 			first->tx_flags |= ICE_TX_FLAGS_IPV4;
1988 		if (ip.v6->version == 6)
1989 			first->tx_flags |= ICE_TX_FLAGS_IPV6;
1990 	}
1991 
1992 	/* Enable IP checksum offloads */
1993 	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1994 		l4_proto = ip.v4->protocol;
1995 		/* the stack computes the IP header already, the only time we
1996 		 * need the hardware to recompute it is in the case of TSO.
1997 		 */
1998 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1999 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2000 		else
2001 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2002 
2003 	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
2004 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2005 		exthdr = ip.hdr + sizeof(*ip.v6);
2006 		l4_proto = ip.v6->nexthdr;
2007 		if (l4.hdr != exthdr)
2008 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2009 					 &frag_off);
2010 	} else {
2011 		return -1;
2012 	}
2013 
2014 	/* compute inner L3 header size */
2015 	l3_len = l4.hdr - ip.hdr;
2016 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2017 
2018 	if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
2019 	    !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
2020 	    !skb_csum_is_sctp(skb)) {
2021 		/* Set GCS */
2022 		u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
2023 		u16 csum_offset = skb->csum_offset / 2;
2024 		u16 gcs_params;
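		/* the divides by two above suggest the GCS start and offset
		 * fields are programmed in 2-byte words, counted from the
		 * start of the MAC header
		 */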
2025 
2026 		gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
2027 			     FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
2028 			     FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
2029 					ICE_TX_GCS_DESC_CSUM_PSH);
2030 
2031 		/* Unlike legacy HW checksums, GCS requires a context
2032 		 * descriptor.
2033 		 */
2034 		off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
2035 		off->cd_gcs_params = gcs_params;
2036 		/* Fill out CSO info in data descriptors */
2037 		off->td_offset |= offset;
2038 		off->td_cmd |= cmd;
2039 		return 1;
2040 	}
2041 
2042 	/* Enable L4 checksum offloads */
2043 	switch (l4_proto) {
2044 	case IPPROTO_TCP:
2045 		/* enable TCP checksum offload */
2046 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2047 		l4_len = l4.tcp->doff;
2048 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2049 		break;
2050 	case IPPROTO_UDP:
2051 		/* enable UDP checksum offload */
2052 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2053 		l4_len = (sizeof(struct udphdr) >> 2);
2054 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2055 		break;
2056 	case IPPROTO_SCTP:
2057 		/* enable SCTP checksum offload */
2058 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2059 		l4_len = sizeof(struct sctphdr) >> 2;
2060 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2061 		break;
2062 
2063 	default:
2064 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
2065 			return -1;
2066 		skb_checksum_help(skb);
2067 		return 0;
2068 	}
2069 
2070 	off->td_cmd |= cmd;
2071 	off->td_offset |= offset;
2072 	return 1;
2073 }
2074 
2075 /**
2076  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2077  * @tx_ring: ring to send buffer on
2078  * @first: pointer to struct ice_tx_buf
2079  *
2080  * Checks the skb and sets up the corresponding generic transmit flags
2081  * related to VLAN tagging for the HW, such as VLAN and DCB.
2082  */
2083 static void
2084 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
2085 {
2086 	struct sk_buff *skb = first->skb;
2087 
2088 	/* nothing left to do, software offloaded VLAN */
2089 	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2090 		return;
2091 
2092 	/* the VLAN ethertype/TPID is determined by the VSI configuration and
2093 	 * netdev feature flags; the driver allows either 802.1Q or 802.1ad
2094 	 * VLAN offloads (never both), so we only care about the VLAN ID here
2095 	 */
2096 	if (skb_vlan_tag_present(skb)) {
2097 		first->vid = skb_vlan_tag_get(skb);
2098 		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
2099 			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
2100 		else
2101 			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2102 	}
2103 
2104 	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2105 }
2106 
2107 /**
2108  * ice_tso - computes mss and TSO length to prepare for TSO
2109  * @first: pointer to struct ice_tx_buf
2110  * @off: pointer to struct that holds offload parameters
2111  *
2112  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
2113  */
2114 static
2115 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2116 {
2117 	struct sk_buff *skb = first->skb;
2118 	union {
2119 		struct iphdr *v4;
2120 		struct ipv6hdr *v6;
2121 		unsigned char *hdr;
2122 	} ip;
2123 	union {
2124 		struct tcphdr *tcp;
2125 		struct udphdr *udp;
2126 		unsigned char *hdr;
2127 	} l4;
2128 	u64 cd_mss, cd_tso_len;
2129 	__be16 protocol;
2130 	u32 paylen;
2131 	u8 l4_start;
2132 	int err;
2133 
2134 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2135 		return 0;
2136 
2137 	if (!skb_is_gso(skb))
2138 		return 0;
2139 
2140 	err = skb_cow_head(skb, 0);
2141 	if (err < 0)
2142 		return err;
2143 
2144 	protocol = vlan_get_protocol(skb);
2145 
2146 	if (eth_p_mpls(protocol))
2147 		ip.hdr = skb_inner_network_header(skb);
2148 	else
2149 		ip.hdr = skb_network_header(skb);
2150 	l4.hdr = skb_checksum_start(skb);
2151 
2152 	/* initialize outer IP header fields */
2153 	if (ip.v4->version == 4) {
2154 		ip.v4->tot_len = 0;
2155 		ip.v4->check = 0;
2156 	} else {
2157 		ip.v6->payload_len = 0;
2158 	}
2159 
2160 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2161 					 SKB_GSO_GRE_CSUM |
2162 					 SKB_GSO_IPXIP4 |
2163 					 SKB_GSO_IPXIP6 |
2164 					 SKB_GSO_UDP_TUNNEL |
2165 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
2166 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2167 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2168 			l4.udp->len = 0;
2169 
2170 			/* determine offset of outer transport header */
2171 			l4_start = (u8)(l4.hdr - skb->data);
2172 
2173 			/* remove payload length from outer checksum */
2174 			paylen = skb->len - l4_start;
2175 			csum_replace_by_diff(&l4.udp->check,
2176 					     (__force __wsum)htonl(paylen));
2177 		}
2178 
2179 		/* reset pointers to inner headers */
2180 		ip.hdr = skb_inner_network_header(skb);
2181 		l4.hdr = skb_inner_transport_header(skb);
2182 
2183 		/* initialize inner IP header fields */
2184 		if (ip.v4->version == 4) {
2185 			ip.v4->tot_len = 0;
2186 			ip.v4->check = 0;
2187 		} else {
2188 			ip.v6->payload_len = 0;
2189 		}
2190 	}
2191 
2192 	/* determine offset of transport header */
2193 	l4_start = (u8)(l4.hdr - skb->data);
2194 
2195 	/* remove payload length from checksum */
2196 	paylen = skb->len - l4_start;
2197 
2198 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2199 		csum_replace_by_diff(&l4.udp->check,
2200 				     (__force __wsum)htonl(paylen));
2201 		/* compute length of UDP segmentation header */
2202 		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2203 	} else {
2204 		csum_replace_by_diff(&l4.tcp->check,
2205 				     (__force __wsum)htonl(paylen));
2206 		/* compute length of TCP segmentation header */
2207 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2208 	}
2209 
2210 	/* update gso_segs and bytecount */
2211 	first->gso_segs = skb_shinfo(skb)->gso_segs;
2212 	first->bytecount += (first->gso_segs - 1) * off->header_len;
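	/* e.g. a 4-segment TSO frame with a 54 byte header (14 L2 + 20 L3 +
	 * 20 L4) adds 3 * 54 = 162 bytes here, since every segment after the
	 * first repeats the headers on the wire
	 */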
2213 
2214 	cd_tso_len = skb->len - off->header_len;
2215 	cd_mss = skb_shinfo(skb)->gso_size;
2216 
2217 	/* record cdesc_qw1 with TSO parameters */
2218 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2219 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2220 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2221 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2222 	first->tx_flags |= ICE_TX_FLAGS_TSO;
2223 	return 1;
2224 }
2225 
2226 /**
2227  * ice_txd_use_count  - estimate the number of descriptors needed for Tx
2228  * @size: transmit request size in bytes
2229  *
2230  * Due to hardware alignment restrictions (4K alignment), we need to
2231  * assume that we can have no more than 12K of data per descriptor, even
2232  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2233  * Thus, we need to divide by 12K. But division is slow! Instead,
2234  * we decompose the operation into shifts and one relatively cheap
2235  * multiply operation.
2236  *
2237  * To divide by 12K, we first divide by 4K, then divide by 3:
2238  *     To divide by 4K, shift right by 12 bits
2239  *     To divide by 3, multiply by 85, then divide by 256
2240  *     (Divide by 256 is done by shifting right by 8 bits)
2241  * Finally, we add one to round up. Because 256 isn't an exact multiple of
2242  * 3, we'll underestimate near each multiple of 12K. This is actually more
2243  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2244  * segment. For our purposes this is accurate out to 1M which is orders of
2245  * magnitude greater than our largest possible GSO size.
2246  *
2247  * This would then be implemented as:
2248  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2249  *
2250  * Since multiplication and division are commutative, we can reorder
2251  * operations into:
2252  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
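 *
 * As a quick sanity check (assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1):
 * a 60000 byte chunk gives (60000 * 85) >> 20 = 4, plus one, for 5
 * descriptors total, which matches 60000 / 12288 rounded up.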
2253  */
2254 static unsigned int ice_txd_use_count(unsigned int size)
2255 {
2256 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2257 }
2258 
2259 /**
2260  * ice_xmit_desc_count - calculate number of Tx descriptors needed
2261  * @skb: send buffer
2262  *
2263  * Returns number of data descriptors needed for this skb.
2264  */
2265 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2266 {
2267 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2268 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2269 	unsigned int count = 0, size = skb_headlen(skb);
2270 
2271 	for (;;) {
2272 		count += ice_txd_use_count(size);
2273 
2274 		if (!nr_frags--)
2275 			break;
2276 
2277 		size = skb_frag_size(frag++);
2278 	}
2279 
2280 	return count;
2281 }
2282 
2283 /**
2284  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2285  * @skb: send buffer
2286  *
2287  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2288  * and so we need to figure out the cases where we need to linearize the skb.
2289  *
2290  * For TSO we need to count the TSO header and segment payload separately.
2291  * As such we need to check cases where we have 7 fragments or more as we
2292  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2293  * the segment payload in the first descriptor, and another 7 for the
2294  * fragments.
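 *
 * For example, with a gso_size of 1500 and eight 200 byte fragments no
 * window of six consecutive fragments covers a full segment, so the skb
 * has to be linearized before it can be transmitted.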
2295  */
2296 static bool __ice_chk_linearize(struct sk_buff *skb)
2297 {
2298 	const skb_frag_t *frag, *stale;
2299 	int nr_frags, sum;
2300 
2301 	/* no need to check if number of frags is less than 7 */
2302 	nr_frags = skb_shinfo(skb)->nr_frags;
2303 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2304 		return false;
2305 
2306 	/* We need to walk through the list and validate that each group
2307 	 * of 6 fragments totals at least gso_size.
2308 	 */
2309 	nr_frags -= ICE_MAX_BUF_TXD - 2;
2310 	frag = &skb_shinfo(skb)->frags[0];
2311 
2312 	/* Initialize sum to 1 minus gso_size, the worst case in which the
2313 	 * fragment ahead of us provides only one byte of payload. This is
2314 	 * why we are limited to 6 descriptors for a single transmit: the
2315 	 * header and the previous fragment are already consuming 2
2316 	 * descriptors.
2317 	 */
2318 	sum = 1 - skb_shinfo(skb)->gso_size;
2319 
2320 	/* Add size of frags 0 through 4 to create our initial sum */
2321 	sum += skb_frag_size(frag++);
2322 	sum += skb_frag_size(frag++);
2323 	sum += skb_frag_size(frag++);
2324 	sum += skb_frag_size(frag++);
2325 	sum += skb_frag_size(frag++);
2326 
2327 	/* Walk through fragments adding latest fragment, testing it, and
2328 	 * then removing stale fragments from the sum.
2329 	 */
2330 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2331 		int stale_size = skb_frag_size(stale);
2332 
2333 		sum += skb_frag_size(frag++);
2334 
2335 		/* The stale fragment may present us with a smaller
2336 		 * descriptor than the actual fragment size. To account
2337 		 * for that we need to remove all the data on the front and
2338 		 * figure out what the remainder would be in the last
2339 		 * descriptor associated with the fragment.
2340 		 */
2341 		if (stale_size > ICE_MAX_DATA_PER_TXD) {
2342 			int align_pad = -(skb_frag_off(stale)) &
2343 					(ICE_MAX_READ_REQ_SIZE - 1);
2344 
2345 			sum -= align_pad;
2346 			stale_size -= align_pad;
2347 
2348 			do {
2349 				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2350 				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2351 			} while (stale_size > ICE_MAX_DATA_PER_TXD);
2352 		}
2353 
2354 		/* if sum is negative we failed to make sufficient progress */
2355 		if (sum < 0)
2356 			return true;
2357 
2358 		if (!nr_frags--)
2359 			break;
2360 
2361 		sum -= stale_size;
2362 	}
2363 
2364 	return false;
2365 }
2366 
2367 /**
2368  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2369  * @skb:      send buffer
2370  * @count:    number of buffers used
2371  *
2372  * Note: Our HW can't scatter-gather more than 8 fragments to build
2373  * a packet on the wire and so we need to figure out the cases where we
2374  * need to linearize the skb.
2375  */
2376 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2377 {
2378 	/* Both TSO and single send will work if count is less than 8 */
2379 	if (likely(count < ICE_MAX_BUF_TXD))
2380 		return false;
2381 
2382 	if (skb_is_gso(skb))
2383 		return __ice_chk_linearize(skb);
2384 
2385 	/* we can support up to 8 data buffers for a single send */
2386 	return count != ICE_MAX_BUF_TXD;
2387 }
2388 
2389 /**
2390  * ice_tstamp - set up context descriptor for hardware timestamp
2391  * @tx_ring: pointer to the Tx ring to send buffer on
2392  * @skb: pointer to the SKB we're sending
2393  * @first: Tx buffer
2394  * @off: Tx offload parameters
2395  */
2396 static void
2397 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2398 	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2399 {
2400 	s8 idx;
2401 
2402 	/* only timestamp the outbound packet if the user has requested it */
2403 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2404 		return;
2405 
2406 	/* Tx timestamps cannot be sampled when doing TSO */
2407 	if (first->tx_flags & ICE_TX_FLAGS_TSO)
2408 		return;
2409 
2410 	/* Grab an open timestamp slot */
2411 	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2412 	if (idx < 0) {
2413 		tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
2414 		return;
2415 	}
2416 
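	/* the reserved timestamp slot index is carried in what TSO uses as
	 * the length field of QW1, hence the reuse of the TSO_LEN shift for
	 * idx below
	 */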
2417 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2418 			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2419 			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2420 	first->tx_flags |= ICE_TX_FLAGS_TSYN;
2421 }
2422 
2423 /**
2424  * ice_xmit_frame_ring - Sends buffer on Tx ring
2425  * @skb: send buffer
2426  * @tx_ring: ring to send buffer on
2427  *
2428  * Returns NETDEV_TX_OK if sent, else an error code
2429  */
2430 static netdev_tx_t
2431 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2432 {
2433 	struct ice_tx_offload_params offload = { 0 };
2434 	struct ice_vsi *vsi = tx_ring->vsi;
2435 	struct ice_tx_buf *first;
2436 	struct ethhdr *eth;
2437 	unsigned int count;
2438 	int tso, csum;
2439 
2440 	ice_trace(xmit_frame_ring, tx_ring, skb);
2441 
2442 	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
2443 		goto out_drop;
2444 
2445 	count = ice_xmit_desc_count(skb);
2446 	if (ice_chk_linearize(skb, count)) {
2447 		if (__skb_linearize(skb))
2448 			goto out_drop;
2449 		count = ice_txd_use_count(skb->len);
2450 		tx_ring->ring_stats->tx_stats.tx_linearize++;
2451 	}
2452 
2453 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2454 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2455 	 *       + 4 desc gap to avoid the cache line where head is,
2456 	 *       + 1 desc for context descriptor,
2457 	 * otherwise try next time
2458 	 */
2459 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2460 			      ICE_DESCS_FOR_CTX_DESC)) {
2461 		tx_ring->ring_stats->tx_stats.tx_busy++;
2462 		return NETDEV_TX_BUSY;
2463 	}
2464 
2465 	/* prefetch for bql data which is infrequently used */
2466 	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2467 
2468 	offload.tx_ring = tx_ring;
2469 
2470 	/* record the location of the first descriptor for this packet */
2471 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2472 	first->skb = skb;
2473 	first->type = ICE_TX_BUF_SKB;
2474 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2475 	first->gso_segs = 1;
2476 	first->tx_flags = 0;
2477 
2478 	/* prepare the VLAN tagging flags for Tx */
2479 	ice_tx_prepare_vlan_flags(tx_ring, first);
2480 	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2481 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2482 					(ICE_TX_CTX_DESC_IL2TAG2 <<
2483 					ICE_TXD_CTX_QW1_CMD_S));
2484 		offload.cd_l2tag2 = first->vid;
2485 	}
2486 
2487 	/* set up TSO offload */
2488 	tso = ice_tso(first, &offload);
2489 	if (tso < 0)
2490 		goto out_drop;
2491 
2492 	/* always set up Tx checksum offload */
2493 	csum = ice_tx_csum(first, &offload);
2494 	if (csum < 0)
2495 		goto out_drop;
2496 
2497 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2498 	eth = (struct ethhdr *)skb_mac_header(skb);
2499 
2500 	if ((ice_is_switchdev_running(vsi->back) ||
2501 	     ice_lag_is_switchdev_running(vsi->back)) &&
2502 	    vsi->type != ICE_VSI_SF)
2503 		ice_eswitch_set_target_vsi(skb, &offload);
2504 	else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2505 			   eth->h_proto == htons(ETH_P_LLDP)) &&
2506 			   vsi->type == ICE_VSI_PF &&
2507 			   vsi->port_info->qos_cfg.is_sw_lldp))
2508 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2509 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2510 					ICE_TXD_CTX_QW1_CMD_S);
2511 
2512 	ice_tstamp(tx_ring, skb, first, &offload);
2513 
2514 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2515 		struct ice_tx_ctx_desc *cdesc;
2516 		u16 i = tx_ring->next_to_use;
2517 
2518 		/* grab the next descriptor */
2519 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2520 		i++;
2521 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2522 
2523 		/* setup context descriptor */
2524 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2525 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2526 		cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
2527 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2528 	}
2529 
2530 	ice_tx_map(tx_ring, first, &offload);
2531 	return NETDEV_TX_OK;
2532 
2533 out_drop:
2534 	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2535 	dev_kfree_skb_any(skb);
2536 	return NETDEV_TX_OK;
2537 }
2538 
2539 /**
2540  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2541  * @skb: send buffer
2542  * @netdev: network interface device structure
2543  *
2544  * Returns NETDEV_TX_OK if sent, else an error code
2545  */
2546 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2547 {
2548 	struct ice_netdev_priv *np = netdev_priv(netdev);
2549 	struct ice_vsi *vsi = np->vsi;
2550 	struct ice_tx_ring *tx_ring;
2551 
2552 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2553 
2554 	/* hardware can't handle really short frames; pad them in software
2555 	 * here, hardware padding works beyond this point
2556 	 */
2557 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2558 		return NETDEV_TX_OK;
2559 
2560 	return ice_xmit_frame_ring(skb, tx_ring);
2561 }
2562 
2563 /**
2564  * ice_get_dscp_up - return the UP/TC value for a SKB
2565  * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2566  * @skb: SKB to query for info to determine UP/TC
2567  *
2568  * This function is to only be called when the PF is in L3 DSCP PFC mode
2569  */
2570 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2571 {
2572 	u8 dscp = 0;
2573 
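	/* the DS field carries DSCP in its upper six bits; shifting right by
	 * two drops the ECN bits and leaves the DSCP codepoint
	 */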
2574 	if (skb->protocol == htons(ETH_P_IP))
2575 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2576 	else if (skb->protocol == htons(ETH_P_IPV6))
2577 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2578 
2579 	return dcbcfg->dscp_map[dscp];
2580 }
2581 
2582 u16
2583 ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2584 		 struct net_device *sb_dev)
2585 {
2586 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2587 	struct ice_dcbx_cfg *dcbcfg;
2588 
2589 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2590 	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2591 		skb->priority = ice_get_dscp_up(dcbcfg, skb);
2592 
2593 	return netdev_pick_tx(netdev, skb, sb_dev);
2594 }
2595 
2596 /**
2597  * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2598  * @tx_ring: tx_ring to clean
2599  */
2600 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2601 {
2602 	struct ice_vsi *vsi = tx_ring->vsi;
2603 	s16 i = tx_ring->next_to_clean;
2604 	int budget = ICE_DFLT_IRQ_WORK;
2605 	struct ice_tx_desc *tx_desc;
2606 	struct ice_tx_buf *tx_buf;
2607 
2608 	tx_buf = &tx_ring->tx_buf[i];
2609 	tx_desc = ICE_TX_DESC(tx_ring, i);
2610 	i -= tx_ring->count;
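	/* keep i as a negative offset from the end of the ring so the wrap
	 * check in the loop below is a simple test against zero instead of a
	 * compare with tx_ring->count
	 */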
2611 
2612 	do {
2613 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2614 
2615 		/* if next_to_watch is not set then there is no pending work */
2616 		if (!eop_desc)
2617 			break;
2618 
2619 		/* prevent any other reads prior to eop_desc */
2620 		smp_rmb();
2621 
2622 		/* if the descriptor isn't done, no work to do */
2623 		if (!(eop_desc->cmd_type_offset_bsz &
2624 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2625 			break;
2626 
2627 		/* clear next_to_watch to prevent false hangs */
2628 		tx_buf->next_to_watch = NULL;
2629 		tx_desc->buf_addr = 0;
2630 		tx_desc->cmd_type_offset_bsz = 0;
2631 
2632 		/* move past filter desc */
2633 		tx_buf++;
2634 		tx_desc++;
2635 		i++;
2636 		if (unlikely(!i)) {
2637 			i -= tx_ring->count;
2638 			tx_buf = tx_ring->tx_buf;
2639 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2640 		}
2641 
2642 		/* unmap the data header */
2643 		if (dma_unmap_len(tx_buf, len))
2644 			dma_unmap_single(tx_ring->dev,
2645 					 dma_unmap_addr(tx_buf, dma),
2646 					 dma_unmap_len(tx_buf, len),
2647 					 DMA_TO_DEVICE);
2648 		if (tx_buf->type == ICE_TX_BUF_DUMMY)
2649 			devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2650 
2651 		/* reset the buffer and clear next_to_watch to prevent false hangs */
2652 		tx_buf->type = ICE_TX_BUF_EMPTY;
2653 		tx_buf->tx_flags = 0;
2654 		tx_buf->next_to_watch = NULL;
2655 		dma_unmap_len_set(tx_buf, len, 0);
2656 		tx_desc->buf_addr = 0;
2657 		tx_desc->cmd_type_offset_bsz = 0;
2658 
2659 		/* move past eop_desc for start of next FD desc */
2660 		tx_buf++;
2661 		tx_desc++;
2662 		i++;
2663 		if (unlikely(!i)) {
2664 			i -= tx_ring->count;
2665 			tx_buf = tx_ring->tx_buf;
2666 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2667 		}
2668 
2669 		budget--;
2670 	} while (likely(budget));
2671 
2672 	i += tx_ring->count;
2673 	tx_ring->next_to_clean = i;
2674 
2675 	/* re-enable interrupt if needed */
2676 	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2677 }
2678