xref: /linux/drivers/net/ethernet/intel/ice/ice_txrx.c (revision 2b64b2ed277ff23e785fbdb65098ee7e1252d64f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include "ice.h"
9 
10 #define ICE_RX_HDR_SIZE		256
11 
12 /**
13  * ice_unmap_and_free_tx_buf - Release a Tx buffer
14  * @ring: the ring that owns the buffer
15  * @tx_buf: the buffer to free
16  */
17 static void
18 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
19 {
20 	if (tx_buf->skb) {
21 		dev_kfree_skb_any(tx_buf->skb);
22 		if (dma_unmap_len(tx_buf, len))
23 			dma_unmap_single(ring->dev,
24 					 dma_unmap_addr(tx_buf, dma),
25 					 dma_unmap_len(tx_buf, len),
26 					 DMA_TO_DEVICE);
27 	} else if (dma_unmap_len(tx_buf, len)) {
28 		dma_unmap_page(ring->dev,
29 			       dma_unmap_addr(tx_buf, dma),
30 			       dma_unmap_len(tx_buf, len),
31 			       DMA_TO_DEVICE);
32 	}
33 
34 	tx_buf->next_to_watch = NULL;
35 	tx_buf->skb = NULL;
36 	dma_unmap_len_set(tx_buf, len, 0);
37 	/* tx_buf must be completely set up in the transmit path */
38 }
39 
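/**
 * txring_txq - look up the netdev Tx queue that backs a Tx ring
 * @ring: Tx ring to find the netdev queue for
 */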
40 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
41 {
42 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
43 }
44 
45 /**
46  * ice_clean_tx_ring - Free any empty Tx buffers
47  * @tx_ring: ring to be cleaned
48  */
49 void ice_clean_tx_ring(struct ice_ring *tx_ring)
50 {
51 	u16 i;
52 
53 	/* ring already cleared, nothing to do */
54 	if (!tx_ring->tx_buf)
55 		return;
56 
57 	/* Free all the Tx ring sk_buffs */
58 	for (i = 0; i < tx_ring->count; i++)
59 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
60 
61 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
62 
63 	/* Zero out the descriptor ring */
64 	memset(tx_ring->desc, 0, tx_ring->size);
65 
66 	tx_ring->next_to_use = 0;
67 	tx_ring->next_to_clean = 0;
68 
69 	if (!tx_ring->netdev)
70 		return;
71 
72 	/* cleanup Tx queue statistics */
73 	netdev_tx_reset_queue(txring_txq(tx_ring));
74 }
75 
76 /**
77  * ice_free_tx_ring - Free Tx resources per queue
78  * @tx_ring: Tx descriptor ring for a specific queue
79  *
80  * Free all transmit software resources
81  */
82 void ice_free_tx_ring(struct ice_ring *tx_ring)
83 {
84 	ice_clean_tx_ring(tx_ring);
85 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
86 	tx_ring->tx_buf = NULL;
87 
88 	if (tx_ring->desc) {
89 		dmam_free_coherent(tx_ring->dev, tx_ring->size,
90 				   tx_ring->desc, tx_ring->dma);
91 		tx_ring->desc = NULL;
92 	}
93 }
94 
95 /**
96  * ice_clean_tx_irq - Reclaim resources after transmit completes
97  * @vsi: the VSI we care about
98  * @tx_ring: Tx ring to clean
99  * @napi_budget: Used to determine if we are in netpoll
100  *
101  * Returns true if there's any budget left (i.e. the clean is finished)
102  */
103 static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
104 			     int napi_budget)
105 {
106 	unsigned int total_bytes = 0, total_pkts = 0;
107 	unsigned int budget = vsi->work_lmt;
108 	s16 i = tx_ring->next_to_clean;
109 	struct ice_tx_desc *tx_desc;
110 	struct ice_tx_buf *tx_buf;
111 
112 	tx_buf = &tx_ring->tx_buf[i];
113 	tx_desc = ICE_TX_DESC(tx_ring, i);
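	/* Track the index as a negative offset from the end of the ring so the
	 * wrap checks below only need to test against zero; it is converted
	 * back into a real ring index once the cleanup loop finishes.
	 */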
114 	i -= tx_ring->count;
115 
116 	do {
117 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
118 
119 		/* if next_to_watch is not set then there is no work pending */
120 		if (!eop_desc)
121 			break;
122 
123 		smp_rmb();	/* prevent any other reads prior to eop_desc */
124 
125 		/* if the descriptor isn't done, no work yet to do */
126 		if (!(eop_desc->cmd_type_offset_bsz &
127 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
128 			break;
129 
130 		/* clear next_to_watch to prevent false hangs */
131 		tx_buf->next_to_watch = NULL;
132 
133 		/* update the statistics for this packet */
134 		total_bytes += tx_buf->bytecount;
135 		total_pkts += tx_buf->gso_segs;
136 
137 		/* free the skb */
138 		napi_consume_skb(tx_buf->skb, napi_budget);
139 
140 		/* unmap skb header data */
141 		dma_unmap_single(tx_ring->dev,
142 				 dma_unmap_addr(tx_buf, dma),
143 				 dma_unmap_len(tx_buf, len),
144 				 DMA_TO_DEVICE);
145 
146 		/* clear tx_buf data */
147 		tx_buf->skb = NULL;
148 		dma_unmap_len_set(tx_buf, len, 0);
149 
150 		/* unmap remaining buffers */
151 		while (tx_desc != eop_desc) {
152 			tx_buf++;
153 			tx_desc++;
154 			i++;
155 			if (unlikely(!i)) {
156 				i -= tx_ring->count;
157 				tx_buf = tx_ring->tx_buf;
158 				tx_desc = ICE_TX_DESC(tx_ring, 0);
159 			}
160 
161 			/* unmap any remaining paged data */
162 			if (dma_unmap_len(tx_buf, len)) {
163 				dma_unmap_page(tx_ring->dev,
164 					       dma_unmap_addr(tx_buf, dma),
165 					       dma_unmap_len(tx_buf, len),
166 					       DMA_TO_DEVICE);
167 				dma_unmap_len_set(tx_buf, len, 0);
168 			}
169 		}
170 
171 		/* move us one more past the eop_desc for start of next pkt */
172 		tx_buf++;
173 		tx_desc++;
174 		i++;
175 		if (unlikely(!i)) {
176 			i -= tx_ring->count;
177 			tx_buf = tx_ring->tx_buf;
178 			tx_desc = ICE_TX_DESC(tx_ring, 0);
179 		}
180 
181 		prefetch(tx_desc);
182 
183 		/* update budget accounting */
184 		budget--;
185 	} while (likely(budget));
186 
187 	i += tx_ring->count;
188 	tx_ring->next_to_clean = i;
189 	u64_stats_update_begin(&tx_ring->syncp);
190 	tx_ring->stats.bytes += total_bytes;
191 	tx_ring->stats.pkts += total_pkts;
192 	u64_stats_update_end(&tx_ring->syncp);
193 	tx_ring->q_vector->tx.total_bytes += total_bytes;
194 	tx_ring->q_vector->tx.total_pkts += total_pkts;
195 
196 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
197 				  total_bytes);
198 
199 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
200 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
201 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
202 		/* Make sure that anybody stopping the queue after this
203 		 * sees the new next_to_clean.
204 		 */
205 		smp_mb();
206 		if (__netif_subqueue_stopped(tx_ring->netdev,
207 					     tx_ring->q_index) &&
208 		   !test_bit(__ICE_DOWN, vsi->state)) {
209 			netif_wake_subqueue(tx_ring->netdev,
210 					    tx_ring->q_index);
211 			++tx_ring->tx_stats.restart_q;
212 		}
213 	}
214 
215 	return !!budget;
216 }
217 
218 /**
219  * ice_setup_tx_ring - Allocate the Tx descriptors
220  * @tx_ring: the Tx ring to set up
221  *
222  * Return 0 on success, negative on error
223  */
224 int ice_setup_tx_ring(struct ice_ring *tx_ring)
225 {
226 	struct device *dev = tx_ring->dev;
227 
228 	if (!dev)
229 		return -ENOMEM;
230 
231 	/* warn if we are about to overwrite the pointer */
232 	WARN_ON(tx_ring->tx_buf);
233 	tx_ring->tx_buf =
234 		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
235 			     GFP_KERNEL);
236 	if (!tx_ring->tx_buf)
237 		return -ENOMEM;
238 
239 	/* round up to nearest 4K */
240 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
241 			      4096);
242 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
243 					    GFP_KERNEL);
244 	if (!tx_ring->desc) {
245 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
246 			tx_ring->size);
247 		goto err;
248 	}
249 
250 	tx_ring->next_to_use = 0;
251 	tx_ring->next_to_clean = 0;
252 	tx_ring->tx_stats.prev_pkt = -1;
253 	return 0;
254 
255 err:
256 	devm_kfree(dev, tx_ring->tx_buf);
257 	tx_ring->tx_buf = NULL;
258 	return -ENOMEM;
259 }
260 
261 /**
262  * ice_clean_rx_ring - Free Rx buffers
263  * @rx_ring: ring to be cleaned
264  */
265 void ice_clean_rx_ring(struct ice_ring *rx_ring)
266 {
267 	struct device *dev = rx_ring->dev;
268 	u16 i;
269 
270 	/* ring already cleared, nothing to do */
271 	if (!rx_ring->rx_buf)
272 		return;
273 
274 	/* Free all the Rx ring sk_buffs */
275 	for (i = 0; i < rx_ring->count; i++) {
276 		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
277 
278 		if (rx_buf->skb) {
279 			dev_kfree_skb(rx_buf->skb);
280 			rx_buf->skb = NULL;
281 		}
282 		if (!rx_buf->page)
283 			continue;
284 
285 		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
286 		__free_pages(rx_buf->page, 0);
287 
288 		rx_buf->page = NULL;
289 		rx_buf->page_offset = 0;
290 	}
291 
292 	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
293 
294 	/* Zero out the descriptor ring */
295 	memset(rx_ring->desc, 0, rx_ring->size);
296 
297 	rx_ring->next_to_alloc = 0;
298 	rx_ring->next_to_clean = 0;
299 	rx_ring->next_to_use = 0;
300 }
301 
302 /**
303  * ice_free_rx_ring - Free Rx resources
304  * @rx_ring: ring to clean the resources from
305  *
306  * Free all receive software resources
307  */
308 void ice_free_rx_ring(struct ice_ring *rx_ring)
309 {
310 	ice_clean_rx_ring(rx_ring);
311 	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
312 	rx_ring->rx_buf = NULL;
313 
314 	if (rx_ring->desc) {
315 		dmam_free_coherent(rx_ring->dev, rx_ring->size,
316 				   rx_ring->desc, rx_ring->dma);
317 		rx_ring->desc = NULL;
318 	}
319 }
320 
321 /**
322  * ice_setup_rx_ring - Allocate the Rx descriptors
323  * @rx_ring: the Rx ring to set up
324  *
325  * Return 0 on success, negative on error
326  */
327 int ice_setup_rx_ring(struct ice_ring *rx_ring)
328 {
329 	struct device *dev = rx_ring->dev;
330 
331 	if (!dev)
332 		return -ENOMEM;
333 
334 	/* warn if we are about to overwrite the pointer */
335 	WARN_ON(rx_ring->rx_buf);
336 	rx_ring->rx_buf =
337 		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
338 			     GFP_KERNEL);
339 	if (!rx_ring->rx_buf)
340 		return -ENOMEM;
341 
342 	/* round up to nearest 4K */
343 	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
344 	rx_ring->size = ALIGN(rx_ring->size, 4096);
345 	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
346 					    GFP_KERNEL);
347 	if (!rx_ring->desc) {
348 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
349 			rx_ring->size);
350 		goto err;
351 	}
352 
353 	rx_ring->next_to_use = 0;
354 	rx_ring->next_to_clean = 0;
355 	return 0;
356 
357 err:
358 	devm_kfree(dev, rx_ring->rx_buf);
359 	rx_ring->rx_buf = NULL;
360 	return -ENOMEM;
361 }
362 
363 /**
364  * ice_release_rx_desc - Store the new tail and head values
365  * @rx_ring: ring to bump
366  * @val: new head index
367  */
368 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
369 {
370 	rx_ring->next_to_use = val;
371 
372 	/* update next to alloc since we have filled the ring */
373 	rx_ring->next_to_alloc = val;
374 
375 	/* Force memory writes to complete before letting h/w
376 	 * know there are new descriptors to fetch. (Only
377 	 * applicable for weak-ordered memory model archs,
378 	 * such as IA-64).
379 	 */
380 	wmb();
381 	writel(val, rx_ring->tail);
382 }
383 
384 /**
385  * ice_alloc_mapped_page - recycle or make a new page
386  * @rx_ring: ring to use
387  * @bi: rx_buf struct to modify
388  *
389  * Returns true if the page was successfully allocated or
390  * reused.
391  */
392 static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
393 				  struct ice_rx_buf *bi)
394 {
395 	struct page *page = bi->page;
396 	dma_addr_t dma;
397 
398 	/* since we are recycling buffers we should seldom need to alloc */
399 	if (likely(page)) {
400 		rx_ring->rx_stats.page_reuse_count++;
401 		return true;
402 	}
403 
404 	/* alloc new page for storage */
405 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
406 	if (unlikely(!page)) {
407 		rx_ring->rx_stats.alloc_page_failed++;
408 		return false;
409 	}
410 
411 	/* map page for use */
412 	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
413 
414 	/* if mapping failed free memory back to system since
415 	 * there isn't much point in holding memory we can't use
416 	 */
417 	if (dma_mapping_error(rx_ring->dev, dma)) {
418 		__free_pages(page, 0);
419 		rx_ring->rx_stats.alloc_page_failed++;
420 		return false;
421 	}
422 
423 	bi->dma = dma;
424 	bi->page = page;
425 	bi->page_offset = 0;
426 
427 	return true;
428 }
429 
430 /**
431  * ice_alloc_rx_bufs - Replace used receive buffers
432  * @rx_ring: ring to place buffers on
433  * @cleaned_count: number of buffers to replace
434  *
435  * Returns false if all allocations were successful, true if any fail
436  */
437 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
438 {
439 	union ice_32b_rx_flex_desc *rx_desc;
440 	u16 ntu = rx_ring->next_to_use;
441 	struct ice_rx_buf *bi;
442 
443 	/* do nothing if no valid netdev defined */
444 	if (!rx_ring->netdev || !cleaned_count)
445 		return false;
446 
447 	/* get the RX descriptor and buffer based on next_to_use */
448 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
449 	bi = &rx_ring->rx_buf[ntu];
450 
451 	do {
452 		if (!ice_alloc_mapped_page(rx_ring, bi))
453 			goto no_bufs;
454 
455 		/* Refresh the desc even if buffer_addrs didn't change
456 		 * because each write-back erases this info.
457 		 */
458 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
459 
460 		rx_desc++;
461 		bi++;
462 		ntu++;
463 		if (unlikely(ntu == rx_ring->count)) {
464 			rx_desc = ICE_RX_DESC(rx_ring, 0);
465 			bi = rx_ring->rx_buf;
466 			ntu = 0;
467 		}
468 
469 		/* clear the status bits for the next_to_use descriptor */
470 		rx_desc->wb.status_error0 = 0;
471 
472 		cleaned_count--;
473 	} while (cleaned_count);
474 
475 	if (rx_ring->next_to_use != ntu)
476 		ice_release_rx_desc(rx_ring, ntu);
477 
478 	return false;
479 
480 no_bufs:
481 	if (rx_ring->next_to_use != ntu)
482 		ice_release_rx_desc(rx_ring, ntu);
483 
484 	/* make sure to come back via polling to try again after
485 	 * allocation failure
486 	 */
487 	return true;
488 }
489 
490 /**
491  * ice_page_is_reserved - check if reuse is possible
492  * @page: page struct to check
493  */
494 static bool ice_page_is_reserved(struct page *page)
495 {
496 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
497 }
498 
499 /**
500  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
501  * @rx_buf: buffer containing page to add
502  * @rx_desc: descriptor containing length of buffer written by hardware
503  * @skb: sk_buf to place the data into
504  *
505  * This function will add the data contained in rx_buf->page to the skb.
506  * This is done either through a direct copy if the data in the buffer is
507  * less than the skb header size, otherwise it will just attach the page as
508  * a frag to the skb.
509  *
510  * The function will then update the page offset if necessary and return
511  * true if the buffer can be reused by the adapter.
512  */
513 static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
514 			    union ice_32b_rx_flex_desc *rx_desc,
515 			    struct sk_buff *skb)
516 {
517 #if (PAGE_SIZE < 8192)
518 	unsigned int truesize = ICE_RXBUF_2048;
519 #else
520 	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
521 	unsigned int truesize;
522 #endif /* PAGE_SIZE < 8192 */
523 
524 	struct page *page;
525 	unsigned int size;
526 
527 	size = le16_to_cpu(rx_desc->wb.pkt_len) &
528 		ICE_RX_FLX_DESC_PKT_LEN_M;
529 
530 	page = rx_buf->page;
531 
532 #if (PAGE_SIZE >= 8192)
533 	truesize = ALIGN(size, L1_CACHE_BYTES);
534 #endif /* PAGE_SIZE >= 8192 */
535 
536 	/* will the data fit in the skb we allocated? if so, just
537 	 * copy it as it is pretty small anyway
538 	 */
539 	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
540 		unsigned char *va = page_address(page) + rx_buf->page_offset;
541 
542 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
543 
544 		/* page is not reserved, we can reuse buffer as-is */
545 		if (likely(!ice_page_is_reserved(page)))
546 			return true;
547 
548 		/* this page cannot be reused so discard it */
549 		__free_pages(page, 0);
550 		return false;
551 	}
552 
553 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
554 			rx_buf->page_offset, size, truesize);
555 
556 	/* avoid re-using remote pages */
557 	if (unlikely(ice_page_is_reserved(page)))
558 		return false;
559 
560 #if (PAGE_SIZE < 8192)
561 	/* if we are only owner of page we can reuse it */
562 	if (unlikely(page_count(page) != 1))
563 		return false;
564 
565 	/* flip page offset to other buffer */
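	/* (XOR-ing by truesize toggles page_offset between the two
	 * half-page buffers carved out of the page)
	 */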
566 	rx_buf->page_offset ^= truesize;
567 #else
568 	/* move offset up to the next cache line */
569 	rx_buf->page_offset += truesize;
570 
571 	if (rx_buf->page_offset > last_offset)
572 		return false;
573 #endif /* PAGE_SIZE < 8192 */
574 
575 	/* Even if we own the page, we are not allowed to use atomic_set()
576 	 * This would break get_page_unless_zero() users.
577 	 */
578 	get_page(rx_buf->page);
579 
580 	return true;
581 }
582 
583 /**
584  * ice_reuse_rx_page - page flip buffer and store it back on the ring
585  * @rx_ring: Rx descriptor ring to store buffers on
586  * @old_buf: donor buffer to have page reused
587  *
588  * Synchronizes page for reuse by the adapter
589  */
590 static void ice_reuse_rx_page(struct ice_ring *rx_ring,
591 			      struct ice_rx_buf *old_buf)
592 {
593 	u16 nta = rx_ring->next_to_alloc;
594 	struct ice_rx_buf *new_buf;
595 
596 	new_buf = &rx_ring->rx_buf[nta];
597 
598 	/* update, and store next to alloc */
599 	nta++;
600 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
601 
602 	/* transfer page from old buffer to new buffer */
603 	*new_buf = *old_buf;
604 }
605 
606 /**
607  * ice_fetch_rx_buf - Allocate skb and populate it
608  * @rx_ring: Rx descriptor ring to transact packets on
609  * @rx_desc: descriptor containing info written by hardware
610  *
611  * This function allocates an skb on the fly, and populates it with the page
612  * data from the current receive descriptor, taking care to set up the skb
613  * correctly, as well as handling calling the page recycle function if
614  * necessary.
615  */
616 static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
617 					union ice_32b_rx_flex_desc *rx_desc)
618 {
619 	struct ice_rx_buf *rx_buf;
620 	struct sk_buff *skb;
621 	struct page *page;
622 
623 	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
624 	page = rx_buf->page;
625 	prefetchw(page);
626 
627 	skb = rx_buf->skb;
628 
629 	if (likely(!skb)) {
630 		u8 *page_addr = page_address(page) + rx_buf->page_offset;
631 
632 		/* prefetch first cache line of first page */
633 		prefetch(page_addr);
634 #if L1_CACHE_BYTES < 128
635 		prefetch((void *)(page_addr + L1_CACHE_BYTES));
636 #endif /* L1_CACHE_BYTES */
637 
638 		/* allocate a skb to store the frags */
639 		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
640 				       ICE_RX_HDR_SIZE,
641 				       GFP_ATOMIC | __GFP_NOWARN);
642 		if (unlikely(!skb)) {
643 			rx_ring->rx_stats.alloc_buf_failed++;
644 			return NULL;
645 		}
646 
647 		/* we will be copying header into skb->data in
648 		 * pskb_may_pull so it is in our interest to prefetch
649 		 * it now to avoid a possible cache miss
650 		 */
651 		prefetchw(skb->data);
652 
653 		skb_record_rx_queue(skb, rx_ring->q_index);
654 	} else {
655 		/* we are reusing so sync this buffer for CPU use */
656 		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
657 					      rx_buf->page_offset,
658 					      ICE_RXBUF_2048,
659 					      DMA_FROM_DEVICE);
660 
661 		rx_buf->skb = NULL;
662 	}
663 
664 	/* pull page into skb */
665 	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
666 		/* hand second half of page back to the ring */
667 		ice_reuse_rx_page(rx_ring, rx_buf);
668 		rx_ring->rx_stats.page_reuse_count++;
669 	} else {
670 		/* we are not reusing the buffer so unmap it */
671 		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
672 			       DMA_FROM_DEVICE);
673 	}
674 
675 	/* clear contents of buffer_info */
676 	rx_buf->page = NULL;
677 
678 	return skb;
679 }
680 
681 /**
682  * ice_pull_tail - ice specific version of skb_pull_tail
683  * @skb: pointer to current skb being adjusted
684  *
685  * This function is an ice specific version of __pskb_pull_tail. The
686  * main difference between this version and the original function is that
687  * this function can make several assumptions about the state of things
688  * that allow for significant optimizations versus the standard function.
689  * As a result we can do things like drop a frag and maintain an accurate
690  * truesize for the skb.
691  */
692 static void ice_pull_tail(struct sk_buff *skb)
693 {
694 	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
695 	unsigned int pull_len;
696 	unsigned char *va;
697 
698 	/* it is valid to use page_address instead of kmap since we are
699 	 * working with pages allocated out of the lowmem pool per
700 	 * alloc_page(GFP_ATOMIC)
701 	 */
702 	va = skb_frag_address(frag);
703 
704 	/* we need the header to contain the greater of either ETH_HLEN or
705 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
706 	 */
707 	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
708 
709 	/* align pull length to size of long to optimize memcpy performance */
710 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
711 
712 	/* update all of the pointers */
713 	skb_frag_size_sub(frag, pull_len);
714 	frag->page_offset += pull_len;
715 	skb->data_len -= pull_len;
716 	skb->tail += pull_len;
717 }
718 
719 /**
720  * ice_cleanup_headers - Correct empty headers
721  * @skb: pointer to current skb being fixed
722  *
723  * Also address the case where we are pulling data in on pages only
724  * and as such no data is present in the skb header.
725  *
726  * In addition if skb is not at least 60 bytes we need to pad it so that
727  * it is large enough to qualify as a valid Ethernet frame.
728  *
729  * Returns true if an error was encountered and skb was freed.
730  */
731 static bool ice_cleanup_headers(struct sk_buff *skb)
732 {
733 	/* place header in linear portion of buffer */
734 	if (skb_is_nonlinear(skb))
735 		ice_pull_tail(skb);
736 
737 	/* if eth_skb_pad returns an error the skb was freed */
738 	if (eth_skb_pad(skb))
739 		return true;
740 
741 	return false;
742 }
743 
744 /**
745  * ice_test_staterr - tests bits in Rx descriptor status and error fields
746  * @rx_desc: pointer to receive descriptor (in le64 format)
747  * @stat_err_bits: value to mask
748  *
749  * This function does some fast chicanery in order to return the
750  * value of the mask which is really only used for boolean tests.
751  * The status_error0 field doesn't need to be shifted because it begins
752  * at offset zero.
753  */
754 static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
755 			     const u16 stat_err_bits)
756 {
757 	return !!(rx_desc->wb.status_error0 &
758 		  cpu_to_le16(stat_err_bits));
759 }
760 
761 /**
762  * ice_is_non_eop - process handling of non-EOP buffers
763  * @rx_ring: Rx ring being processed
764  * @rx_desc: Rx descriptor for current buffer
765  * @skb: Current socket buffer containing buffer in progress
766  *
767  * This function updates next to clean. If the buffer is an EOP buffer
768  * this function exits returning false, otherwise it will place the
769  * sk_buff in the next buffer to be chained and return true indicating
770  * that this is in fact a non-EOP buffer.
771  */
772 static bool ice_is_non_eop(struct ice_ring *rx_ring,
773 			   union ice_32b_rx_flex_desc *rx_desc,
774 			   struct sk_buff *skb)
775 {
776 	u32 ntc = rx_ring->next_to_clean + 1;
777 
778 	/* fetch, update, and store next to clean */
779 	ntc = (ntc < rx_ring->count) ? ntc : 0;
780 	rx_ring->next_to_clean = ntc;
781 
782 	prefetch(ICE_RX_DESC(rx_ring, ntc));
783 
784 	/* if we are the last buffer then there is nothing else to do */
785 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
786 	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
787 		return false;
788 
789 	/* place skb in next buffer to be received */
790 	rx_ring->rx_buf[ntc].skb = skb;
791 	rx_ring->rx_stats.non_eop_descs++;
792 
793 	return true;
794 }
795 
796 /**
797  * ice_ptype_to_htype - get a hash type
798  * @ptype: the ptype value from the descriptor
799  *
800  * Returns a hash type to be used by skb_set_hash
801  */
802 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
803 {
804 	return PKT_HASH_TYPE_NONE;
805 }
806 
807 /**
808  * ice_rx_hash - set the hash value in the skb
809  * @rx_ring: descriptor ring
810  * @rx_desc: specific descriptor
811  * @skb: pointer to current skb
812  * @rx_ptype: the ptype value from the descriptor
813  */
814 static void
815 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
816 	    struct sk_buff *skb, u8 rx_ptype)
817 {
818 	struct ice_32b_rx_flex_desc_nic *nic_mdid;
819 	u32 hash;
820 
821 	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
822 		return;
823 
824 	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
825 		return;
826 
827 	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
828 	hash = le32_to_cpu(nic_mdid->rss_hash);
829 	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
830 }
831 
832 /**
833  * ice_rx_csum - Indicate in skb if checksum is good
834  * @vsi: the VSI we care about
835  * @skb: skb currently being received and modified
836  * @rx_desc: the receive descriptor
837  * @ptype: the packet type decoded by hardware
838  *
839  * skb->protocol must be set before this function is called
840  */
841 static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
842 			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
843 {
844 	struct ice_rx_ptype_decoded decoded;
845 	u32 rx_error, rx_status;
846 	bool ipv4, ipv6;
847 
848 	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
849 	rx_error = rx_status;
850 
851 	decoded = ice_decode_rx_desc_ptype(ptype);
852 
853 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
854 	skb->ip_summed = CHECKSUM_NONE;
855 	skb_checksum_none_assert(skb);
856 
857 	/* check if Rx checksum is enabled */
858 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
859 		return;
860 
861 	/* check if HW has decoded the packet and checksum */
862 	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
863 		return;
864 
865 	if (!(decoded.known && decoded.outer_ip))
866 		return;
867 
868 	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
869 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
870 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
871 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
872 
873 	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
874 				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
875 		goto checksum_fail;
876 	else if (ipv6 && (rx_status &
877 		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
878 		goto checksum_fail;
879 
880 	/* check for L4 errors and handle packets that were not able to be
881 	 * checksummed due to arrival speed
882 	 */
883 	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
884 		goto checksum_fail;
885 
886 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
887 	switch (decoded.inner_prot) {
888 	case ICE_RX_PTYPE_INNER_PROT_TCP:
889 	case ICE_RX_PTYPE_INNER_PROT_UDP:
890 	case ICE_RX_PTYPE_INNER_PROT_SCTP:
891 		skb->ip_summed = CHECKSUM_UNNECESSARY;
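		/* fall through */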
892 	default:
893 		break;
894 	}
895 	return;
896 
897 checksum_fail:
898 	vsi->back->hw_csum_rx_error++;
899 }
900 
901 /**
902  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
903  * @rx_ring: Rx descriptor ring packet is being transacted on
904  * @rx_desc: pointer to the EOP Rx descriptor
905  * @skb: pointer to current skb being populated
906  * @ptype: the packet type decoded by hardware
907  *
908  * This function checks the ring, descriptor, and packet information in
909  * order to populate the hash, checksum, VLAN, protocol, and
910  * other fields within the skb.
911  */
912 static void ice_process_skb_fields(struct ice_ring *rx_ring,
913 				   union ice_32b_rx_flex_desc *rx_desc,
914 				   struct sk_buff *skb, u8 ptype)
915 {
916 	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
917 
918 	/* modifies the skb - consumes the enet header */
919 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
920 
921 	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
922 }
923 
924 /**
925  * ice_receive_skb - Send a completed packet up the stack
926  * @rx_ring: Rx ring in play
927  * @skb: packet to send up
928  * @vlan_tag: vlan tag for packet
929  *
930  * This function sends the completed packet (via skb) up the stack using
931  * GRO receive functions (with/without VLAN tag)
932  */
933 static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
934 			    u16 vlan_tag)
935 {
936 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
937 	    (vlan_tag & VLAN_VID_MASK)) {
938 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
939 	}
940 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
941 }
942 
943 /**
944  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
945  * @rx_ring: Rx descriptor ring to transact packets on
946  * @budget: Total limit on number of packets to process
947  *
948  * This function provides a "bounce buffer" approach to Rx interrupt
949  * processing. The advantage to this is that on systems that have
950  * expensive overhead for IOMMU access this provides a means of avoiding
951  * it by maintaining the mapping of the page to the system.
952  *
953  * Returns amount of work completed
954  */
955 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
956 {
957 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
958 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
959 	bool failure = false;
960 
961 	/* start the loop to process RX packets bounded by 'budget' */
962 	while (likely(total_rx_pkts < (unsigned int)budget)) {
963 		union ice_32b_rx_flex_desc *rx_desc;
964 		struct sk_buff *skb;
965 		u16 stat_err_bits;
966 		u16 vlan_tag = 0;
967 		u8 rx_ptype;
968 
969 		/* return some buffers to hardware, one at a time is too slow */
970 		if (cleaned_count >= ICE_RX_BUF_WRITE) {
971 			failure = failure ||
972 				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
973 			cleaned_count = 0;
974 		}
975 
976 		/* get the RX desc from RX ring based on 'next_to_clean' */
977 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
978 
979 		/* status_error_len will always be zero for unused descriptors
980 		 * because it's cleared in cleanup, and overlaps with hdr_addr
981 		 * which is always zero because packet split isn't used, if the
982 		 * hardware wrote DD then it will be non-zero
983 		 */
984 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
985 		if (!ice_test_staterr(rx_desc, stat_err_bits))
986 			break;
987 
988 		/* This memory barrier is needed to keep us from reading
989 		 * any other fields out of the rx_desc until we know the
990 		 * DD bit is set.
991 		 */
992 		dma_rmb();
993 
994 		/* allocate (if needed) and populate skb */
995 		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
996 		if (!skb)
997 			break;
998 
999 		cleaned_count++;
1000 
1001 		/* skip if it is NOP desc */
1002 		if (ice_is_non_eop(rx_ring, rx_desc, skb))
1003 			continue;
1004 
1005 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1006 		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1007 			dev_kfree_skb_any(skb);
1008 			continue;
1009 		}
1010 
1011 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1012 			ICE_RX_FLEX_DESC_PTYPE_M;
1013 
1014 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1015 		if (ice_test_staterr(rx_desc, stat_err_bits))
1016 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1017 
1018 		/* correct empty headers and pad skb if needed (to make valid
1019 		 * Ethernet frame)
1020 		 */
1021 		if (ice_cleanup_headers(skb)) {
1022 			skb = NULL;
1023 			continue;
1024 		}
1025 
1026 		/* probably a little skewed due to removing CRC */
1027 		total_rx_bytes += skb->len;
1028 
1029 		/* populate checksum, VLAN, and protocol */
1030 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1031 
1032 		/* send completed skb up the stack */
1033 		ice_receive_skb(rx_ring, skb, vlan_tag);
1034 
1035 		/* update budget accounting */
1036 		total_rx_pkts++;
1037 	}
1038 
1039 	/* update queue and vector specific stats */
1040 	u64_stats_update_begin(&rx_ring->syncp);
1041 	rx_ring->stats.pkts += total_rx_pkts;
1042 	rx_ring->stats.bytes += total_rx_bytes;
1043 	u64_stats_update_end(&rx_ring->syncp);
1044 	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1045 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1046 
1047 	/* guarantee a trip back through this routine if there was a failure */
1048 	return failure ? budget : (int)total_rx_pkts;
1049 }
1050 
1051 /**
1052  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1053  * @itr_idx: interrupt throttling index
1054  * @reg_itr: interrupt throttling value adjusted based on ITR granularity
1055  */
1056 static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
1057 {
1058 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1059 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1060 		(reg_itr << GLINT_DYN_CTL_INTERVAL_S);
1061 }
1062 
1063 /**
1064  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1065  * @vsi: the VSI associated with the q_vector
1066  * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1067  */
1068 static void
1069 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1070 {
1071 	struct ice_hw *hw = &vsi->back->hw;
1072 	struct ice_ring_container *rc;
1073 	u32 itr_val;
1074 
1075 	/* This block of logic allows us to get away with only updating
1076 	 * one ITR value with each interrupt. The idea is to perform a
1077 	 * pseudo-lazy update with the following criteria.
1078 	 *
1079 	 * 1. Rx is given higher priority than Tx if both are in same state
1080 	 * 2. If we must reduce an ITR, that reduction is given highest priority.
1081 	 * 3. We then give priority to increasing ITR based on amount.
1082 	 */
1083 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1084 		rc = &q_vector->rx;
1085 		/* Rx ITR needs to be reduced, this is highest priority */
1086 		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1087 		rc->current_itr = rc->target_itr;
1088 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1089 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1090 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1091 		rc = &q_vector->tx;
1092 		/* Tx ITR needs to be reduced, this is second priority
1093 		 * Tx ITR needs to be increased more than Rx, fourth priority
1094 		 */
1095 		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1096 		rc->current_itr = rc->target_itr;
1097 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1098 		rc = &q_vector->rx;
1099 		/* Rx ITR needs to be increased, third priority */
1100 		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1101 		rc->current_itr = rc->target_itr;
1102 	} else {
1103 		/* Still have to re-enable the interrupts */
1104 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1105 	}
1106 
1107 	if (!test_bit(__ICE_DOWN, vsi->state)) {
1108 		int vector = vsi->hw_base_vector + q_vector->v_idx;
1109 
1110 		wr32(hw, GLINT_DYN_CTL(vector), itr_val);
1111 	}
1112 }
1113 
1114 /**
1115  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1116  * @napi: napi struct with our device's info in it
1117  * @budget: amount of work driver is allowed to do this pass, in packets
1118  *
1119  * This function will clean all queues associated with a q_vector.
1120  *
1121  * Returns the amount of work done
1122  */
1123 int ice_napi_poll(struct napi_struct *napi, int budget)
1124 {
1125 	struct ice_q_vector *q_vector =
1126 				container_of(napi, struct ice_q_vector, napi);
1127 	struct ice_vsi *vsi = q_vector->vsi;
1128 	struct ice_pf *pf = vsi->back;
1129 	bool clean_complete = true;
1130 	int budget_per_ring = 0;
1131 	struct ice_ring *ring;
1132 	int work_done = 0;
1133 
1134 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1135 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1136 	 */
1137 	ice_for_each_ring(ring, q_vector->tx)
1138 		if (!ice_clean_tx_irq(vsi, ring, budget))
1139 			clean_complete = false;
1140 
1141 	/* Handle case where we are called by netpoll with a budget of 0 */
1142 	if (budget <= 0)
1143 		return budget;
1144 
1145 	/* We attempt to distribute budget to each Rx queue fairly, but don't
1146 	 * allow the budget to go below 1 because that would exit polling early.
1147 	 */
1148 	if (q_vector->num_ring_rx)
1149 		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1150 
1151 	ice_for_each_ring(ring, q_vector->rx) {
1152 		int cleaned;
1153 
1154 		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1155 		work_done += cleaned;
1156 		/* if we clean as many as budgeted, we must not be done */
1157 		if (cleaned >= budget_per_ring)
1158 			clean_complete = false;
1159 	}
1160 
1161 	/* If work not completed, return budget and polling will return */
1162 	if (!clean_complete)
1163 		return budget;
1164 
1165 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1166 	 * poll us due to busy-polling
1167 	 */
1168 	if (likely(napi_complete_done(napi, work_done)))
1169 		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1170 			ice_update_ena_itr(vsi, q_vector);
1171 
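	/* returning the full budget after napi_complete_done() would suggest
	 * more work remains, so cap the return value at budget - 1
	 */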
1172 	return min_t(int, work_done, budget - 1);
1173 }
1174 
1175 /* helper function for building cmd/type/offset */
1176 static __le64
1177 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1178 {
1179 	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1180 			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
1181 			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
1182 			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1183 			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
1184 }
1185 
1186 /**
1187  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1188  * @tx_ring: the ring to be checked
1189  * @size: the size buffer we want to assure is available
1190  *
1191  * Returns -EBUSY if a stop is needed, else 0
1192  */
1193 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1194 {
1195 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1196 	/* Memory barrier before checking head and tail */
1197 	smp_mb();
1198 
1199 	/* Check again in a case another CPU has just made room available. */
1200 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1201 		return -EBUSY;
1202 
1203 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1204 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1205 	++tx_ring->tx_stats.restart_q;
1206 	return 0;
1207 }
1208 
1209 /**
1210  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1211  * @tx_ring: the ring to be checked
1212  * @size:    the size buffer we want to assure is available
1213  *
1214  * Returns 0 if stop is not needed
1215  */
1216 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1217 {
1218 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1219 		return 0;
1220 
1221 	return __ice_maybe_stop_tx(tx_ring, size);
1222 }
1223 
1224 /**
1225  * ice_tx_map - Build the Tx descriptor
1226  * @tx_ring: ring to send buffer on
1227  * @first: first buffer info buffer to use
1228  * @off: pointer to struct that holds offload parameters
1229  *
1230  * This function loops over the skb data pointed to by *first
1231  * and gets a physical address for each memory location and programs
1232  * it and the length into the transmit descriptor.
1233  */
1234 static void
1235 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1236 	   struct ice_tx_offload_params *off)
1237 {
1238 	u64 td_offset, td_tag, td_cmd;
1239 	u16 i = tx_ring->next_to_use;
1240 	struct skb_frag_struct *frag;
1241 	unsigned int data_len, size;
1242 	struct ice_tx_desc *tx_desc;
1243 	struct ice_tx_buf *tx_buf;
1244 	struct sk_buff *skb;
1245 	dma_addr_t dma;
1246 
1247 	td_tag = off->td_l2tag1;
1248 	td_cmd = off->td_cmd;
1249 	td_offset = off->td_offset;
1250 	skb = first->skb;
1251 
1252 	data_len = skb->data_len;
1253 	size = skb_headlen(skb);
1254 
1255 	tx_desc = ICE_TX_DESC(tx_ring, i);
1256 
1257 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1258 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1259 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1260 			  ICE_TX_FLAGS_VLAN_S;
1261 	}
1262 
1263 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1264 
1265 	tx_buf = first;
1266 
1267 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1268 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1269 
1270 		if (dma_mapping_error(tx_ring->dev, dma))
1271 			goto dma_error;
1272 
1273 		/* record length, and DMA address */
1274 		dma_unmap_len_set(tx_buf, len, size);
1275 		dma_unmap_addr_set(tx_buf, dma, dma);
1276 
1277 		/* align size to end of page */
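		/* (-dma & (ICE_MAX_READ_REQ_SIZE - 1) is the distance from dma to
		 * the next read-request boundary, so the first chunk of an
		 * oversized buffer ends on that boundary)
		 */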
1278 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1279 		tx_desc->buf_addr = cpu_to_le64(dma);
1280 
1281 		/* account for data chunks larger than the hardware
1282 		 * can handle
1283 		 */
1284 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1285 			tx_desc->cmd_type_offset_bsz =
1286 				build_ctob(td_cmd, td_offset, max_data, td_tag);
1287 
1288 			tx_desc++;
1289 			i++;
1290 
1291 			if (i == tx_ring->count) {
1292 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1293 				i = 0;
1294 			}
1295 
1296 			dma += max_data;
1297 			size -= max_data;
1298 
1299 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1300 			tx_desc->buf_addr = cpu_to_le64(dma);
1301 		}
1302 
1303 		if (likely(!data_len))
1304 			break;
1305 
1306 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1307 							  size, td_tag);
1308 
1309 		tx_desc++;
1310 		i++;
1311 
1312 		if (i == tx_ring->count) {
1313 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1314 			i = 0;
1315 		}
1316 
1317 		size = skb_frag_size(frag);
1318 		data_len -= size;
1319 
1320 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1321 				       DMA_TO_DEVICE);
1322 
1323 		tx_buf = &tx_ring->tx_buf[i];
1324 	}
1325 
1326 	/* record bytecount for BQL */
1327 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1328 
1329 	/* record SW timestamp if HW timestamp is not available */
1330 	skb_tx_timestamp(first->skb);
1331 
1332 	i++;
1333 	if (i == tx_ring->count)
1334 		i = 0;
1335 
1336 	/* write last descriptor with RS and EOP bits */
1337 	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1338 	tx_desc->cmd_type_offset_bsz =
1339 			build_ctob(td_cmd, td_offset, size, td_tag);
1340 
1341 	/* Force memory writes to complete before letting h/w know there
1342 	 * are new descriptors to fetch.
1343 	 *
1344 	 * We also use this memory barrier to make certain all of the
1345 	 * status bits have been updated before next_to_watch is written.
1346 	 */
1347 	wmb();
1348 
1349 	/* set next_to_watch value indicating a packet is present */
1350 	first->next_to_watch = tx_desc;
1351 
1352 	tx_ring->next_to_use = i;
1353 
1354 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1355 
1356 	/* notify HW of packet */
1357 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
1358 		writel(i, tx_ring->tail);
1359 
1360 		/* we need this if more than one processor can write to our tail
1361 		 * at a time, it synchronizes IO on IA64/Altix systems
1362 		 */
1363 		mmiowb();
1364 	}
1365 
1366 	return;
1367 
1368 dma_error:
1369 	/* clear dma mappings for failed tx_buf map */
1370 	for (;;) {
1371 		tx_buf = &tx_ring->tx_buf[i];
1372 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1373 		if (tx_buf == first)
1374 			break;
1375 		if (i == 0)
1376 			i = tx_ring->count;
1377 		i--;
1378 	}
1379 
1380 	tx_ring->next_to_use = i;
1381 }
1382 
1383 /**
1384  * ice_tx_csum - Enable Tx checksum offloads
1385  * @first: pointer to the first descriptor
1386  * @off: pointer to struct that holds offload parameters
1387  *
1388  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1389  */
1390 static
1391 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1392 {
1393 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1394 	struct sk_buff *skb = first->skb;
1395 	union {
1396 		struct iphdr *v4;
1397 		struct ipv6hdr *v6;
1398 		unsigned char *hdr;
1399 	} ip;
1400 	union {
1401 		struct tcphdr *tcp;
1402 		unsigned char *hdr;
1403 	} l4;
1404 	__be16 frag_off, protocol;
1405 	unsigned char *exthdr;
1406 	u32 offset, cmd = 0;
1407 	u8 l4_proto = 0;
1408 
1409 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1410 		return 0;
1411 
1412 	ip.hdr = skb_network_header(skb);
1413 	l4.hdr = skb_transport_header(skb);
1414 
1415 	/* compute outer L2 header size */
1416 	l2_len = ip.hdr - skb->data;
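	/* the MACLEN descriptor field is counted in 2-byte words, hence the
	 * divide by 2; IPLEN and L4LEN below are counted in 4-byte dwords
	 */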
1417 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1418 
1419 	if (skb->encapsulation)
1420 		return -1;
1421 
1422 	/* Enable IP checksum offloads */
1423 	protocol = vlan_get_protocol(skb);
1424 	if (protocol == htons(ETH_P_IP)) {
1425 		l4_proto = ip.v4->protocol;
1426 		/* the stack computes the IP header already, the only time we
1427 		 * need the hardware to recompute it is in the case of TSO.
1428 		 */
1429 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1430 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1431 		else
1432 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1433 
1434 	} else if (protocol == htons(ETH_P_IPV6)) {
1435 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1436 		exthdr = ip.hdr + sizeof(*ip.v6);
1437 		l4_proto = ip.v6->nexthdr;
1438 		if (l4.hdr != exthdr)
1439 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1440 					 &frag_off);
1441 	} else {
1442 		return -1;
1443 	}
1444 
1445 	/* compute inner L3 header size */
1446 	l3_len = l4.hdr - ip.hdr;
1447 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1448 
1449 	/* Enable L4 checksum offloads */
1450 	switch (l4_proto) {
1451 	case IPPROTO_TCP:
1452 		/* enable checksum offloads */
1453 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1454 		l4_len = l4.tcp->doff;
1455 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1456 		break;
1457 	case IPPROTO_UDP:
1458 		/* enable UDP checksum offload */
1459 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1460 		l4_len = (sizeof(struct udphdr) >> 2);
1461 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1462 		break;
1463 	case IPPROTO_SCTP:
1464 		/* enable SCTP checksum offload */
1465 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1466 		l4_len = sizeof(struct sctphdr) >> 2;
1467 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1468 		break;
1469 
1470 	default:
1471 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1472 			return -1;
1473 		skb_checksum_help(skb);
1474 		return 0;
1475 	}
1476 
1477 	off->td_cmd |= cmd;
1478 	off->td_offset |= offset;
1479 	return 1;
1480 }
1481 
1482 /**
1483  * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1484  * @tx_ring: ring to send buffer on
1485  * @first: pointer to struct ice_tx_buf
1486  *
1487  * Checks the skb and set up correspondingly several generic transmit flags
1488  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1489  *
1490  * Returns an error code to indicate the frame should be dropped upon error,
1491  * otherwise returns 0 to indicate the flags have been set properly.
1492  */
1493 static int
1494 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1495 {
1496 	struct sk_buff *skb = first->skb;
1497 	__be16 protocol = skb->protocol;
1498 
1499 	if (protocol == htons(ETH_P_8021Q) &&
1500 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1501 		/* when HW VLAN acceleration is turned off by the user the
1502 		 * stack sets the protocol to 8021q so that the driver
1503 		 * can take any steps required to support the SW only
1504 		 * VLAN handling. In our case the driver doesn't need
1505 		 * to take any further steps so just set the protocol
1506 		 * to the encapsulated ethertype.
1507 		 */
1508 		skb->protocol = vlan_get_protocol(skb);
1509 		goto out;
1510 	}
1511 
1512 	/* if we have a HW VLAN tag being added, default to the HW one */
1513 	if (skb_vlan_tag_present(skb)) {
1514 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1515 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1516 	} else if (protocol == htons(ETH_P_8021Q)) {
1517 		struct vlan_hdr *vhdr, _vhdr;
1518 
1519 		/* for SW VLAN, check the next protocol and store the tag */
1520 		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1521 							     sizeof(_vhdr),
1522 							     &_vhdr);
1523 		if (!vhdr)
1524 			return -EINVAL;
1525 
1526 		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1527 				   ICE_TX_FLAGS_VLAN_S;
1528 		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1529 	}
1530 
1531 out:
1532 	return 0;
1533 }
1534 
1535 /**
1536  * ice_tso - computes mss and TSO length to prepare for TSO
1537  * @first: pointer to struct ice_tx_buf
1538  * @off: pointer to struct that holds offload parameters
1539  *
1540  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1541  */
1542 static
1543 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1544 {
1545 	struct sk_buff *skb = first->skb;
1546 	union {
1547 		struct iphdr *v4;
1548 		struct ipv6hdr *v6;
1549 		unsigned char *hdr;
1550 	} ip;
1551 	union {
1552 		struct tcphdr *tcp;
1553 		unsigned char *hdr;
1554 	} l4;
1555 	u64 cd_mss, cd_tso_len;
1556 	u32 paylen, l4_start;
1557 	int err;
1558 
1559 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1560 		return 0;
1561 
1562 	if (!skb_is_gso(skb))
1563 		return 0;
1564 
1565 	err = skb_cow_head(skb, 0);
1566 	if (err < 0)
1567 		return err;
1568 
1569 	ip.hdr = skb_network_header(skb);
1570 	l4.hdr = skb_transport_header(skb);
1571 
1572 	/* initialize outer IP header fields */
1573 	if (ip.v4->version == 4) {
1574 		ip.v4->tot_len = 0;
1575 		ip.v4->check = 0;
1576 	} else {
1577 		ip.v6->payload_len = 0;
1578 	}
1579 
1580 	/* determine offset of transport header */
1581 	l4_start = l4.hdr - skb->data;
1582 
1583 	/* remove payload length from checksum */
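	/* (the stack seeds the L4 checksum with a pseudo-header sum covering
	 * the full payload length; hardware re-adds the per-segment length,
	 * so the full length is backed out here)
	 */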
1584 	paylen = skb->len - l4_start;
1585 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1586 
1587 	/* compute length of segmentation header */
1588 	off->header_len = (l4.tcp->doff * 4) + l4_start;
1589 
1590 	/* update gso_segs and bytecount */
1591 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1592 	first->bytecount += (first->gso_segs - 1) * off->header_len;
1593 
1594 	cd_tso_len = skb->len - off->header_len;
1595 	cd_mss = skb_shinfo(skb)->gso_size;
1596 
1597 	/* record cdesc_qw1 with TSO parameters */
1598 	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
1599 			 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1600 			 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1601 			 (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
1602 	first->tx_flags |= ICE_TX_FLAGS_TSO;
1603 	return 1;
1604 }
1605 
1606 /**
1607  * ice_txd_use_count - estimate the number of descriptors needed for Tx
1608  * @size: transmit request size in bytes
1609  *
1610  * Due to hardware alignment restrictions (4K alignment), we need to
1611  * assume that we can have no more than 12K of data per descriptor, even
1612  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1613  * Thus, we need to divide by 12K. But division is slow! Instead,
1614  * we decompose the operation into shifts and one relatively cheap
1615  * multiply operation.
1616  *
1617  * To divide by 12K, we first divide by 4K, then divide by 3:
1618  *     To divide by 4K, shift right by 12 bits
1619  *     To divide by 3, multiply by 85, then divide by 256
1620  *     (Divide by 256 is done by shifting right by 8 bits)
1621  * Finally, we add one to round up. Because 256 isn't an exact multiple of
1622  * 3, we'll underestimate near each multiple of 12K. This is actually more
1623  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1624  * segment. For our purposes this is accurate out to 1M which is orders of
1625  * magnitude greater than our largest possible GSO size.
1626  *
1627  * This would then be implemented as:
1628  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1629  *
1630  * Since multiplication and division are commutative, we can reorder
1631  * operations into:
1632  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
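 *
 * As a worked example (assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1), a
 * 13,000 byte request gives (13000 * 85) >> 20 = 1, plus 1, for the two
 * descriptors needed to carry 13,000 bytes at 12K per descriptor.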
1633  */
1634 static unsigned int ice_txd_use_count(unsigned int size)
1635 {
1636 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1637 }
1638 
1639 /**
1640  * ice_xmit_desc_count - calculate number of Tx descriptors needed
1641  * @skb: send buffer
1642  *
1643  * Returns number of data descriptors needed for this skb.
1644  */
1645 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1646 {
1647 	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1648 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1649 	unsigned int count = 0, size = skb_headlen(skb);
1650 
1651 	for (;;) {
1652 		count += ice_txd_use_count(size);
1653 
1654 		if (!nr_frags--)
1655 			break;
1656 
1657 		size = skb_frag_size(frag++);
1658 	}
1659 
1660 	return count;
1661 }
1662 
1663 /**
1664  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
1665  * @skb: send buffer
1666  *
1667  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
1668  * and so we need to figure out the cases where we need to linearize the skb.
1669  *
1670  * For TSO we need to count the TSO header and segment payload separately.
1671  * As such we need to check cases where we have 7 fragments or more as we
1672  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1673  * the segment payload in the first descriptor, and another 7 for the
1674  * fragments.
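 *
 * The loop below checks this with a sliding window: a running sum is
 * primed with the first five fragment sizes minus (gso_size - 1), then
 * each pass adds the newest fragment and drops the oldest, so any six
 * consecutive fragments that fall short of roughly one gso_size of
 * payload drive the sum negative and force linearization.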
1675  */
1676 static bool __ice_chk_linearize(struct sk_buff *skb)
1677 {
1678 	const struct skb_frag_struct *frag, *stale;
1679 	int nr_frags, sum;
1680 
1681 	/* no need to check if number of frags is less than 7 */
1682 	nr_frags = skb_shinfo(skb)->nr_frags;
1683 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
1684 		return false;
1685 
1686 	/* We need to walk through the list and validate that each group
1687 	 * of 6 fragments totals at least gso_size.
1688 	 */
1689 	nr_frags -= ICE_MAX_BUF_TXD - 2;
1690 	frag = &skb_shinfo(skb)->frags[0];
1691 
1692 	/* Initialize size to the negative value of gso_size minus 1. We
1693 	 * use this as the worst case scenario in which the frag ahead
1694 	 * of us only provides one byte which is why we are limited to 6
1695 	 * descriptors for a single transmit as the header and previous
1696 	 * fragment are already consuming 2 descriptors.
1697 	 */
1698 	sum = 1 - skb_shinfo(skb)->gso_size;
1699 
1700 	/* Add size of frags 0 through 4 to create our initial sum */
1701 	sum += skb_frag_size(frag++);
1702 	sum += skb_frag_size(frag++);
1703 	sum += skb_frag_size(frag++);
1704 	sum += skb_frag_size(frag++);
1705 	sum += skb_frag_size(frag++);
1706 
1707 	/* Walk through fragments adding latest fragment, testing it, and
1708 	 * then removing stale fragments from the sum.
1709 	 */
1710 	stale = &skb_shinfo(skb)->frags[0];
1711 	for (;;) {
1712 		sum += skb_frag_size(frag++);
1713 
1714 		/* if sum is negative we failed to make sufficient progress */
1715 		if (sum < 0)
1716 			return true;
1717 
1718 		if (!nr_frags--)
1719 			break;
1720 
1721 		sum -= skb_frag_size(stale++);
1722 	}
1723 
1724 	return false;
1725 }
1726 
1727 /**
1728  * ice_chk_linearize - Check if there are more than 8 fragments per packet
1729  * @skb:      send buffer
1730  * @count:    number of buffers used
1731  *
1732  * Note: Our HW can't scatter-gather more than 8 fragments to build
1733  * a packet on the wire and so we need to figure out the cases where we
1734  * need to linearize the skb.
1735  */
1736 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
1737 {
1738 	/* Both TSO and single send will work if count is less than 8 */
1739 	if (likely(count < ICE_MAX_BUF_TXD))
1740 		return false;
1741 
1742 	if (skb_is_gso(skb))
1743 		return __ice_chk_linearize(skb);
1744 
1745 	/* we can support up to 8 data buffers for a single send */
1746 	return count != ICE_MAX_BUF_TXD;
1747 }
1748 
1749 /**
1750  * ice_xmit_frame_ring - Sends buffer on Tx ring
1751  * @skb: send buffer
1752  * @tx_ring: ring to send buffer on
1753  *
1754  * Returns NETDEV_TX_OK if sent, else an error code
1755  */
1756 static netdev_tx_t
1757 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1758 {
1759 	struct ice_tx_offload_params offload = { 0 };
1760 	struct ice_tx_buf *first;
1761 	unsigned int count;
1762 	int tso, csum;
1763 
1764 	count = ice_xmit_desc_count(skb);
1765 	if (ice_chk_linearize(skb, count)) {
1766 		if (__skb_linearize(skb))
1767 			goto out_drop;
1768 		count = ice_txd_use_count(skb->len);
1769 		tx_ring->tx_stats.tx_linearize++;
1770 	}
1771 
1772 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
1773 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
1774 	 *       + 4 desc gap to avoid the cache line where head is,
1775 	 *       + 1 desc for context descriptor,
1776 	 * otherwise try next time
1777 	 */
1778 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
1779 			      ICE_DESCS_FOR_CTX_DESC)) {
1780 		tx_ring->tx_stats.tx_busy++;
1781 		return NETDEV_TX_BUSY;
1782 	}
1783 
1784 	offload.tx_ring = tx_ring;
1785 
1786 	/* record the location of the first descriptor for this packet */
1787 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
1788 	first->skb = skb;
1789 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1790 	first->gso_segs = 1;
1791 	first->tx_flags = 0;
1792 
1793 	/* prepare the VLAN tagging flags for Tx */
1794 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
1795 		goto out_drop;
1796 
1797 	/* set up TSO offload */
1798 	tso = ice_tso(first, &offload);
1799 	if (tso < 0)
1800 		goto out_drop;
1801 
1802 	/* always set up Tx checksum offload */
1803 	csum = ice_tx_csum(first, &offload);
1804 	if (csum < 0)
1805 		goto out_drop;
1806 
1807 	if (tso || offload.cd_tunnel_params) {
1808 		struct ice_tx_ctx_desc *cdesc;
1809 		int i = tx_ring->next_to_use;
1810 
1811 		/* grab the next descriptor */
1812 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
1813 		i++;
1814 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1815 
1816 		/* setup context descriptor */
1817 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
1818 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
1819 		cdesc->rsvd = cpu_to_le16(0);
1820 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
1821 	}
1822 
1823 	ice_tx_map(tx_ring, first, &offload);
1824 	return NETDEV_TX_OK;
1825 
1826 out_drop:
1827 	dev_kfree_skb_any(skb);
1828 	return NETDEV_TX_OK;
1829 }
1830 
1831 /**
1832  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
1833  * @skb: send buffer
1834  * @netdev: network interface device structure
1835  *
1836  * Returns NETDEV_TX_OK if sent, else an error code
1837  */
1838 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1839 {
1840 	struct ice_netdev_priv *np = netdev_priv(netdev);
1841 	struct ice_vsi *vsi = np->vsi;
1842 	struct ice_ring *tx_ring;
1843 
1844 	tx_ring = vsi->tx_rings[skb->queue_mapping];
1845 
1846 	/* hardware can't handle really short frames, hardware padding works
1847 	 * beyond this point
1848 	 */
1849 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
1850 		return NETDEV_TX_OK;
1851 
1852 	return ice_xmit_frame_ring(skb, tx_ring);
1853 }
1854