xref: /linux/drivers/net/ethernet/pensando/ionic/ionic_txrx.c (revision a751449f8b477e0e1d97f778ed97ae9f6576b690)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/if_vlan.h>
7 #include <net/ip6_checksum.h>
8 
9 #include "ionic.h"
10 #include "ionic_lif.h"
11 #include "ionic_txrx.h"
12 
13 
14 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
15 				  ionic_desc_cb cb_func, void *cb_arg)
16 {
17 	DEBUG_STATS_TXQ_POST(q, ring_dbell);
18 
19 	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
20 }
21 
22 static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
23 				  ionic_desc_cb cb_func, void *cb_arg)
24 {
25 	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
26 
27 	DEBUG_STATS_RX_BUFF_CNT(q);
28 }
29 
30 static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
31 {
32 	return netdev_get_tx_queue(q->lif->netdev, q->index);
33 }
34 
35 static int ionic_rx_page_alloc(struct ionic_queue *q,
36 			       struct ionic_buf_info *buf_info)
37 {
38 	struct net_device *netdev = q->lif->netdev;
39 	struct ionic_rx_stats *stats;
40 	struct device *dev;
41 	struct page *page;
42 
43 	dev = q->dev;
44 	stats = q_to_rx_stats(q);
45 
46 	if (unlikely(!buf_info)) {
47 		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
48 				    netdev->name, q->name);
49 		return -EINVAL;
50 	}
51 
52 	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
53 	if (unlikely(!page)) {
54 		net_err_ratelimited("%s: %s page alloc failed\n",
55 				    netdev->name, q->name);
56 		stats->alloc_err++;
57 		return -ENOMEM;
58 	}
59 
60 	buf_info->dma_addr = dma_map_page(dev, page, 0,
61 					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
62 	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
63 		__free_pages(page, 0);
64 		net_err_ratelimited("%s: %s dma map failed\n",
65 				    netdev->name, q->name);
66 		stats->dma_map_err++;
67 		return -EIO;
68 	}
69 
70 	buf_info->page = page;
71 	buf_info->page_offset = 0;
72 
73 	return 0;
74 }
75 
76 static void ionic_rx_page_free(struct ionic_queue *q,
77 			       struct ionic_buf_info *buf_info)
78 {
79 	struct net_device *netdev = q->lif->netdev;
80 	struct device *dev = q->dev;
81 
82 	if (unlikely(!buf_info)) {
83 		net_err_ratelimited("%s: %s invalid buf_info in free\n",
84 				    netdev->name, q->name);
85 		return;
86 	}
87 
88 	if (!buf_info->page)
89 		return;
90 
91 	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
92 	__free_pages(buf_info->page, 0);
93 	buf_info->page = NULL;
94 }
95 
96 static bool ionic_rx_buf_recycle(struct ionic_queue *q,
97 				 struct ionic_buf_info *buf_info, u32 used)
98 {
99 	u32 size;
100 
101 	/* don't re-use pages allocated in low-mem condition */
102 	if (page_is_pfmemalloc(buf_info->page))
103 		return false;
104 
105 	/* don't re-use buffers from non-local numa nodes */
106 	if (page_to_nid(buf_info->page) != numa_mem_id())
107 		return false;
108 
109 	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
110 	buf_info->page_offset += size;
111 	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
112 		return false;
113 
114 	get_page(buf_info->page);
115 
116 	return true;
117 }
118 
119 static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
120 				      struct ionic_desc_info *desc_info,
121 				      struct ionic_rxq_comp *comp)
122 {
123 	struct net_device *netdev = q->lif->netdev;
124 	struct ionic_buf_info *buf_info;
125 	struct ionic_rx_stats *stats;
126 	struct device *dev = q->dev;
127 	struct sk_buff *skb;
128 	unsigned int i;
129 	u16 frag_len;
130 	u16 len;
131 
132 	stats = q_to_rx_stats(q);
133 
134 	buf_info = &desc_info->bufs[0];
135 	len = le16_to_cpu(comp->len);
136 
137 	prefetchw(buf_info->page);
138 
139 	skb = napi_get_frags(&q_to_qcq(q)->napi);
140 	if (unlikely(!skb)) {
141 		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
142 				     netdev->name, q->name);
143 		stats->alloc_err++;
144 		return NULL;
145 	}
146 
147 	i = comp->num_sg_elems + 1;
148 	do {
149 		if (unlikely(!buf_info->page)) {
150 			dev_kfree_skb(skb);
151 			return NULL;
152 		}
153 
154 		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
155 		len -= frag_len;
156 
157 		dma_sync_single_for_cpu(dev,
158 					buf_info->dma_addr + buf_info->page_offset,
159 					frag_len, DMA_FROM_DEVICE);
160 
161 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
162 				buf_info->page, buf_info->page_offset, frag_len,
163 				IONIC_PAGE_SIZE);
164 
165 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
166 			dma_unmap_page(dev, buf_info->dma_addr,
167 				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
168 			buf_info->page = NULL;
169 		}
170 
171 		buf_info++;
172 
173 		i--;
174 	} while (i > 0);
175 
176 	return skb;
177 }
178 
179 static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
180 					  struct ionic_desc_info *desc_info,
181 					  struct ionic_rxq_comp *comp)
182 {
183 	struct net_device *netdev = q->lif->netdev;
184 	struct ionic_buf_info *buf_info;
185 	struct ionic_rx_stats *stats;
186 	struct device *dev = q->dev;
187 	struct sk_buff *skb;
188 	u16 len;
189 
190 	stats = q_to_rx_stats(q);
191 
192 	buf_info = &desc_info->bufs[0];
193 	len = le16_to_cpu(comp->len);
194 
195 	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
196 	if (unlikely(!skb)) {
197 		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
198 				     netdev->name, q->name);
199 		stats->alloc_err++;
200 		return NULL;
201 	}
202 
203 	if (unlikely(!buf_info->page)) {
204 		dev_kfree_skb(skb);
205 		return NULL;
206 	}
207 
208 	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
209 				len, DMA_FROM_DEVICE);
210 	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
211 	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
212 				   len, DMA_FROM_DEVICE);
213 
214 	skb_put(skb, len);
215 	skb->protocol = eth_type_trans(skb, q->lif->netdev);
216 
217 	return skb;
218 }
219 
220 static void ionic_rx_clean(struct ionic_queue *q,
221 			   struct ionic_desc_info *desc_info,
222 			   struct ionic_cq_info *cq_info,
223 			   void *cb_arg)
224 {
225 	struct net_device *netdev = q->lif->netdev;
226 	struct ionic_qcq *qcq = q_to_qcq(q);
227 	struct ionic_rx_stats *stats;
228 	struct ionic_rxq_comp *comp;
229 	struct sk_buff *skb;
230 
231 	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
232 
233 	stats = q_to_rx_stats(q);
234 
235 	if (comp->status) {
236 		stats->dropped++;
237 		return;
238 	}
239 
240 	stats->pkts++;
241 	stats->bytes += le16_to_cpu(comp->len);
242 
243 	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
244 		skb = ionic_rx_copybreak(q, desc_info, comp);
245 	else
246 		skb = ionic_rx_frags(q, desc_info, comp);
247 
248 	if (unlikely(!skb)) {
249 		stats->dropped++;
250 		return;
251 	}
252 
253 	skb_record_rx_queue(skb, q->index);
254 
255 	if (likely(netdev->features & NETIF_F_RXHASH)) {
256 		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
257 		case IONIC_PKT_TYPE_IPV4:
258 		case IONIC_PKT_TYPE_IPV6:
259 			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
260 				     PKT_HASH_TYPE_L3);
261 			break;
262 		case IONIC_PKT_TYPE_IPV4_TCP:
263 		case IONIC_PKT_TYPE_IPV6_TCP:
264 		case IONIC_PKT_TYPE_IPV4_UDP:
265 		case IONIC_PKT_TYPE_IPV6_UDP:
266 			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
267 				     PKT_HASH_TYPE_L4);
268 			break;
269 		}
270 	}
271 
272 	if (likely(netdev->features & NETIF_F_RXCSUM)) {
273 		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
274 			skb->ip_summed = CHECKSUM_COMPLETE;
275 			skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
276 			stats->csum_complete++;
277 		}
278 	} else {
279 		stats->csum_none++;
280 	}
281 
282 	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
283 		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
284 		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
285 		stats->csum_error++;
286 
287 	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
288 	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
289 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
290 				       le16_to_cpu(comp->vlan_tci));
291 		stats->vlan_stripped++;
292 	}
293 
294 	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
295 		__le64 *cq_desc_hwstamp;
296 		u64 hwstamp;
297 
298 		cq_desc_hwstamp =
299 			cq_info->cq_desc +
300 			qcq->cq.desc_size -
301 			sizeof(struct ionic_rxq_comp) -
302 			IONIC_HWSTAMP_CQ_NEGOFFSET;
303 
304 		hwstamp = le64_to_cpu(*cq_desc_hwstamp);
305 
306 		if (hwstamp != IONIC_HWSTAMP_INVALID) {
307 			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
308 			stats->hwstamp_valid++;
309 		} else {
310 			stats->hwstamp_invalid++;
311 		}
312 	}
313 
314 	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
315 		napi_gro_receive(&qcq->napi, skb);
316 	else
317 		napi_gro_frags(&qcq->napi);
318 }
319 
320 bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
321 {
322 	struct ionic_queue *q = cq->bound_q;
323 	struct ionic_desc_info *desc_info;
324 	struct ionic_rxq_comp *comp;
325 
326 	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
327 
328 	if (!color_match(comp->pkt_type_color, cq->done_color))
329 		return false;
330 
331 	/* check for empty queue */
332 	if (q->tail_idx == q->head_idx)
333 		return false;
334 
335 	if (q->tail_idx != le16_to_cpu(comp->comp_index))
336 		return false;
337 
338 	desc_info = &q->info[q->tail_idx];
339 	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
340 
341 	/* clean the related q entry, only one per qc completion */
342 	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
343 
344 	desc_info->cb = NULL;
345 	desc_info->cb_arg = NULL;
346 
347 	return true;
348 }
349 
350 void ionic_rx_fill(struct ionic_queue *q)
351 {
352 	struct net_device *netdev = q->lif->netdev;
353 	struct ionic_desc_info *desc_info;
354 	struct ionic_rxq_sg_desc *sg_desc;
355 	struct ionic_rxq_sg_elem *sg_elem;
356 	struct ionic_buf_info *buf_info;
357 	struct ionic_rxq_desc *desc;
358 	unsigned int remain_len;
359 	unsigned int frag_len;
360 	unsigned int nfrags;
361 	unsigned int i, j;
362 	unsigned int len;
363 
364 	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
365 
366 	for (i = ionic_q_space_avail(q); i; i--) {
367 		nfrags = 0;
368 		remain_len = len;
369 		desc_info = &q->info[q->head_idx];
370 		desc = desc_info->desc;
371 		buf_info = &desc_info->bufs[0];
372 
373 		if (!buf_info->page) { /* alloc a new buffer? */
374 			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
375 				desc->addr = 0;
376 				desc->len = 0;
377 				return;
378 			}
379 		}
380 
381 		/* fill main descriptor - buf[0] */
382 		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
383 		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
384 		desc->len = cpu_to_le16(frag_len);
385 		remain_len -= frag_len;
386 		buf_info++;
387 		nfrags++;
388 
389 		/* fill sg descriptors - buf[1..n] */
390 		sg_desc = desc_info->sg_desc;
391 		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
392 			sg_elem = &sg_desc->elems[j];
393 			if (!buf_info->page) { /* alloc a new sg buffer? */
394 				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
395 					sg_elem->addr = 0;
396 					sg_elem->len = 0;
397 					return;
398 				}
399 			}
400 
401 			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
402 			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
403 			sg_elem->len = cpu_to_le16(frag_len);
404 			remain_len -= frag_len;
405 			buf_info++;
406 			nfrags++;
407 		}
408 
409 		/* clear end sg element as a sentinel */
410 		if (j < q->max_sg_elems) {
411 			sg_elem = &sg_desc->elems[j];
412 			memset(sg_elem, 0, sizeof(*sg_elem));
413 		}
414 
415 		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
416 					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
417 		desc_info->nbufs = nfrags;
418 
419 		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
420 	}
421 
422 	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
423 			 q->dbval | q->head_idx);
424 }
425 
426 void ionic_rx_empty(struct ionic_queue *q)
427 {
428 	struct ionic_desc_info *desc_info;
429 	struct ionic_buf_info *buf_info;
430 	unsigned int i, j;
431 
432 	for (i = 0; i < q->num_descs; i++) {
433 		desc_info = &q->info[i];
434 		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
435 			buf_info = &desc_info->bufs[j];
436 			if (buf_info->page)
437 				ionic_rx_page_free(q, buf_info);
438 		}
439 
440 		desc_info->nbufs = 0;
441 		desc_info->cb = NULL;
442 		desc_info->cb_arg = NULL;
443 	}
444 
445 	q->head_idx = 0;
446 	q->tail_idx = 0;
447 }
448 
449 static void ionic_dim_update(struct ionic_qcq *qcq)
450 {
451 	struct dim_sample dim_sample;
452 	struct ionic_lif *lif;
453 	unsigned int qi;
454 
455 	if (!qcq->intr.dim_coal_hw)
456 		return;
457 
458 	lif = qcq->q.lif;
459 	qi = qcq->cq.bound_q->index;
460 
461 	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
462 			     lif->rxqcqs[qi]->intr.index,
463 			     qcq->intr.dim_coal_hw);
464 
465 	dim_update_sample(qcq->cq.bound_intr->rearm_count,
466 			  lif->txqstats[qi].pkts,
467 			  lif->txqstats[qi].bytes,
468 			  &dim_sample);
469 
470 	net_dim(&qcq->dim, dim_sample);
471 }
472 
473 int ionic_tx_napi(struct napi_struct *napi, int budget)
474 {
475 	struct ionic_qcq *qcq = napi_to_qcq(napi);
476 	struct ionic_cq *cq = napi_to_cq(napi);
477 	struct ionic_dev *idev;
478 	struct ionic_lif *lif;
479 	u32 work_done = 0;
480 	u32 flags = 0;
481 
482 	lif = cq->bound_q->lif;
483 	idev = &lif->ionic->idev;
484 
485 	work_done = ionic_cq_service(cq, budget,
486 				     ionic_tx_service, NULL, NULL);
487 
488 	if (work_done < budget && napi_complete_done(napi, work_done)) {
489 		ionic_dim_update(qcq);
490 		flags |= IONIC_INTR_CRED_UNMASK;
491 		cq->bound_intr->rearm_count++;
492 	}
493 
494 	if (work_done || flags) {
495 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
496 		ionic_intr_credits(idev->intr_ctrl,
497 				   cq->bound_intr->index,
498 				   work_done, flags);
499 	}
500 
501 	DEBUG_STATS_NAPI_POLL(qcq, work_done);
502 
503 	return work_done;
504 }
505 
506 int ionic_rx_napi(struct napi_struct *napi, int budget)
507 {
508 	struct ionic_qcq *qcq = napi_to_qcq(napi);
509 	struct ionic_cq *cq = napi_to_cq(napi);
510 	struct ionic_dev *idev;
511 	struct ionic_lif *lif;
512 	u16 rx_fill_threshold;
513 	u32 work_done = 0;
514 	u32 flags = 0;
515 
516 	lif = cq->bound_q->lif;
517 	idev = &lif->ionic->idev;
518 
519 	work_done = ionic_cq_service(cq, budget,
520 				     ionic_rx_service, NULL, NULL);
521 
522 	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
523 				  cq->num_descs / IONIC_RX_FILL_DIV);
524 	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
525 		ionic_rx_fill(cq->bound_q);
526 
527 	if (work_done < budget && napi_complete_done(napi, work_done)) {
528 		ionic_dim_update(qcq);
529 		flags |= IONIC_INTR_CRED_UNMASK;
530 		cq->bound_intr->rearm_count++;
531 	}
532 
533 	if (work_done || flags) {
534 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
535 		ionic_intr_credits(idev->intr_ctrl,
536 				   cq->bound_intr->index,
537 				   work_done, flags);
538 	}
539 
540 	DEBUG_STATS_NAPI_POLL(qcq, work_done);
541 
542 	return work_done;
543 }
544 
545 int ionic_txrx_napi(struct napi_struct *napi, int budget)
546 {
547 	struct ionic_qcq *qcq = napi_to_qcq(napi);
548 	struct ionic_cq *rxcq = napi_to_cq(napi);
549 	unsigned int qi = rxcq->bound_q->index;
550 	struct ionic_dev *idev;
551 	struct ionic_lif *lif;
552 	struct ionic_cq *txcq;
553 	u16 rx_fill_threshold;
554 	u32 rx_work_done = 0;
555 	u32 tx_work_done = 0;
556 	u32 flags = 0;
557 
558 	lif = rxcq->bound_q->lif;
559 	idev = &lif->ionic->idev;
560 	txcq = &lif->txqcqs[qi]->cq;
561 
562 	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
563 					ionic_tx_service, NULL, NULL);
564 
565 	rx_work_done = ionic_cq_service(rxcq, budget,
566 					ionic_rx_service, NULL, NULL);
567 
568 	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
569 				  rxcq->num_descs / IONIC_RX_FILL_DIV);
570 	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
571 		ionic_rx_fill(rxcq->bound_q);
572 
573 	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
574 		ionic_dim_update(qcq);
575 		flags |= IONIC_INTR_CRED_UNMASK;
576 		rxcq->bound_intr->rearm_count++;
577 	}
578 
579 	if (rx_work_done || flags) {
580 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
581 		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
582 				   tx_work_done + rx_work_done, flags);
583 	}
584 
585 	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
586 	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
587 
588 	return rx_work_done;
589 }
590 
591 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
592 				      void *data, size_t len)
593 {
594 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
595 	struct device *dev = q->dev;
596 	dma_addr_t dma_addr;
597 
598 	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
599 	if (dma_mapping_error(dev, dma_addr)) {
600 		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
601 				     q->lif->netdev->name, q->name);
602 		stats->dma_map_err++;
603 		return 0;
604 	}
605 	return dma_addr;
606 }
607 
608 static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
609 				    const skb_frag_t *frag,
610 				    size_t offset, size_t len)
611 {
612 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
613 	struct device *dev = q->dev;
614 	dma_addr_t dma_addr;
615 
616 	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
617 	if (dma_mapping_error(dev, dma_addr)) {
618 		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
619 				     q->lif->netdev->name, q->name);
620 		stats->dma_map_err++;
621 	}
622 	return dma_addr;
623 }
624 
625 static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
626 			    struct ionic_desc_info *desc_info)
627 {
628 	struct ionic_buf_info *buf_info = desc_info->bufs;
629 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
630 	struct device *dev = q->dev;
631 	dma_addr_t dma_addr;
632 	unsigned int nfrags;
633 	skb_frag_t *frag;
634 	int frag_idx;
635 
636 	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
637 	if (dma_mapping_error(dev, dma_addr)) {
638 		stats->dma_map_err++;
639 		return -EIO;
640 	}
641 	buf_info->dma_addr = dma_addr;
642 	buf_info->len = skb_headlen(skb);
643 	buf_info++;
644 
645 	frag = skb_shinfo(skb)->frags;
646 	nfrags = skb_shinfo(skb)->nr_frags;
647 	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
648 		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
649 		if (dma_mapping_error(dev, dma_addr)) {
650 			stats->dma_map_err++;
651 			goto dma_fail;
652 		}
653 		buf_info->dma_addr = dma_addr;
654 		buf_info->len = skb_frag_size(frag);
655 		buf_info++;
656 	}
657 
658 	desc_info->nbufs = 1 + nfrags;
659 
660 	return 0;
661 
662 dma_fail:
663 	/* unwind the frag mappings and the head mapping */
664 	while (frag_idx > 0) {
665 		frag_idx--;
666 		buf_info--;
667 		dma_unmap_page(dev, buf_info->dma_addr,
668 			       buf_info->len, DMA_TO_DEVICE);
669 	}
670 	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
671 	return -EIO;
672 }
673 
674 static void ionic_tx_clean(struct ionic_queue *q,
675 			   struct ionic_desc_info *desc_info,
676 			   struct ionic_cq_info *cq_info,
677 			   void *cb_arg)
678 {
679 	struct ionic_buf_info *buf_info = desc_info->bufs;
680 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
681 	struct ionic_qcq *qcq = q_to_qcq(q);
682 	struct sk_buff *skb = cb_arg;
683 	struct device *dev = q->dev;
684 	unsigned int i;
685 	u16 qi;
686 
687 	if (desc_info->nbufs) {
688 		dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
689 				 buf_info->len, DMA_TO_DEVICE);
690 		buf_info++;
691 		for (i = 1; i < desc_info->nbufs; i++, buf_info++)
692 			dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
693 				       buf_info->len, DMA_TO_DEVICE);
694 	}
695 
696 	if (!skb)
697 		return;
698 
699 	qi = skb_get_queue_mapping(skb);
700 
701 	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
702 		if (cq_info) {
703 			struct skb_shared_hwtstamps hwts = {};
704 			__le64 *cq_desc_hwstamp;
705 			u64 hwstamp;
706 
707 			cq_desc_hwstamp =
708 				cq_info->cq_desc +
709 				qcq->cq.desc_size -
710 				sizeof(struct ionic_txq_comp) -
711 				IONIC_HWSTAMP_CQ_NEGOFFSET;
712 
713 			hwstamp = le64_to_cpu(*cq_desc_hwstamp);
714 
715 			if (hwstamp != IONIC_HWSTAMP_INVALID) {
716 				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
717 
718 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
719 				skb_tstamp_tx(skb, &hwts);
720 
721 				stats->hwstamp_valid++;
722 			} else {
723 				stats->hwstamp_invalid++;
724 			}
725 		}
726 
727 	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
728 		netif_wake_subqueue(q->lif->netdev, qi);
729 		q->wake++;
730 	}
731 
732 	desc_info->bytes = skb->len;
733 	stats->clean++;
734 
735 	dev_consume_skb_any(skb);
736 }
737 
738 bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
739 {
740 	struct ionic_queue *q = cq->bound_q;
741 	struct ionic_desc_info *desc_info;
742 	struct ionic_txq_comp *comp;
743 	int bytes = 0;
744 	int pkts = 0;
745 	u16 index;
746 
747 	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
748 
749 	if (!color_match(comp->color, cq->done_color))
750 		return false;
751 
752 	/* clean the related q entries, there could be
753 	 * several q entries completed for each cq completion
754 	 */
755 	do {
756 		desc_info = &q->info[q->tail_idx];
757 		desc_info->bytes = 0;
758 		index = q->tail_idx;
759 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
760 		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
761 		if (desc_info->cb_arg) {
762 			pkts++;
763 			bytes += desc_info->bytes;
764 		}
765 		desc_info->cb = NULL;
766 		desc_info->cb_arg = NULL;
767 	} while (index != le16_to_cpu(comp->comp_index));
768 
769 	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
770 		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
771 
772 	return true;
773 }
774 
775 void ionic_tx_flush(struct ionic_cq *cq)
776 {
777 	struct ionic_dev *idev = &cq->lif->ionic->idev;
778 	u32 work_done;
779 
780 	work_done = ionic_cq_service(cq, cq->num_descs,
781 				     ionic_tx_service, NULL, NULL);
782 	if (work_done)
783 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
784 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
785 }
786 
787 void ionic_tx_empty(struct ionic_queue *q)
788 {
789 	struct ionic_desc_info *desc_info;
790 	int bytes = 0;
791 	int pkts = 0;
792 
793 	/* walk the not completed tx entries, if any */
794 	while (q->head_idx != q->tail_idx) {
795 		desc_info = &q->info[q->tail_idx];
796 		desc_info->bytes = 0;
797 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
798 		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
799 		if (desc_info->cb_arg) {
800 			pkts++;
801 			bytes += desc_info->bytes;
802 		}
803 		desc_info->cb = NULL;
804 		desc_info->cb_arg = NULL;
805 	}
806 
807 	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
808 		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
809 }
810 
811 static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
812 {
813 	int err;
814 
815 	err = skb_cow_head(skb, 0);
816 	if (err)
817 		return err;
818 
819 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
820 		inner_ip_hdr(skb)->check = 0;
821 		inner_tcp_hdr(skb)->check =
822 			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
823 					   inner_ip_hdr(skb)->daddr,
824 					   0, IPPROTO_TCP, 0);
825 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
826 		inner_tcp_hdr(skb)->check =
827 			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
828 					 &inner_ipv6_hdr(skb)->daddr,
829 					 0, IPPROTO_TCP, 0);
830 	}
831 
832 	return 0;
833 }
834 
835 static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
836 {
837 	int err;
838 
839 	err = skb_cow_head(skb, 0);
840 	if (err)
841 		return err;
842 
843 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
844 		ip_hdr(skb)->check = 0;
845 		tcp_hdr(skb)->check =
846 			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
847 					   ip_hdr(skb)->daddr,
848 					   0, IPPROTO_TCP, 0);
849 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
850 		tcp_v6_gso_csum_prep(skb);
851 	}
852 
853 	return 0;
854 }
855 
856 static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
857 			      struct sk_buff *skb,
858 			      dma_addr_t addr, u8 nsge, u16 len,
859 			      unsigned int hdrlen, unsigned int mss,
860 			      bool outer_csum,
861 			      u16 vlan_tci, bool has_vlan,
862 			      bool start, bool done)
863 {
864 	u8 flags = 0;
865 	u64 cmd;
866 
867 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
868 	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
869 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
870 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
871 
872 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
873 	desc->cmd = cpu_to_le64(cmd);
874 	desc->len = cpu_to_le16(len);
875 	desc->vlan_tci = cpu_to_le16(vlan_tci);
876 	desc->hdr_len = cpu_to_le16(hdrlen);
877 	desc->mss = cpu_to_le16(mss);
878 
879 	if (start) {
880 		skb_tx_timestamp(skb);
881 		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
882 			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
883 		ionic_txq_post(q, false, ionic_tx_clean, skb);
884 	} else {
885 		ionic_txq_post(q, done, NULL, NULL);
886 	}
887 }
888 
889 static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
890 {
891 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
892 	struct ionic_desc_info *desc_info;
893 	struct ionic_buf_info *buf_info;
894 	struct ionic_txq_sg_elem *elem;
895 	struct ionic_txq_desc *desc;
896 	unsigned int chunk_len;
897 	unsigned int frag_rem;
898 	unsigned int tso_rem;
899 	unsigned int seg_rem;
900 	dma_addr_t desc_addr;
901 	dma_addr_t frag_addr;
902 	unsigned int hdrlen;
903 	unsigned int len;
904 	unsigned int mss;
905 	bool start, done;
906 	bool outer_csum;
907 	bool has_vlan;
908 	u16 desc_len;
909 	u8 desc_nsge;
910 	u16 vlan_tci;
911 	bool encap;
912 	int err;
913 
914 	desc_info = &q->info[q->head_idx];
915 	buf_info = desc_info->bufs;
916 
917 	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
918 		return -EIO;
919 
920 	len = skb->len;
921 	mss = skb_shinfo(skb)->gso_size;
922 	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
923 		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
924 	has_vlan = !!skb_vlan_tag_present(skb);
925 	vlan_tci = skb_vlan_tag_get(skb);
926 	encap = skb->encapsulation;
927 
928 	/* Preload inner-most TCP csum field with IP pseudo hdr
929 	 * calculated with IP length set to zero.  HW will later
930 	 * add in length to each TCP segment resulting from the TSO.
931 	 */
932 
933 	if (encap)
934 		err = ionic_tx_tcp_inner_pseudo_csum(skb);
935 	else
936 		err = ionic_tx_tcp_pseudo_csum(skb);
937 	if (err)
938 		return err;
939 
940 	if (encap)
941 		hdrlen = skb_inner_transport_header(skb) - skb->data +
942 			 inner_tcp_hdrlen(skb);
943 	else
944 		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
945 
946 	tso_rem = len;
947 	seg_rem = min(tso_rem, hdrlen + mss);
948 
949 	frag_addr = 0;
950 	frag_rem = 0;
951 
952 	start = true;
953 
954 	while (tso_rem > 0) {
955 		desc = NULL;
956 		elem = NULL;
957 		desc_addr = 0;
958 		desc_len = 0;
959 		desc_nsge = 0;
960 		/* use fragments until we have enough to post a single descriptor */
961 		while (seg_rem > 0) {
962 			/* if the fragment is exhausted then move to the next one */
963 			if (frag_rem == 0) {
964 				/* grab the next fragment */
965 				frag_addr = buf_info->dma_addr;
966 				frag_rem = buf_info->len;
967 				buf_info++;
968 			}
969 			chunk_len = min(frag_rem, seg_rem);
970 			if (!desc) {
971 				/* fill main descriptor */
972 				desc = desc_info->txq_desc;
973 				elem = desc_info->txq_sg_desc->elems;
974 				desc_addr = frag_addr;
975 				desc_len = chunk_len;
976 			} else {
977 				/* fill sg descriptor */
978 				elem->addr = cpu_to_le64(frag_addr);
979 				elem->len = cpu_to_le16(chunk_len);
980 				elem++;
981 				desc_nsge++;
982 			}
983 			frag_addr += chunk_len;
984 			frag_rem -= chunk_len;
985 			tso_rem -= chunk_len;
986 			seg_rem -= chunk_len;
987 		}
988 		seg_rem = min(tso_rem, mss);
989 		done = (tso_rem == 0);
990 		/* post descriptor */
991 		ionic_tx_tso_post(q, desc, skb,
992 				  desc_addr, desc_nsge, desc_len,
993 				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
994 				  start, done);
995 		start = false;
996 		/* Buffer information is stored with the first tso descriptor */
997 		desc_info = &q->info[q->head_idx];
998 		desc_info->nbufs = 0;
999 	}
1000 
1001 	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
1002 	stats->bytes += len;
1003 	stats->tso++;
1004 	stats->tso_bytes = len;
1005 
1006 	return 0;
1007 }
1008 
1009 static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
1010 			      struct ionic_desc_info *desc_info)
1011 {
1012 	struct ionic_txq_desc *desc = desc_info->txq_desc;
1013 	struct ionic_buf_info *buf_info = desc_info->bufs;
1014 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1015 	bool has_vlan;
1016 	u8 flags = 0;
1017 	bool encap;
1018 	u64 cmd;
1019 
1020 	has_vlan = !!skb_vlan_tag_present(skb);
1021 	encap = skb->encapsulation;
1022 
1023 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1024 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1025 
1026 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
1027 				  flags, skb_shinfo(skb)->nr_frags,
1028 				  buf_info->dma_addr);
1029 	desc->cmd = cpu_to_le64(cmd);
1030 	desc->len = cpu_to_le16(buf_info->len);
1031 	if (has_vlan) {
1032 		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1033 		stats->vlan_inserted++;
1034 	} else {
1035 		desc->vlan_tci = 0;
1036 	}
1037 	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
1038 	desc->csum_offset = cpu_to_le16(skb->csum_offset);
1039 
1040 	if (skb_csum_is_sctp(skb))
1041 		stats->crc32_csum++;
1042 	else
1043 		stats->csum++;
1044 
1045 	return 0;
1046 }
1047 
1048 static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
1049 				 struct ionic_desc_info *desc_info)
1050 {
1051 	struct ionic_txq_desc *desc = desc_info->txq_desc;
1052 	struct ionic_buf_info *buf_info = desc_info->bufs;
1053 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1054 	bool has_vlan;
1055 	u8 flags = 0;
1056 	bool encap;
1057 	u64 cmd;
1058 
1059 	has_vlan = !!skb_vlan_tag_present(skb);
1060 	encap = skb->encapsulation;
1061 
1062 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1063 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1064 
1065 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
1066 				  flags, skb_shinfo(skb)->nr_frags,
1067 				  buf_info->dma_addr);
1068 	desc->cmd = cpu_to_le64(cmd);
1069 	desc->len = cpu_to_le16(buf_info->len);
1070 	if (has_vlan) {
1071 		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1072 		stats->vlan_inserted++;
1073 	} else {
1074 		desc->vlan_tci = 0;
1075 	}
1076 	desc->csum_start = 0;
1077 	desc->csum_offset = 0;
1078 
1079 	stats->csum_none++;
1080 
1081 	return 0;
1082 }
1083 
1084 static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
1085 			      struct ionic_desc_info *desc_info)
1086 {
1087 	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
1088 	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
1089 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
1090 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1091 	unsigned int i;
1092 
1093 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
1094 		elem->addr = cpu_to_le64(buf_info->dma_addr);
1095 		elem->len = cpu_to_le16(buf_info->len);
1096 	}
1097 
1098 	stats->frags += skb_shinfo(skb)->nr_frags;
1099 
1100 	return 0;
1101 }
1102 
1103 static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
1104 {
1105 	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
1106 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1107 	int err;
1108 
1109 	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1110 		return -EIO;
1111 
1112 	/* set up the initial descriptor */
1113 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1114 		err = ionic_tx_calc_csum(q, skb, desc_info);
1115 	else
1116 		err = ionic_tx_calc_no_csum(q, skb, desc_info);
1117 	if (err)
1118 		return err;
1119 
1120 	/* add frags */
1121 	err = ionic_tx_skb_frags(q, skb, desc_info);
1122 	if (err)
1123 		return err;
1124 
1125 	skb_tx_timestamp(skb);
1126 	stats->pkts++;
1127 	stats->bytes += skb->len;
1128 
1129 	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
1130 		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
1131 	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
1132 
1133 	return 0;
1134 }
1135 
1136 static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
1137 {
1138 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1139 	int ndescs;
1140 	int err;
1141 
1142 	/* Each desc is mss long max, so a descriptor for each gso_seg */
1143 	if (skb_is_gso(skb))
1144 		ndescs = skb_shinfo(skb)->gso_segs;
1145 	else
1146 		ndescs = 1;
1147 
1148 	/* If non-TSO, just need 1 desc and nr_frags sg elems */
1149 	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
1150 		return ndescs;
1151 
1152 	/* Too many frags, so linearize */
1153 	err = skb_linearize(skb);
1154 	if (err)
1155 		return err;
1156 
1157 	stats->linearize++;
1158 
1159 	return ndescs;
1160 }
1161 
1162 static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
1163 {
1164 	int stopped = 0;
1165 
1166 	if (unlikely(!ionic_q_has_space(q, ndescs))) {
1167 		netif_stop_subqueue(q->lif->netdev, q->index);
1168 		q->stop++;
1169 		stopped = 1;
1170 
1171 		/* Might race with ionic_tx_clean, check again */
1172 		smp_rmb();
1173 		if (ionic_q_has_space(q, ndescs)) {
1174 			netif_wake_subqueue(q->lif->netdev, q->index);
1175 			stopped = 0;
1176 		}
1177 	}
1178 
1179 	return stopped;
1180 }
1181 
1182 static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
1183 					    struct net_device *netdev)
1184 {
1185 	struct ionic_lif *lif = netdev_priv(netdev);
1186 	struct ionic_queue *q = &lif->hwstamp_txq->q;
1187 	int err, ndescs;
1188 
1189 	/* Does not stop/start txq, because we post to a separate tx queue
1190 	 * for timestamping, and if a packet can't be posted immediately to
1191 	 * the timestamping queue, it is dropped.
1192 	 */
1193 
1194 	ndescs = ionic_tx_descs_needed(q, skb);
1195 	if (unlikely(ndescs < 0))
1196 		goto err_out_drop;
1197 
1198 	if (unlikely(!ionic_q_has_space(q, ndescs)))
1199 		goto err_out_drop;
1200 
1201 	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
1202 	if (skb_is_gso(skb))
1203 		err = ionic_tx_tso(q, skb);
1204 	else
1205 		err = ionic_tx(q, skb);
1206 
1207 	if (err)
1208 		goto err_out_drop;
1209 
1210 	return NETDEV_TX_OK;
1211 
1212 err_out_drop:
1213 	q->drop++;
1214 	dev_kfree_skb(skb);
1215 	return NETDEV_TX_OK;
1216 }
1217 
1218 netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1219 {
1220 	u16 queue_index = skb_get_queue_mapping(skb);
1221 	struct ionic_lif *lif = netdev_priv(netdev);
1222 	struct ionic_queue *q;
1223 	int ndescs;
1224 	int err;
1225 
1226 	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
1227 		dev_kfree_skb(skb);
1228 		return NETDEV_TX_OK;
1229 	}
1230 
1231 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1232 		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
1233 			return ionic_start_hwstamp_xmit(skb, netdev);
1234 
1235 	if (unlikely(queue_index >= lif->nxqs))
1236 		queue_index = 0;
1237 	q = &lif->txqcqs[queue_index]->q;
1238 
1239 	ndescs = ionic_tx_descs_needed(q, skb);
1240 	if (ndescs < 0)
1241 		goto err_out_drop;
1242 
1243 	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
1244 		return NETDEV_TX_BUSY;
1245 
1246 	if (skb_is_gso(skb))
1247 		err = ionic_tx_tso(q, skb);
1248 	else
1249 		err = ionic_tx(q, skb);
1250 
1251 	if (err)
1252 		goto err_out_drop;
1253 
1254 	/* Stop the queue if there aren't descriptors for the next packet.
1255 	 * Since our SG lists per descriptor take care of most of the possible
1256 	 * fragmentation, we don't need to have many descriptors available.
1257 	 */
1258 	ionic_maybe_stop_tx(q, 4);
1259 
1260 	return NETDEV_TX_OK;
1261 
1262 err_out_drop:
1263 	q->stop++;
1264 	q->drop++;
1265 	dev_kfree_skb(skb);
1266 	return NETDEV_TX_OK;
1267 }
1268