// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

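/* The "poke" helpers below work around doorbells the device appears to
 * have missed: if a queue still has unserviced work (tail != head) and
 * more than dbell_deadline jiffies have passed since the doorbell was
 * last rung, ring it again.  The Tx variant takes the netdev Tx queue
 * lock to serialize against the xmit path; the Rx variant needs no
 * lock since napi is the only filler, and it backs off by doubling its
 * deadline each time it has to re-ring.
 */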
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;
	struct netdev_queue *netdev_txq;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}

bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

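/* Rx buffers are whole pages, DMA-mapped once at allocation and then
 * handed to the device in chunks; page_offset tracks how much of the
 * page has been consumed so far so the remainder can be reused.
 */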
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

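/* Try to reuse the remainder of a partially consumed page rather than
 * allocating a fresh one.  The used length is rounded up to
 * IONIC_PAGE_SPLIT_SZ so successive buffers never overlap, and a
 * get_page() reference is taken for the device's continued use of the
 * page.  Pages from pfmemalloc emergency reserves or from remote NUMA
 * nodes are not reused.
 */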
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

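/* Two paths build the skb for a received packet: small packets (up to
 * rx_copybreak, see ionic_rx_clean()) are copied into a freshly
 * allocated linear skb so the page can be reposted immediately, while
 * larger packets are attached to the skb as page frags to avoid the
 * copy.
 */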
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
						 IONIC_PAGE_SIZE - buf_info->page_offset));
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

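/* Handle one received packet: build the skb, then translate the
 * completion's flags into skb metadata (RSS hash, checksum, VLAN tag,
 * hardware timestamp) and hand the skb to GRO.  When the device set
 * IONIC_RXQ_COMP_CSUM_F_CALC it supplied a checksum over the packet,
 * which is passed up as CHECKSUM_COMPLETE in skb->csum; the bad-csum
 * flags are only counted here, the skb is still delivered.
 */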
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

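/* Service one Rx completion; each completion corresponds to exactly
 * one Rx descriptor.  The color bit in the completion acts as a
 * generation bit that the device flips each time it wraps the
 * completion ring, so an entry whose color doesn't match done_color
 * is stale and there is nothing new to process.
 */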
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per cq completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static inline void ionic_write_cmb_desc(struct ionic_queue *q,
					void __iomem *cmb_desc,
					void *desc)
{
	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
		memcpy_toio(cmb_desc, desc, q->desc_size);
}

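/* Repost empty buffers to the Rx ring.  To amortize the doorbell cost,
 * nothing is done until at least fill_threshold descriptors are free.
 * Each descriptor gets one main buffer plus as many SG elements as are
 * needed to cover an MTU-sized frame, and the doorbell is rung once
 * after the whole batch has been posted.
 */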
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int i, j;
	unsigned int len;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = n_fill; i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
						 IONIC_PAGE_SIZE - buf_info->page_offset));
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
								IONIC_PAGE_SIZE -
								buf_info->page_offset));
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

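/* Feed a packet/byte sample for this queue pair to the net_dim
 * library, which adapts the hardware interrupt coalescing to the
 * observed load.  Which counters are sampled depends on whether this
 * napi context services Tx, Rx, or both.
 */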
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (unlikely(!budget))
		return budget;

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	if (unlikely(!budget))
		return budget;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

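/* Combined napi handler used when a Tx queue and an Rx queue share an
 * interrupt: Tx completions are serviced first with a fixed internal
 * budget (IONIC_TX_BUDGET_DEFAULT), then Rx against the napi budget.
 * Only Rx work counts toward the napi return value, though both are
 * reported in the interrupt credits.
 */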
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	if (unlikely(!budget))
		return budget;

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}

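/* DMA-map helpers for the Tx path.  On failure they hand back
 * DMA_MAPPING_ERROR, so the callers' dma_mapping_error() checks can
 * trip and unwind.
 */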
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		/* 0 is a valid dma_addr_t and would pass the caller's
		 * dma_mapping_error() check, so return DMA_MAPPING_ERROR
		 */
		return DMA_MAPPING_ERROR;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	/* the loop above leaves buf_info at bufs[1];
	 * the head mapping lives in bufs[0]
	 */
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (ionic_txq_hwstamp_enabled(q)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk any tx entries that have not yet been completed */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

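/* For TSO the TCP checksum field is preseeded with the pseudo-header
 * checksum computed with a zero length, as is conventional for
 * checksum offload; the device presumably folds in the per-segment
 * length as it emits each segment.  The "inner" variant handles
 * encapsulated (tunneled) packets.
 */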
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	struct ionic_txq_desc *desc = desc_info->desc;
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (start) {
		skb_tx_timestamp(skb);
		if (!ionic_txq_hwstamp_enabled(q))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

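/* Segment an skb for TSO.  The mapped buffers are walked in lockstep
 * with the segment boundaries: the outer loop emits one Tx descriptor
 * per MSS-sized segment (the first also carries hdrlen of headers),
 * and the inner loop packs buffer chunks into that descriptor's main
 * address and SG slots.  Only the first descriptor keeps the skb and
 * buffer bookkeeping, and only the last one rings the doorbell.
 */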
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	stats->csum_none++;
}

static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!ionic_txq_hwstamp_enabled(q))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

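/* Estimate how many Tx descriptors an skb needs, and linearize the skb
 * if any single segment would need more buffers than one descriptor
 * can describe (the main address plus max_sg_elems SG entries).
 * Returns the descriptor count, or a negative errno if linearizing
 * fails.
 */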
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool too_many_frags = false;
	skb_frag_t *frag;
	int desc_bufs;
	int chunk_len;
	int frag_rem;
	int tso_rem;
	int seg_rem;
	bool encap;
	int hdrlen;
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb)) {
		ndescs = skb_shinfo(skb)->gso_segs;
	} else {
		ndescs = 1;
		if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
			too_many_frags = true;
			goto linearize;
		}
	}

	/* If non-TSO, or no frags to check, we're done */
	if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
		return ndescs;

	/* We need to scan the skb to be sure that none of the MTU sized
	 * packets in the TSO will require more sgs per descriptor than we
	 * can support.  We loop through the frags, add up the lengths for
	 * a packet, and count the number of sgs used per packet.
	 */
	tso_rem = skb->len;
	frag = skb_shinfo(skb)->frags;
	encap = skb->encapsulation;

	/* start with just hdr in first part of first descriptor */
	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);
	seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
	frag_rem = hdrlen;

	while (tso_rem > 0) {
		desc_bufs = 0;
		while (seg_rem > 0) {
			desc_bufs++;

			/* We add the +1 because we can take buffers for one
			 * more than we have SGs: one for the initial desc data
			 * in addition to the SG segments that might follow.
			 */
			if (desc_bufs > q->max_sg_elems + 1) {
				too_many_frags = true;
				goto linearize;
			}

			if (frag_rem == 0) {
				frag_rem = skb_frag_size(frag);
				frag++;
			}
			chunk_len = min(frag_rem, seg_rem);
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}

		seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
	}

linearize:
	if (too_many_frags) {
		err = skb_linearize(skb);
		if (err)
			return err;
		stats->linearize++;
	}

	return ndescs;
}

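/* Stop the subqueue when descriptor space runs short.  A completion
 * may race with the stop, so re-check after the smp_rmb() barrier and
 * wake the queue again if space appeared; returns nonzero if the
 * queue is left stopped.
 */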
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}