// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

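/* Return the CQE at the ring head if HW has filled it, else NULL.
 * On a valid CQE the head is advanced; cqe_cnt is a power of two,
 * so wrap-around is a simple mask.
 */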
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

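/* Map a logical segment index to the u16 slot the HW expects.
 * Per-segment sizes are packed as u16s inside the 64-bit words of
 * an SG subdescriptor, so on big-endian hosts the order within
 * each group of four u16s must be reversed.
 */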
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

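/* Handle one TX completion: deliver the TX timestamp if one was
 * requested and captured by HW, update packet/byte counters, unmap
 * the DMA buffers and free the skb.
 */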
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		/* A raw value of 1 is treated as "no timestamp was
		 * recorded for this SQE", so skip the conversion.
		 */
		if (timestamp != 1) {
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	/* The first 8 bytes are the timestamp */
	err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

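/* Attach one received buffer to the skb as a page fragment.  The
 * first fragment may be prepended with an 8-byte HW timestamp,
 * which is consumed and stripped from the data here.  The buffer
 * is unmapped once attached.
 */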
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer.  For now the
		 * only possible offset is 8 bytes in the case
		 * where the packet is prepended with a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page) + off, len - off, pfvf->rbsize);

	otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
			    pfvf->rbsize, DMA_FROM_DEVICE);
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

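/* Walk every SG subdescriptor of the CQE and return each receive
 * buffer pointer back to the aura/pool it came from.
 */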
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

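/* Account a receive error in the driver stats and decide the packet's
 * fate.  Returns true if the packet must be dropped (its buffers are
 * freed back to the pool here), false if it may still be passed on to
 * the stack.
 */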
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to the stack.
		 */
		if (cqe->sg.segs == 1)
			return false;
	}

	/* If RXALL is enabled, pass on packets to the stack. */
	if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);
	return true;
}

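/* Hand one received packet to the stack: build an skb from the
 * receive buffer, set hash/checksum/queue metadata and deliver it
 * via napi_gro_frags().
 */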
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct sk_buff *skb = NULL;

	if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
	cq->pool_ptrs++;

	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_frags(napi);
}

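/* Process up to 'budget' RX CQEs, acknowledge them to HW and refill
 * the buffer pool with as many fresh buffers as were consumed.  If
 * buffer allocation fails, the refill is deferred to a workqueue.
 */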
static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	s64 bufptr;

	while (likely(processed_cqe < budget)) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (unlikely(!cq->pool_ptrs))
		return 0;

	/* Refill pool with new buffers */
	while (cq->pool_ptrs) {
		bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
		if (unlikely(bufptr <= 0)) {
			struct refill_work *work;
			struct delayed_work *dwork;

			work = &pfvf->refill_wrk[cq->cq_idx];
			dwork = &work->pool_refill_work;
			/* Schedule a task if no other task is running */
			if (!cq->refill_task_sched) {
				cq->refill_task_sched = true;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			}
			break;
		}
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}

	return processed_cqe;
}

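/* Process up to 'budget' TX completion CQEs, acknowledge them to HW
 * and wake the TX queue if it was stopped while the ring was full.
 */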
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	while (likely(processed_cqe < budget)) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}
		otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
				     cqe, budget, &tx_pkts, &tx_bytes);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if the queue was stopped earlier because the
		 * ring was full.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = CQS_PER_CINT - 1; i >= 0; i--) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			/* If the RQ refill WQ task is running, skip
			 * NAPI processing for this queue.
			 */
			if (cq->refill_task_sched)
				continue;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

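/* Push the staged SQE out to HW via an LMTST operation: copy the SQE
 * into the LMT region and issue the flush.  A zero return from
 * otx2_lmt_flush() is treated as an incomplete LMTST, so the copy and
 * flush are retried until the store succeeds.
 */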
static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte
			 * boundary.  So whether this SG subdesc ends up
			 * with 2 or 3 segments, reserve room for three
			 * IOVAs; a lone segment needs just the header
			 * plus one IOVA, which is still 16-byte aligned.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;
			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

	*offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* If an SQE was framed at this slot before, these constant
	 * fields are already set and need not be written again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* Get the VLAN-encapsulated protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

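/* DMA map the skb linear payload and every page fragment once, up
 * front, so the per-segment loop of SW TSO can reuse the mappings
 * instead of mapping each segment's data separately.
 */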
static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

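/* Return the DMA address of a chunk of TSO payload.  A negative 'seg'
 * means the chunk lives in the skb linear area (skb->data); otherwise
 * it lies within page fragment 'seg'.  When skb->data carried payload,
 * otx2_dma_map_tso_skb() stored its mapping at index 0, so fragment
 * mappings are shifted up by one.
 */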
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte
			 * boundary.  So whether this SG subdesc ends up
			 * with 2 or 3 segments, reserve room for three
			 * IOVAs; a lone segment needs just the header
			 * plus one IOVA, which is still 16-byte aligned.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

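/* Software TSO: segment the skb on the CPU.  Each TSO segment gets
 * its own SQE with a freshly built protocol header (staged in
 * sq->tso_hdrs) followed by SG pointers into the payload that was
 * DMA-mapped once for the whole skb.
 */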
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and the skb need to be freed only after
		 * the last TSO segment is transmitted.  So set 'PNC'
		 * only for the last segment, and point its sqe_id to
		 * the first segment's SQE index, where the skb address
		 * and DMA mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		otx2_sqe_flush(sq, offset);
	}
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (!pfvf->hw.hw_tso)
		return false;

	/* On 96xx B0 the HW has an issue: when the payload of the last
	 * LSO segment is shorter than 16 bytes, some header fields may
	 * not be modified correctly.  Don't offload such TSO segments.
	 */
	if (!is_96xx_B0(pfvf->pdev))
		return true;

	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{
	u64 iova;

	if (!skb_shinfo(skb)->gso_size &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
	} else {
		skb_tx_timestamp(skb);
	}
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_sqe;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is room for a new SQE.
	 * 'Number of SQBs freed to the SQ's pool minus the SQ's aura
	 * count' gives the free SQE count.
	 */
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

	if (free_sqe < sq->sqe_thresh ||
	    free_sqe < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64 bits, as they contain constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	otx2_set_txtstamp(pfvf, skb, sq, &offset);

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	otx2_sqe_flush(sq, offset);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	u64 iova, pa;

	while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
		if (!cqe->sg.subdc)
			continue;
		processed_cqe++;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;

	sq = &pfvf->qset.sq[cq->cint_idx];

	while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
955