/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

/*
**	IXL driver TX/RX Routines:
**	    This file was separated out to allow use by
**	    both the PF and VF drivers.
*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/* Local Prototypes */
static void	ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);

static int	ixl_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void	ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int	ixl_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear);
static int	ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear);

static void	ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void	ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
				  qidx_t pidx);
static int	ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
				      qidx_t budget);
static int	ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

extern int	ixl_intr(void *arg);

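/*
 * iflib TX/RX method tables. The driver registers one of two variants,
 * chosen by vsi->enable_head_writeback: "hwb" completes transmits by
 * reading the head index the hardware writes back past the end of the
 * ring, while "dwb" (descriptor writeback) polls the DD bit in the
 * descriptors that were marked for report status.
 */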
struct if_txrx ixl_txrx_hwb = {
	ixl_isc_txd_encap,
	ixl_isc_txd_flush,
	ixl_isc_txd_credits_update_hwb,
	ixl_isc_rxd_available,
	ixl_isc_rxd_pkt_get,
	ixl_isc_rxd_refill,
	ixl_isc_rxd_flush,
	ixl_intr
};

struct if_txrx ixl_txrx_dwb = {
	ixl_isc_txd_encap,
	ixl_isc_txd_flush,
	ixl_isc_txd_credits_update_dwb,
	ixl_isc_rxd_available,
	ixl_isc_rxd_pkt_get,
	ixl_isc_rxd_refill,
	ixl_isc_rxd_flush,
	ixl_intr
};

/*
 * ixl_get_default_rss_key - copy the driver's default RSS key into @key
 */
void
ixl_get_default_rss_key(u32 *key)
{
	MPASS(key != NULL);

	u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c,
	    0x35897377, 0x328b25e1, 0x4fa98922,
	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
	    0x0, 0x0, 0x0};

	bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}

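/*
 * A minimal usage sketch (hypothetical caller): fill a local buffer
 * with the default key before programming it into the device:
 *
 *	u32 rss_key[IXL_RSS_KEY_SIZE_REG];
 *
 *	ixl_get_default_rss_key(rss_key);
 *	// ... then write rss_key[] into the device's RSS key registers
 */
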
/**
 * i40e_vc_stat_str - convert virtchnl status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *
i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err)
{
	switch (stat_err) {
	case VIRTCHNL_STATUS_SUCCESS:
		return "OK";
	case VIRTCHNL_ERR_PARAM:
		return "VIRTCHNL_ERR_PARAM";
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
	case VIRTCHNL_STATUS_NOT_SUPPORTED:
		return "VIRTCHNL_STATUS_NOT_SUPPORTED";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

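/*
 * A TX descriptor is complete once the hardware has overwritten its
 * DTYPE field with I40E_TX_DESC_DTYPE_DESC_DONE (descriptor writeback).
 */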
static bool
ixl_is_tx_desc_done(struct tx_ring *txr, int idx)
{
	return (((txr->tx_base[idx].cmd_type_offset_bsz >> I40E_TXD_QW1_DTYPE_SHIFT)
	    & I40E_TXD_QW1_DTYPE_MASK) == I40E_TX_DESC_DTYPE_DESC_DONE);
}

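/*
 * Walk the DMA segment list for a TSO request and check whether any
 * window of header bytes, or of one MSS worth of payload, would need
 * more than IXL_MAX_TX_SEGS - 2 data descriptors; the hardware cannot
 * fetch a single packet from that many scattered buffers. A nonzero
 * return makes ixl_isc_txd_encap() fail with EFBIG, so the caller can
 * retry with a defragmented mbuf chain.
 */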
static int
ixl_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi)
{
	int	count, curseg, i, hlen, segsz, seglen, tsolen;

	if (nsegs <= IXL_MAX_TX_SEGS - 2)
		return (0);
	segsz = pi->ipi_tso_segsz;
	curseg = count = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - hlen;

	i = 0;
	curseg = segs[0].ds_len;
	while (hlen > 0) {
		count++;
		if (count > IXL_MAX_TX_SEGS - 2)
			return (1);
		if (curseg == 0) {
			i++;
			if (__predict_false(i == nsegs))
				return (1);

			curseg = segs[i].ds_len;
		}
		seglen = min(curseg, hlen);
		curseg -= seglen;
		hlen -= seglen;
		// printf("H:seglen = %d, count=%d\n", seglen, count);
	}
	while (tsolen > 0) {
		segsz = pi->ipi_tso_segsz;
		while (segsz > 0 && tsolen != 0) {
			count++;
			if (count > IXL_MAX_TX_SEGS - 2) {
				// printf("bad: count = %d\n", count);
				return (1);
			}
			if (curseg == 0) {
				i++;
				if (__predict_false(i == nsegs)) {
					// printf("bad: tsolen = %d", tsolen);
					return (1);
				}
				curseg = segs[i].ds_len;
			}
			seglen = min(curseg, segsz);
			segsz -= seglen;
			curseg -= seglen;
			tsolen -= seglen;
			// printf("D:seglen = %d, count=%d\n", seglen, count);
		}
		count = 0;
	}

	return (0);
}

/*********************************************************************
 *
 *  Setup descriptor for hw offloads
 *
 **********************************************************************/

static void
ixl_tx_setup_offload(struct ixl_tx_queue *que,
    if_pkt_info_t pi, u32 *cmd, u32 *off)
{
	switch (pi->ipi_etype) {
#ifdef INET
		case ETHERTYPE_IP:
			if (pi->ipi_csum_flags & CSUM_IP)
				*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			else
				*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
			break;
#endif
		default:
			break;
	}

	*off |= (pi->ipi_ehdrlen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	*off |= (pi->ipi_ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	switch (pi->ipi_ipproto) {
		case IPPROTO_TCP:
			if (pi->ipi_csum_flags & IXL_CSUM_TCP) {
				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
				*off |= (pi->ipi_tcp_hlen >> 2) <<
				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
			}
			break;
		case IPPROTO_UDP:
			if (pi->ipi_csum_flags & IXL_CSUM_UDP) {
				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
				*off |= (sizeof(struct udphdr) >> 2) <<
				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
			}
			break;
		case IPPROTO_SCTP:
			if (pi->ipi_csum_flags & IXL_CSUM_SCTP) {
				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
				*off |= (sizeof(struct sctphdr) >> 2) <<
				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
			}
			/* Fall Thru */
		default:
			break;
	}
}
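
/*
 * Worked example of the offset math above: for an untagged Ethernet +
 * IPv4 + TCP frame with no IP or TCP options, ipi_ehdrlen = 14,
 * ipi_ip_hlen = 20 and ipi_tcp_hlen = 20, so MACLEN is encoded as 7
 * (2-byte words), IPLEN as 5 and L4LEN as 5 (both in 4-byte words).
 */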

/**********************************************************************
 *
 *  Setup context for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static int
ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
{
	if_softc_ctx_t			scctx;
	struct i40e_tx_context_desc	*TXD;
	u32				cmd, mss, type, tsolen;
	int				idx;
	u64				type_cmd_tso_mss;

	idx = pi->ipi_pidx;
	TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx];
	tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
	scctx = txr->que->vsi->shared;

	type = I40E_TX_DESC_DTYPE_CONTEXT;
	cmd = I40E_TX_CTX_DESC_TSO;
	/* TSO MSS must not be less than 64 */
	if (pi->ipi_tso_segsz < IXL_MIN_TSO_MSS) {
		txr->mss_too_small++;
		pi->ipi_tso_segsz = IXL_MIN_TSO_MSS;
	}
	mss = pi->ipi_tso_segsz;

	type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
	    ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	    ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	TXD->tunneling_params = htole32(0);
	txr->que->tso++;

	return ((idx + 1) & (scctx->isc_ntxd[0]-1));
}

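/*
 * Note that the TSO context descriptor above occupies one ring slot of
 * its own: ixl_tso_setup() returns (idx + 1) masked to the ring size,
 * and ixl_isc_txd_encap() uses that as the index of the first data
 * descriptor for the packet.
 */
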
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

static int
ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ixl_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct ixl_tx_queue	*que = &vsi->tx_queues[pi->ipi_qsidx];
	struct tx_ring		*txr = &que->txr;
	int			nsegs = pi->ipi_nsegs;
	bus_dma_segment_t	*segs = pi->ipi_segs;
	struct i40e_tx_desc	*txd = NULL;
	int			i, j, mask, pidx_last;
	u32			cmd, off, tx_intr;

	// device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);

	cmd = off = 0;
	i = pi->ipi_pidx;

	tx_intr = (pi->ipi_flags & IPI_TX_INTR);
#if 0
	device_printf(iflib_get_dev(vsi->ctx), "%s: tx_intr %d\n", __func__, tx_intr);
#endif

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			if (ixl_tso_detect_sparse(segs, nsegs, pi))
				return (EFBIG);
			i = ixl_tso_setup(txr, pi);
		}
		ixl_tx_setup_offload(que, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;

	cmd |= I40E_TX_DESC_CMD_ICRC;
	mask = scctx->isc_ntxd[0] - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)htole16(pi->ipi_vtag) << I40E_TXD_QW1_L2TAG1_SHIFT));

		txr->tx_bytes += seglen;
		pidx_last = i;
		i = (i + 1) & mask;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	/* Add to report status array (if using TX interrupts) */
	if (!vsi->enable_head_writeback && tx_intr) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & mask;
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}
	pi->ipi_new_pidx = i;

	++txr->tx_packets;
	return (0);
}

static void
ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ixl_vsi *vsi = arg;
	struct tx_ring *txr = &vsi->tx_queues[txqid].txr;

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	wr32(vsi->hw, txr->tail, pidx);
}


/*********************************************************************
 *
 *  (Re)Initialize a queue transmit ring by clearing its memory.
 *
 **********************************************************************/
void
ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que)
{
	struct tx_ring *txr = &que->txr;

	/* Clear the old ring contents */
	bzero((void *)txr->tx_base,
	      (sizeof(struct i40e_tx_desc)) *
	      (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));

	// TODO: Write max descriptor index instead of 0?
	wr32(vsi->hw, txr->tail, 0);
	wr32(vsi->hw, I40E_QTX_HEAD(txr->me), 0);
}

/*
 * ixl_get_tx_head - Retrieve the value from the location where the
 *    HW records its HEAD index
 */
static inline u32
ixl_get_tx_head(struct ixl_tx_queue *que)
{
	if_softc_ctx_t	scctx = que->vsi->shared;
	struct tx_ring	*txr = &que->txr;
	void *head = &txr->tx_base[scctx->isc_ntxd[0]];

	return LE32_TO_CPU(*(volatile __le32 *)head);
}

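/*
 * Head-writeback credit accounting: with enable_head_writeback the
 * hardware stores its ring head in the extra descriptor slot cleared
 * by ixl_init_tx_ring(). Credits are simply the distance from the last
 * processed index to that head, modulo the ring size; e.g. with 256
 * descriptors, head 5 and tx_cidx_processed 250 yield 11 credits.
 */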
static int
ixl_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear)
{
	struct ixl_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct ixl_tx_queue	*que = &vsi->tx_queues[qid];
	struct tx_ring		*txr = &que->txr;
	int			 head, credits;

	/* Get the Head WB value */
	head = ixl_get_tx_head(que);

	credits = head - txr->tx_cidx_processed;
	if (credits < 0)
		credits += scctx->isc_ntxd[0];
	if (clear)
		txr->tx_cidx_processed = head;

	return (credits);
}

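/*
 * Descriptor-writeback credit accounting: ixl_isc_txd_encap() records
 * the ring index of each packet's last descriptor in txr->tx_rsq when
 * TX interrupts are requested. This routine walks those recorded
 * indices in order, stops at the first one whose descriptor is not yet
 * done, and returns how many descriptors the hardware has consumed.
 */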
static int
ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
{
	struct ixl_vsi *vsi = arg;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
	if_softc_ctx_t scctx = vsi->shared;
	struct tx_ring *txr = &tx_que->txr;

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	rs_cidx = txr->tx_rs_cidx;
#if 0
	device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) rs_cidx %d, txr->tx_rs_pidx %d\n", __func__,
	    txr->me, rs_cidx, txr->tx_rs_pidx);
#endif
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ixl_is_tx_desc_done(txr, cur);

	if (clear == false || !is_done)
		return (0);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		delta = (int32_t)cur - (int32_t)prev;
		MPASS(prev == 0 || delta != 0);
		if (delta < 0)
			delta += ntxd;
#if 0
		device_printf(iflib_get_dev(vsi->ctx),
			      "%s: (q%d) cidx_processed=%u cur=%u clear=%d delta=%d\n",
			      __func__, txr->me, prev, cur, clear, delta);
#endif
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ixl_is_tx_desc_done(txr, cur);
	} while (is_done);

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

#if 0
	device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) processed %d\n", __func__, txr->me, processed);
#endif
	return (processed);
}

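/*
 * RX refill: post new receive buffers by writing their physical
 * addresses into the "read" format of the descriptors. The hardware is
 * not notified here; the tail register is only bumped later, from
 * ixl_isc_rxd_flush().
 */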
static void
ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ixl_vsi *vsi = arg;
	if_softc_ctx_t scctx = vsi->shared;
	struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

static void
ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	struct ixl_vsi		*vsi = arg;
	struct rx_ring		*rxr = &vsi->rx_queues[rxqid].rxr;

	wr32(vsi->hw, rxr->tail, pidx);
}

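/*
 * Count how many complete packets are ready, starting at ring index
 * idx: a packet is counted once a descriptor with both DD and EOF set
 * is seen. A budget of 1 is a fast path that just reports whether the
 * next descriptor has been written back.
 */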
static int
ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct ixl_vsi *vsi = arg;
	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
	union i40e_rx_desc *rxd;
	u64 qword;
	uint32_t status;
	int cnt, i, nrxd;

	nrxd = vsi->shared->isc_nrxd[0];

	if (budget == 1) {
		rxd = &rxr->rx_base[idx];
		qword = le64toh(rxd->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;
		return !!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
	}

	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		qword = le64toh(rxd->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

		if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))
			cnt++;
	}

	return (cnt);
}

/*
** ixl_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
*/
static inline int
ixl_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded	decoded;

	decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return M_HASHTYPE_OPAQUE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
		return M_HASHTYPE_OPAQUE;

	/* Note: anything that gets to this point is IP */
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV6;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV6;
		default:
			return M_HASHTYPE_RSS_IPV6;
		}
	}
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV4;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV4;
		default:
			return M_HASHTYPE_RSS_IPV4;
		}
	}
	/* We should never get here!! */
	return M_HASHTYPE_OPAQUE;
}

/*********************************************************************
 *
 *  This routine executes in ithread context. It passes data that has
 *  been DMA'ed into host memory to the upper layer.
 *
 *  Returns 0 upon success, errno on failure
 *
 *********************************************************************/
static int
ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ixl_vsi		*vsi = arg;
	struct ixl_rx_queue	*que = &vsi->rx_queues[ri->iri_qsidx];
	struct rx_ring		*rxr = &que->rxr;
	union i40e_rx_desc	*cur;
	u32		status, error;
	u16		plen, vtag;
	u64		qword;
	u8		ptype;
	bool		eop;
	int		i, cidx;

	cidx = ri->iri_cidx;
	i = 0;
	do {
		/* 5 descriptor receive limit */
		MPASS(i < IXL_MAX_RX_SEGS);

		cur = &rxr->rx_base[cidx];
		qword = le64toh(cur->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;
		error = (qword & I40E_RXD_QW1_ERROR_MASK)
		    >> I40E_RXD_QW1_ERROR_SHIFT;
		plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
		    >> I40E_RXD_QW1_PTYPE_SHIFT;

		/* we should never be called without a valid descriptor */
		MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);

		ri->iri_len += plen;
		rxr->bytes += plen;

		cur->wb.qword1.status_error_len = 0;
		eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
		if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
			vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		else
			vtag = 0;

		/*
		** Make sure bad packets are discarded; note that
		** only the EOP descriptor has valid error results.
		*/
		if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			rxr->desc_errs++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == vsi->shared->isc_nrxd[0])
			cidx = 0;
		i++;
	} while (!eop);

	/* capture data for dynamic ITR adjustment */
	rxr->packets++;
	rxr->rx_packets++;

	if ((if_getcapenable(vsi->ifp) & IFCAP_RXCSUM) != 0)
		ixl_rx_checksum(ri, status, error, ptype);
	ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
	ri->iri_rsstype = ixl_ptype_to_hash(ptype);
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it.
 *
 *********************************************************************/
static void
ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	ri->iri_csum_flags = 0;

	/* No L3 or L4 checksum was calculated */
	if (!(status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	decoded = decode_rx_desc_ptype(ptype);

	/* IPv6 with extension headers likely have bad csum */
	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		if (status &
		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
			ri->iri_csum_flags = 0;
			return;
		}
	}

	ri->iri_csum_flags |= CSUM_L3_CALC;

	/* IPv4 checksum error */
	if (error & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))
		return;

	ri->iri_csum_flags |= CSUM_L3_VALID;
	ri->iri_csum_flags |= CSUM_L4_CALC;

	/* L4 checksum error */
	if (error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
		return;

	ri->iri_csum_flags |= CSUM_L4_VALID;
	ri->iri_csum_data |= htons(0xffff);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	if (link_speeds & I40E_LINK_SPEED_100MB)
		return IF_Mbps(100);
	else
		/* Minimum supported link speed */
		return IF_Mbps(100);
}
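
/*
 * Example: a link_speeds bitmap of I40E_LINK_SPEED_10GB |
 * I40E_LINK_SPEED_1GB resolves to IF_Gbps(10), since the checks above
 * run from the fastest supported speed down and the first match wins.
 */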