/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/iflib.h>

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "bnxt.h"

/*
 * Function prototypes
 */

static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);

static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);

static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
    qidx_t pidx);
static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
    qidx_t budget);
static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);

static int bnxt_intr(void *sc);

struct if_txrx bnxt_txrx = {
	.ift_txd_encap = bnxt_isc_txd_encap,
	.ift_txd_flush = bnxt_isc_txd_flush,
	.ift_txd_credits_update = bnxt_isc_txd_credits_update,
	.ift_rxd_available = bnxt_isc_rxd_available,
	.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
	.ift_rxd_refill = bnxt_isc_rxd_refill,
	.ift_rxd_flush = bnxt_isc_rxd_flush,
	.ift_legacy_intr = bnxt_intr
};

/*
 * Device Dependent Packet Transmit and Receive Functions
 */

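/*
 * Length hints for the TX BDs: bnxt_isc_txd_encap() indexes this table
 * with (ipi_len >> 9), so the entries cover packet lengths [0, 512),
 * [512, 1024), [1024, 1536) and [1536, 2048); packets of 2048 bytes or
 * more take TX_BD_SHORT_FLAGS_LHINT_GTE2K directly without consulting
 * the table.
 */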
static const uint16_t bnxt_tx_lhint[] = {
	TX_BD_SHORT_FLAGS_LHINT_LT512,
	TX_BD_SHORT_FLAGS_LHINT_LT1K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_GTE2K,
};

static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
	struct tx_bd_long *tbd;
	struct tx_bd_long_hi *tbdh;
	bool need_hi = false;
	uint16_t flags_type;
	uint16_t lflags;
	uint32_t cfa_meta;
	int seg = 0;
	uint8_t wrap = 0;

	/* If we have offloads enabled, we need to use two BDs. */
	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
	    pi->ipi_mflags & M_VLANTAG)
		need_hi = true;

	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
	need_hi = true;

	pi->ipi_new_pidx = pi->ipi_pidx;
	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
	pi->ipi_ndescs = 0;
	/* No need to byte-swap the opaque value */
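	/*
	 * Layout of the opaque field: the descriptor count for this packet
	 * (data segments plus the optional hi BD) lives in bits 31:24 and
	 * the starting producer index in the low bits.
	 * bnxt_isc_txd_credits_update() recovers the count with
	 * (opaque >> 24) so one completion credits the whole packet.
	 */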
	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
	flags_type = ((pi->ipi_nsegs + need_hi) <<
	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	if (pi->ipi_len >= 2048)
		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
	else
		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

	if (need_hi) {
		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

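		/*
		 * Ring-generation tracking: whenever the producer index
		 * wraps back to 0, the epoch bit toggles and the bit for the
		 * new index is recorded in epoch_arr, presumably so the
		 * db_ops doorbell routines can tell the hardware which
		 * generation an index belongs to on devices whose doorbells
		 * carry an epoch bit.
		 */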
		/* Handle wrapping */
		if (pi->ipi_new_pidx == txr->ring_size - 1)
			wrap = 1;

		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

		/* Toggle epoch bit on wrap */
		if (wrap && pi->ipi_new_pidx == 0)
			txr->epoch_bit = !txr->epoch_bit;
		if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
			txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
		tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
		tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen +
		    pi->ipi_ip_hlen + pi->ipi_tcp_hlen) >> 1);
		tbdh->cfa_action = 0;
		lflags = 0;
		cfa_meta = 0;
		if (pi->ipi_mflags & M_VLANTAG) {
			/* TODO: Do we need to byte-swap the vtag here? */
			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			    pi->ipi_vtag;
			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		tbdh->cfa_meta = htole32(cfa_meta);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			lflags |= TX_BD_LONG_LFLAGS_LSO |
			    TX_BD_LONG_LFLAGS_T_IPID;
		} else if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
			lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
			    TX_BD_LONG_LFLAGS_IP_CHKSUM;
		} else if (pi->ipi_csum_flags & CSUM_IP) {
			lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		tbdh->lflags = htole16(lflags);
	} else {
		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	for (; seg < pi->ipi_nsegs; seg++) {
		tbd->flags_type = htole16(flags_type);

		if (pi->ipi_new_pidx == txr->ring_size - 1)
			wrap = 1;
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		if (wrap && pi->ipi_new_pidx == 0)
			txr->epoch_bit = !txr->epoch_bit;
		if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
			txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
	tbd->flags_type = htole16(flags_type);
	if (pi->ipi_new_pidx == txr->ring_size - 1)
		wrap = 1;
	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
	if (wrap && pi->ipi_new_pidx == 0)
		txr->epoch_bit = !txr->epoch_bit;
	if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
		txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

	return 0;
}

static void
bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];

	/* pidx is what we last set ipi_new_pidx to */
	softc->db_ops.bnxt_db_tx(tx_ring, pidx);
	return;
}

static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	uint32_t raw_cons = cpr->raw_cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint32_t last_raw_cons;
	uint16_t type;
	uint16_t err;

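	/*
	 * Completion entries belong to the hardware until their valid bit
	 * matches the current phase, which NEXT_CP_CONS_V() flips each time
	 * the consumer index wraps.  The last_* copies remember the position
	 * of the most recent valid entry so the stored consumer state is
	 * never advanced past it when the scan stops at an entry the
	 * hardware has not written yet.
	 */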
	for (;;) {
		last_cons = cons;
		last_raw_cons = raw_cons;
		last_v_bit = v_bit;

		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		raw_cons++;
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmpl[cons], v_bit))
			goto done;

		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
		switch (type) {
		case TX_CMPL_TYPE_TX_L2:
			err = (le16toh(cmpl[cons].errors_v) &
			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
			if (err)
				device_printf(softc->dev,
				    "TX completion error %u\n", err);
			/* No need to byte-swap the opaque value */
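			/*
			 * The high byte of opaque carries the BD count
			 * written by bnxt_isc_txd_encap(), so one L2
			 * completion credits every descriptor the packet
			 * consumed.
			 */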
			avail += cmpl[cons].opaque >> 24;
			/*
			 * If we're not clearing, iflib only cares if there's
			 * at least one buffer.  Don't scan the whole ring in
			 * this case.
			 */
			if (!clear)
				goto done;
			break;
		default:
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				raw_cons++;
				if (!CMP_VALID(&cmpl[cons], v_bit)) {
					goto done;
				}
			}
			device_printf(softc->dev,
			    "Unhandled TX completion type %u\n", type);
			break;
		}
	}
done:

	if (clear && avail) {
		cpr->cons = last_cons;
		cpr->raw_cons = last_raw_cons;
		cpr->v_bit = last_v_bit;
		softc->db_ops.bnxt_db_tx_cq(cpr, 0);
	}

	return avail;
}

static void
bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *rx_ring;
	struct rx_prod_pkt_bd *rxbd;
	uint16_t type;
	uint16_t i;
	uint16_t rxqid;
	uint16_t count;
	uint32_t pidx;
	uint8_t flid;
	uint64_t *paddrs;
	qidx_t *frag_idxs;

	rxqid = iru->iru_qsidx;
	count = iru->iru_count;
	pidx = iru->iru_pidx;
	flid = iru->iru_flidx;
	paddrs = iru->iru_paddrs;
	frag_idxs = iru->iru_idxs;

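	/*
	 * Free list 0 backs the normal RX ring; any other free list backs
	 * the aggregation (AG) ring, whose buffers hold the extra fragments
	 * referenced by AG and TPA completions.
	 */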
	if (flid == 0) {
		rx_ring = &softc->rx_rings[rxqid];
		type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
	} else {
		rx_ring = &softc->ag_rings[rxqid];
		type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	}
	rxbd = (void *)rx_ring->vaddr;

	for (i = 0; i < count; i++) {
		rxbd[pidx].flags_type = htole16(type);
		rxbd[pidx].len = htole16(softc->rx_buf_size);
		/* No need to byte-swap the opaque value */
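		/*
		 * Layout of the opaque field: bits 31:24 hold the queue id,
		 * bits 23:16 the free-list id and bits 15:0 the fragment
		 * index; the RX packet-get path decodes the same fields into
		 * irf_flid/irf_idx when the completion arrives.
		 */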
		rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
		    | (frag_idxs[i]));
		rxbd[pidx].addr = htole64(paddrs[i]);

		/* Increment pidx and handle wrap-around */
		if (++pidx == rx_ring->ring_size) {
			pidx = 0;
			rx_ring->epoch_bit = !rx_ring->epoch_bit;
		}
		if (pidx < EPOCH_ARR_SZ)
			rx_ring->epoch_arr[pidx] = rx_ring->epoch_bit;
	}

	return;
}

static void
bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
    qidx_t pidx)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *rx_ring;

	if (flid == 0)
		rx_ring = &softc->rx_rings[rxqid];
	else
		rx_ring = &softc->ag_rings[rxqid];

	/*
	 * We *must* update the completion ring before updating the RX ring
	 * or we will overrun the completion ring and the device will wedge
	 * for RX.
	 */
	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
	softc->db_ops.bnxt_db_rx(rx_ring, pidx);
	return;
}

static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
	struct rx_pkt_cmpl *rcp;
	struct rx_tpa_end_cmpl *rtpae;
	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	uint8_t ags;
	int i;
	uint16_t type;

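	/*
	 * Count whole packets, not completion entries: each RX_L2 or
	 * TPA_END packet is a 32-byte completion (two 16-byte entries)
	 * followed by one entry per aggregation buffer, all of which must
	 * already be valid before the packet is counted.  The scan works on
	 * local copies of cons and v_bit, so nothing is consumed here;
	 * bnxt_isc_rxd_pkt_get() does the actual consumption.
	 */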
	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmp[cons], v_bit))
			goto cmpl_invalid;

		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
			rcp = (void *)&cmp[cons];
			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
			    RX_PKT_CMPL_AGG_BUFS_SFT;
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;

			/* Now account for all the AG completions */
			for (i = 0; i < ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_END:
			rtpae = (void *)&cmp[cons];
			ags = (rtpae->agg_bufs_v1 &
			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			/* Now account for all the AG completions */
			for (i = 0; i < ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_START:
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			break;
		case CMPL_BASE_TYPE_RX_AGG:
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d\n",
			    type, rxqid);

			/* Odd completion types use two completions */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);

				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			break;
		}
		if (avail > budget)
			break;
	}
cmpl_invalid:

	return avail;
}

static void
bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
{
	uint8_t rss_profile_id;

	rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
	switch (rss_profile_id) {
	case BNXT_RSS_HASH_TYPE_TCPV4:
		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
		break;
	case BNXT_RSS_HASH_TYPE_UDPV4:
		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
		break;
	case BNXT_RSS_HASH_TYPE_IPV4:
		ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
		break;
	case BNXT_RSS_HASH_TYPE_TCPV6:
		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
		break;
	case BNXT_RSS_HASH_TYPE_UDPV6:
		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
		break;
	case BNXT_RSS_HASH_TYPE_IPV6:
		ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
		break;
	default:
		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
		break;
	}
}

static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_pkt_cmpl *rcp;
	struct rx_pkt_cmpl_hi *rcph;
	struct rx_abuf_cmpl *acp;
	uint32_t flags2;
	uint32_t errors;
	uint8_t ags;
	int i;

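	/*
	 * An RX_L2 packet arrives as a 32-byte completion split into two
	 * 16-byte halves: rx_pkt_cmpl carries the length, RSS result and
	 * opaque buffer reference, while rx_pkt_cmpl_hi carries VLAN
	 * metadata and checksum results.  One rx_abuf_cmpl entry follows
	 * for each aggregation fragment.
	 */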
	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

	/* Extract from the first 16-byte BD */
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(rcp->rss_hash);
		bnxt_set_rsstype(ri, rcp->rss_hash_type);
	} else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	    RX_PKT_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(rcp->len);
	ri->iri_len = le16toh(rcp->len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	cpr->raw_cons++;
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

	flags2 = le32toh(rcph->flags2);
	errors = le16toh(rcph->errors_v2);
	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16 bits? */
		ri->iri_vtag = le32toh(rcph->metadata) &
		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
		    RX_PKT_CMPL_METADATA_PRI_MASK);
	}
	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
			ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
		      RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = 0xffff;
		}
	}

	/* And finally the ag ring stuff. */
	for (i = 1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16) & 0xff;
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	return 0;
}

static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_tpa_end_cmpl *agend =
	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
	struct rx_abuf_cmpl *acp;
	struct bnxt_full_tpa_start *tpas;
	uint32_t flags2;
	uint8_t ags;
	uint8_t agg_id;
	int i;

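	/*
	 * TPA (hardware LRO) packets are reported in two stages: the
	 * TPA_START completion was stashed per agg_id by
	 * bnxt_isc_rxd_pkt_get(), and here at TPA_END its RSS, VLAN and
	 * checksum fields are combined with the end completion's list of
	 * aggregation buffers to describe the final packet.
	 */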
	/* Get the agg_id */
	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
	    RX_TPA_END_CMPL_AGG_ID_SFT;
	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);

	/* Extract from the first 16-byte BD */
	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(tpas->low.rss_hash);
		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
	} else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = (tpas->low.opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = tpas->low.opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
	ri->iri_len = le16toh(tpas->low.len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	cpr->raw_cons++;
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);

	flags2 = le32toh(tpas->high.flags2);
	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16 bits? */
		ri->iri_vtag = le32toh(tpas->high.metadata) &
		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
		    RX_TPA_START_CMPL_METADATA_DE |
		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		ri->iri_csum_flags |= CSUM_L4_VALID;
		ri->iri_csum_data = 0xffff;
	}

	/* Now the ag ring stuff. */
	for (i = 1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16) & 0xff;
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	/* And finally, the empty BD at the end... */
	ri->iri_nfrags++;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[i].irf_flid = (agend->opaque >> 16) & 0xff;
	ri->iri_frags[i].irf_idx = agend->opaque & 0xffff;
	ri->iri_frags[i].irf_len = le16toh(agend->len);
	ri->iri_len += le16toh(agend->len);

	return 0;
}

/* If we return anything but zero, iflib will assert... */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
	struct cmpl_base *cmp;
	struct rx_tpa_start_cmpl *rtpa;
	uint16_t flags_type;
	uint16_t type;
	uint8_t agg_id;

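	/*
	 * Consume completions until one of them yields a packet: L2 and
	 * TPA_END completions are handed to their get routines, while a
	 * TPA_START only stashes both halves of the start completion in
	 * tpa_start[agg_id] for the eventual TPA_END and the loop
	 * continues.
	 */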
	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
		cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

		flags_type = le16toh(cmp->type);
		type = flags_type & CMPL_BASE_TYPE_MASK;

		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
			return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_END:
			return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
			rtpa = (void *)&cmp_q[cpr->cons];
			agg_id = (rtpa->agg_id &
			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
			    RX_TPA_START_CMPL_AGG_ID_SFT;
			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low =
			    *rtpa;

			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
			cpr->raw_cons++;
			ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);

			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
			    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d get\n",
			    type, ri->iri_qsidx);
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
				    cpr->v_bit);
				cpr->raw_cons++;
				ri->iri_cidx = RING_NEXT(&cpr->ring,
				    ri->iri_cidx);
				CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			}
			break;
		}
	}

	return 0;
}

static int
bnxt_intr(void *sc)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;

	device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__,
	    __LINE__);
	return ENOSYS;
}