/*-
 * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "if_em.h"

#ifdef	RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi,
    u32 *txd_upper, u32 *txd_lower);
static int em_transmit_checksum_setup(struct adapter *adapter,
    if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
static int em_isc_txd_credits_update(void *arg, uint16_t txqid,
    uint32_t cidx_init, bool clear);
static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
    uint16_t count, uint16_t buflen __unused);
static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx);
static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
    int budget);
static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
    uint16_t count, uint16_t buflen __unused);

static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
    int budget);
static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri);
static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
extern int em_intr(void *arg);

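/*
 * iflib TX/RX operation tables.  em_txrx is used by adapters that support
 * the extended RX descriptor format; lem_txrx is used by the older chips
 * that only support legacy RX descriptors.  The TX path and the RX flush
 * routine are shared between the two.
 */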
struct if_txrx em_txrx = {
	em_isc_txd_encap,
	em_isc_txd_flush,
	em_isc_txd_credits_update,
	em_isc_rxd_available,
	em_isc_rxd_pkt_get,
	em_isc_rxd_refill,
	em_isc_rxd_flush,
	em_intr
};

struct if_txrx lem_txrx = {
	em_isc_txd_encap,
	em_isc_txd_flush,
	em_isc_txd_credits_update,
	lem_isc_rxd_available,
	lem_isc_rxd_pkt_get,
	lem_isc_rxd_refill,
	em_isc_rxd_flush,
	em_intr
};

extern if_shared_ctx_t em_sctx;

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO): build the
 *  TSO context descriptor for this packet.
 *
 **********************************************************************/
static int
em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
	if_softc_ctx_t scctx = adapter->shared;
	struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	struct e1000_context_desc *TXD;
	struct em_txbuffer *tx_buffer;
	int cur, hdr_len;

	hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	cur = pi->ipi_pidx;
	TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
	tx_buffer = &txr->tx_buffers[cur];

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
	TXD->lower_setup.ip_fields.ipcse =
	    htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
	TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(pi->ipi_tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				E1000_TXD_CMD_IP |	/* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(pi->ipi_len - hdr_len)); /* Total len */
	tx_buffer->eop = -1;
	txr->tx_tso = TRUE;

	if (++cur == scctx->isc_ntxd[0]) {
		cur = 0;
	}
	DPRINTF(iflib_get_dev(adapter->ctx), "%s: pidx: %d cur: %d\n",
	    __FUNCTION__, pi->ipi_pidx, cur);
	return (cur);
}

#define TSO_WORKAROUND 4
#define DONT_FORCE_CTX 1

/*********************************************************************
 *  The offload context is protocol specific (TCP/UDP) and thus
 *  only needs to be set when the protocol changes. A context change
 *  can be a performance detriment, and might be better just disabled.
 *  The reason arises in the way in which the controller supports
 *  pipelined requests from the Tx data DMA. Up to four requests can
 *  be pipelined, and they may belong to the same packet or to
 *  multiple packets. However all requests for one packet are issued
 *  before a request is issued for a subsequent packet, and if a
 *  request for the next packet requires a context change, that
 *  request will be stalled until the previous request completes.
 *  This means setting up a new context effectively disables
 *  pipelined Tx data DMA, which in turn greatly slows down
 *  performance when sending small frames.
 **********************************************************************/

static int
em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	struct em_txbuffer *tx_buffer;
	int csum_flags = pi->ipi_csum_flags;
	int cur, hdr_len;
	u32 cmd;

	cur = pi->ipi_pidx;
	hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
	cmd = adapter->txd_cmd;

	/*
	 * The 82574L can only remember the *last* context used
	 * regardless of queue that it was used for.  We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time.  82574L hardware spec, section 7.2.6,
	 * second note.
	 */
	if (DONT_FORCE_CTX &&
	    adapter->tx_num_queues == 1 &&
	    txr->csum_lhlen == pi->ipi_ehdrlen &&
	    txr->csum_iphlen == pi->ipi_ip_hlen &&
	    txr->csum_flags == csum_flags) {
		/*
		 * Same csum offload context as the previous packets;
		 * just return.
		 */
		*txd_upper = txr->csum_txd_upper;
		*txd_lower = txr->csum_txd_lower;
		return (cur);
	}

	TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
	if (csum_flags & CSUM_IP) {
		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
		TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
		TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);
		cmd |= E1000_TXD_CMD_IP;
	}

	if (csum_flags & (CSUM_TCP|CSUM_UDP)) {
		uint8_t tucso;

		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
		*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;

		if (csum_flags & CSUM_TCP) {
			tucso = hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		} else
			tucso = hdr_len + offsetof(struct udphdr, uh_sum);
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso = tucso;
	}

	txr->csum_lhlen = pi->ipi_ehdrlen;
	txr->csum_iphlen = pi->ipi_ip_hlen;
	txr->csum_flags = csum_flags;
	txr->csum_txd_upper = *txd_upper;
	txr->csum_txd_lower = *txd_lower;

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
		htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

	tx_buffer = &txr->tx_buffers[cur];
	tx_buffer->eop = -1;

	if (++cur == scctx->isc_ntxd[0]) {
		cur = 0;
	}
	DPRINTF(iflib_get_dev(adapter->ctx),
	    "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n",
	    csum_flags, *txd_upper, *txd_lower, hdr_len, cmd);
	return (cur);
}

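/*
 * iflib entry point: turn the packet described by 'pi' into a chain of
 * e1000 TX descriptors, applying TSO/checksum offload context setup and
 * the single-descriptor TSO sentinel workaround as needed.  On return,
 * pi->ipi_new_pidx holds the next free descriptor index.
 */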
static int
em_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	bus_dma_segment_t *segs = pi->ipi_segs;
	int nsegs = pi->ipi_nsegs;
	int csum_flags = pi->ipi_csum_flags;
	int i, j, first, pidx_last;
	u32 txd_upper = 0, txd_lower = 0;

	struct em_txbuffer *tx_buffer;
	struct e1000_tx_desc *ctxd = NULL;
	bool do_tso, tso_desc;

	i = first = pi->ipi_pidx;
	do_tso = (csum_flags & CSUM_TSO);
	tso_desc = FALSE;
	/*
	 * TSO Hardware workaround: if this packet is not TSO,
	 * is only a single descriptor long, and follows a TSO
	 * burst, then we need to add a sentinel descriptor to
	 * prevent premature writeback.
	 */
	if ((!do_tso) && (txr->tx_tso == TRUE)) {
		if (nsegs == 1)
			tso_desc = TRUE;
		txr->tx_tso = FALSE;
	}

	/* Do hardware assists */
	if (do_tso) {
		i = em_tso_setup(sc, pi, &txd_upper, &txd_lower);
		tso_desc = TRUE;
	} else if (csum_flags & EM_CSUM_OFFLOAD) {
		i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower);
	}

	if (pi->ipi_mflags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= htole16(pi->ipi_vtag) << 16;
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i);
	/* XXX adapter->pcix_82544 -- lem_fill_descriptors */

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		uint32_t cmd;

		ctxd = &txr->tx_base[i];
		tx_buffer = &txr->tx_buffers[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;
		cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd;

		/*
		 * TSO Workaround:
		 * If this is the last descriptor, we want to
		 * split it so we have a small final sentinel
		 */
		if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
			seg_len -= TSO_WORKAROUND;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
			ctxd->upper.data = htole32(txd_upper);

			if (++i == scctx->isc_ntxd[0])
				i = 0;

			/* Now make the sentinel */
			ctxd = &txr->tx_base[i];
			tx_buffer = &txr->tx_buffers[i];
			ctxd->buffer_addr = htole64(seg_addr + seg_len);
			ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
			ctxd->upper.data = htole32(txd_upper);
			pidx_last = i;
			if (++i == scctx->isc_ntxd[0])
				i = 0;
			DPRINTF(iflib_get_dev(sc->ctx), "TSO path pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
		} else {
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
			ctxd->upper.data = htole32(txd_upper);
			pidx_last = i;
			if (++i == scctx->isc_ntxd[0])
				i = 0;
			DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
		}
		tx_buffer->eop = -1;
	}

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
		htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	tx_buffer = &txr->tx_buffers[first];
	tx_buffer->eop = pidx_last;
	DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
	pi->ipi_new_pidx = i;

	return (0);
}

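/*
 * iflib entry point: advance the TX ring tail register so the hardware
 * starts fetching the newly written descriptors.
 */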
static void
em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
{
	struct adapter *adapter = arg;
	struct em_tx_queue *que = &adapter->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}

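/*
 * iflib entry point: walk the TX ring from cidx_init and count how many
 * descriptors the hardware has completed (DD set on each packet's EOP
 * descriptor).  If 'clear' is set, the completed descriptors are zeroed
 * so they can be reused.  Returns the number of descriptors processed.
 */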
static int
em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
{
	struct adapter *adapter = arg;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_tx_queue *que = &adapter->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	u32 cidx, processed = 0;
	int last, done;
	struct em_txbuffer *buf;
	struct e1000_tx_desc *tx_desc, *eop_desc;

	cidx = cidx_init;
	buf = &txr->tx_buffers[cidx];
	tx_desc = &txr->tx_base[cidx];
	last = buf->eop;
	eop_desc = &txr->tx_base[last];

	DPRINTF(iflib_get_dev(adapter->ctx), "credits_update: cidx_init=%d clear=%d last=%d\n",
	    cidx_init, clear, last);
	/*
	 * Get the index of the first descriptor AFTER the EOP of the
	 * first packet, so that we can do the simple comparison on
	 * the inner while loop below.
	 */
	if (++last == scctx->isc_ntxd[0])
		last = 0;
	done = last;

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (cidx != done) {
			if (clear) {
				tx_desc->upper.data = 0;
				tx_desc->lower.data = 0;
				tx_desc->buffer_addr = 0;
				buf->eop = -1;
			}
			tx_desc++;
			buf++;
			processed++;

			/* wrap the ring? */
			if (++cidx == scctx->isc_ntxd[0]) {
				cidx = 0;
			}
			buf = &txr->tx_buffers[cidx];
			tx_desc = &txr->tx_base[cidx];
		}
		/* See if we can continue to the next packet */
		last = buf->eop;
		if (last == -1)
			break;
		eop_desc = &txr->tx_base[last];
		/* Get new done point */
		if (++last == scctx->isc_ntxd[0])
			last = 0;
		done = last;
	}

	DPRINTF(iflib_get_dev(adapter->ctx), "Processed %d credits update\n", processed);
	return (processed);
}

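/*
 * iflib entry point (legacy descriptors): post 'count' receive buffers,
 * starting at ring index 'pidx', by writing their physical addresses into
 * the legacy RX descriptors and clearing the status bits.
 */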
static void
lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
    uint16_t count, uint16_t buflen __unused)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	struct e1000_rx_desc *rxd;
	int i;
	uint32_t next_pidx;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx];
		rxd->buffer_addr = htole64(paddrs[i]);
		/* status bits must be cleared */
		rxd->status = 0;

		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

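/*
 * iflib entry point (extended descriptors): same as above, but for the
 * extended RX descriptor format; the writeback status/error word is
 * cleared so the DD bit starts out unset.
 */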
static void
em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
    uint16_t count, uint16_t buflen __unused)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_rx_desc_extended *rxd;
	int i;
	uint32_t next_pidx;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = &rxr->rx_base[next_pidx];
		rxd->read.buffer_addr = htole64(paddrs[i]);
		/* DD bits must be cleared */
		rxd->wb.upper.status_error = 0;

		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

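/*
 * iflib entry point: move the RX ring tail register to 'pidx', handing
 * the freshly refilled descriptors to the hardware.
 */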
static void
em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
{
	struct adapter *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

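/*
 * iflib entry point (legacy descriptors): scan the ring from 'idx' and
 * return how many complete packets (descriptors with DD set, counted at
 * each EOP) are ready, bounded by 'budget'.
 */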
static int
lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	struct e1000_rx_desc *rxd;
	u32 staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];
		staterr = rxd->status;

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;

		if (++i == scctx->isc_nrxd[0])
			i = 0;

		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

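/*
 * iflib entry point (extended descriptors): same scan as above, using the
 * extended descriptor writeback status word.
 */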
static int
em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_rx_desc_extended *rxd;
	u32 staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;

		if (++i == scctx->isc_nrxd[0]) {
			i = 0;
		}

		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

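/*
 * iflib entry point (legacy descriptors): gather the descriptors that make
 * up one received packet into ri->iri_frags, record its length, checksum
 * status, and VLAN tag, and return 0 (or EBADMSG if the hardware flagged a
 * receive error).
 */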
static int
lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter *adapter = arg;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	struct e1000_rx_desc *rxd;
	u16 len;
	u32 status, errors;
	bool eop;
	int i, cidx;

	status = errors = i = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = (struct e1000_rx_desc *)&rxr->rx_base[cidx];
		status = rxd->status;
		errors = rxd->errors;

		/* The descriptor must have been written back */
		MPASS((status & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->length);
		ri->iri_len += len;

		eop = (status & E1000_RXD_STAT_EOP) != 0;

		/* Make sure bad packets are discarded */
		if (errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			adapter->dropped_pkts++;
			/* XXX fixup if common */
			return (EBADMSG);
		}

		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		/* Zero out the receive descriptor's status. */
		rxd->status = 0;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
		i++;
	} while (!eop);

	/* XXX add a faster way to look this up */
	if (adapter->hw.mac.type >= e1000_82543 && !(status & E1000_RXD_STAT_IXSM))
		lem_receive_checksum(status, errors, ri);

	if (status & E1000_RXD_STAT_VP) {
		ri->iri_vtag = le16toh(rxd->special);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_nfrags = i;

	return (0);
}

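/*
 * iflib entry point (extended descriptors): same as above, using the
 * extended writeback format for length, status, and VLAN tag.
 */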
static int
em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter *adapter = arg;
	if_softc_ctx_t scctx = adapter->shared;
	struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union e1000_rx_desc_extended *rxd;

	u16 len;
	u32 staterr = 0;
	bool eop;
	int i, cidx, vtag;

	i = vtag = 0;
	cidx = ri->iri_cidx;

	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);

		/* The descriptor must have been written back */
		MPASS((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ri->iri_len += len;

		eop = (staterr & E1000_RXD_STAT_EOP) != 0;

		/* Make sure bad packets are discarded */
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			adapter->dropped_pkts++;
			return (EBADMSG);
		}

		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		/* Zero out the receive descriptor's status. */
		rxd->wb.upper.status_error &= htole32(~0xFF);

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
		i++;
	} while (!eop);

	/* XXX add a faster way to look this up */
	if (adapter->hw.mac.type >= e1000_82543)
		em_receive_checksum(staterr, ri);

	if (staterr & E1000_RXD_STAT_VP) {
		vtag = le16toh(rxd->wb.upper.vlan);
	}

	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it again.
 *
 *********************************************************************/
static void
lem_receive_checksum(int status, int errors, if_rxd_info_t ri)
{
	/* Did it pass? */
	if (status & E1000_RXD_STAT_IPCS && !(errors & E1000_RXD_ERR_IPE))
		ri->iri_csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);

	if (status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(errors & E1000_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

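/*
 * Extended-descriptor variant of the above: translate the writeback
 * status/error bits into mbuf checksum flags for the stack.
 */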
static void
em_receive_checksum(uint32_t status, if_rxd_info_t ri)
{
	ri->iri_csum_flags = 0;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* If the IP checksum exists and there is no IP Checksum error */
	if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS) {
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	}

	/* TCP or UDP checksum */
	if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		ri->iri_csum_data = htons(0xffff);
	}
	if (status & E1000_RXD_STAT_UDPCS) {
		ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		ri->iri_csum_data = htons(0xffff);
	}
}
747