xref: /freebsd/sys/dev/axgbe/xgbe-txrx.c (revision e2eeea75eb8b6dd50c1298067a0655880d186734)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2020 Advanced Micro Devices, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * Contact Information :
28  * Rajesh Kumar <rajesh1.kumar@amd.com>
29  * Shreyank Amartya <Shreyank.Amartya@amd.com>
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "xgbe.h"
37 #include "xgbe-common.h"
38 
39 /*
40  * IFLIB interfaces
41  */
42 static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
43 static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
44 static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
45 static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
46 static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
47 static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
48 static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
49 
50 struct if_txrx axgbe_txrx = {
51 	.ift_txd_encap = axgbe_isc_txd_encap,
52 	.ift_txd_flush = axgbe_isc_txd_flush,
53 	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
54 	.ift_rxd_available = axgbe_isc_rxd_available,
55 	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
56 	.ift_rxd_refill = axgbe_isc_rxd_refill,
57 	.ift_rxd_flush = axgbe_isc_rxd_flush,
58 	.ift_legacy_intr = NULL
59 };
60 
61 static void
62 xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
63 {
64 
65 	axgbe_printf(1, "------Packet Info Start------\n");
66 	axgbe_printf(1, "pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
67                pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
68         axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
69                pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
70         axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
71                pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
72         axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
73                pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
74 }
75 
76 static bool
77 axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
78     if_pkt_info_t pi)
79 {
80 	struct xgbe_ring_desc	*rdesc;
81 	struct xgbe_ring_data	*rdata;
82 	bool inc_cur = false;
83 
84 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
85 	rdesc = rdata->rdesc;
86 
87 	axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
88 	    pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);
89 
90 	axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
91 	    pi->ipi_vtag, ring->tx.cur_vlan_ctag);
92 
93 	if ((pi->ipi_csum_flags & CSUM_TSO) &&
94 	    (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
95 		/*
96 		 * Set TSO maximum segment size
97 		 * Mark as context descriptor
98 		 * Indicate this descriptor contains MSS
99 		 */
100 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
101 		    MSS, pi->ipi_tso_segsz);
102 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
103 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
104 		ring->tx.cur_mss = pi->ipi_tso_segsz;
105 		inc_cur = true;
106 	}
107 
108 	if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
109 		/*
110 		 * Mark it as context descriptor
111 		 * Set the VLAN tag
112 		 * Indicate this descriptor contains the VLAN tag
113 		 */
114 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
115 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
116 		    VT, pi->ipi_vtag);
117 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
118 		ring->tx.cur_vlan_ctag = pi->ipi_vtag;
119 		inc_cur = true;
120 	}
121 
122 	return (inc_cur);
123 }
124 
125 static uint16_t
126 axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
127     struct xgbe_packet_data *packet)
128 {
129 	uint32_t tcp_payload_len = 0, bytes = 0;
130 	uint16_t max_len, hlen, payload_len, pkts = 0;
131 
132 	packet->tx_packets = packet->tx_bytes = 0;
133 
134 	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
135 	if (pi->ipi_csum_flags & CSUM_TSO) {
136 
137 		tcp_payload_len = pi->ipi_len - hlen;
138 		axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
139 		    __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
140 		    pi->ipi_tcp_hlen);
141 
142 		max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
143 		if (pi->ipi_vtag)
144 			max_len += VLAN_HLEN;
145 
146 		while (tcp_payload_len) {
147 
148 			payload_len = max_len - hlen;
149 			payload_len = min(payload_len, tcp_payload_len);
150 			tcp_payload_len -= payload_len;
151 			pkts++;
152 			bytes += (hlen + payload_len);
153 			axgbe_printf(1, "%s: max_len %d payload_len %d "
154 			    "tcp_len %d\n", __func__, max_len, payload_len,
155 			    tcp_payload_len);
156 		}
157 	} else {
158 		pkts = 1;
159 		bytes = pi->ipi_len;
160 	}
161 
162 	packet->tx_packets = pkts;
163 	packet->tx_bytes = bytes;
164 
165 	axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
166 	    packet->tx_packets, packet->tx_bytes, hlen);
167 
168 	return (hlen);
169 }
170 
/*
 * iflib entry point: encapsulate one packet into Tx DMA descriptors.
 *
 * Descriptor layout produced here:
 *   [optional context descriptor]  - MSS/VLAN change (axgbe_ctx_desc_setup)
 *   [first descriptor]             - header (TSO) or first segment, FD set
 *   [payload descriptors]          - remaining segment data, OWN set
 * The OWN bit of the FIRST descriptor is set last, after a write barrier,
 * so the hardware never sees a partially built chain.
 *
 * Returns 0; ring->cur and pi->ipi_new_pidx are advanced past the chain.
 */
static int
axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel;
	struct xgbe_ring	*ring;
	struct xgbe_ring_desc	*rdesc;
	struct xgbe_ring_data	*rdata;
	struct xgbe_packet_data *packet;
	unsigned int cur, start, tx_set_ic;
	uint16_t offset, hlen, datalen, tcp_payload_len = 0;
	int cur_seg = 0;

	xgbe_print_pkt_info(pdata, pi);

	channel = pdata->channel[pi->ipi_qsidx];
	ring = channel->tx_ring;
	packet = &ring->packet_data;
	cur = start = ring->cur;

	axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
	    __func__, pi->ipi_qsidx, ring->cur, ring->dirty);

	/* An empty packet from the stack is a bug; tolerated in production. */
	MPASS(pi->ipi_len != 0);
	if (__predict_false(pi->ipi_len == 0)) {
		axgbe_error("empty packet received from stack\n");
		return (0);
	}

	/* iflib's producer index must match our ring cursor. */
	MPASS(ring->cur == pi->ipi_pidx);
	if (__predict_false(ring->cur != pi->ipi_pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pi->ipi_pidx);
	}

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	memset(packet, 0, sizeof(*packet));
	hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
	axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
	    __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);

	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	/* Add Context descriptor if needed (for TSO, VLAN cases) */
	if (axgbe_ctx_desc_setup(pdata, ring, pi))
		cur++;

	rdata = XGBE_GET_DESC_DATA(ring, cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
	    "ipi_len 0x%x\n", __func__, cur,
	    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));

	/* Update the buffer length */
	if (hlen == 0)
		hlen = pi->ipi_segs[cur_seg].ds_len;
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);

	/* VLAN tag insertion check */
	if (pi->ipi_vtag) {
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
		    TX_NORMAL_DESC2_VLAN_INSERT);
	}

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/*
	 * Set the OWN bit if this is not the first descriptor. For first
	 * descriptor, OWN bit will be set at last so that hardware will
	 * process the descriptors only after the OWN bit for the first
	 * descriptor is set
	 */
	if (cur != start)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);

		tcp_payload_len = pi->ipi_len - hlen;

		/* Set TCP payload length*/
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
		    tcp_payload_len);

		/* TCPHDRLEN field is expressed in 32-bit words. */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
		    pi->ipi_tcp_hlen/4);

		axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
		    pi->ipi_tcp_hlen/4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM*/
		if (pi->ipi_csum_flags)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

		/* Set total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
	}

	cur++;

	/*
	 * Emit payload descriptors.  Segment 0 is re-used from its hlen
	 * offset onward (its first hlen bytes went into the first
	 * descriptor above); later segments are used in full.
	 */
	for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {

		if (cur_seg == 0) {
			offset = hlen;
			datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
		} else {
			offset = 0;
			datalen = pi->ipi_segs[cur_seg].ds_len;
		}

		if (datalen) {
			rdata = XGBE_GET_DESC_DATA(ring, cur);
			rdesc = rdata->rdesc;


			/* Update buffer address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));

			/* Update the buffer length */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);

			/* Set OWN bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

			/* Mark it as NORMAL descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

			/* Enable HW CSUM*/
			if (pi->ipi_csum_flags)
				XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

			axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
			    "ipi_len 0x%x\n", __func__, cur,
			    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

			cur++;
		}
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Descriptor writes must be visible before hardware gets ownership. */
	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	/* Publish the new producer index back to iflib (ring size is 2^n). */
	ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));

	axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
	    ring->dirty);

	return (0);
}
369 
370 static void
371 axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
372 {
373 	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
374 	struct xgbe_prv_data	*pdata = &sc->pdata;
375 	struct xgbe_channel	*channel = pdata->channel[txqid];
376 	struct xgbe_ring	*ring = channel->tx_ring;
377 	struct xgbe_ring_data	*rdata = XGBE_GET_DESC_DATA(ring, pidx);
378 
379 	axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
380 	    __func__, txqid, pidx, ring->cur, ring->dirty);
381 
382 	MPASS(ring->cur == pidx);
383 	if (__predict_false(ring->cur != pidx)) {
384 		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
385 		    ring->cur, pidx);
386 	}
387 
388 	wmb();
389 
390 	/* Ring Doorbell */
391 	if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) !=
392 	    lower_32_bits(rdata->rdata_paddr)) {
393 		XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
394 	    	    lower_32_bits(rdata->rdata_paddr));
395 	}
396 }
397 
398 static int
399 axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
400 {
401 	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
402 	struct xgbe_hw_if	*hw_if = &sc->pdata.hw_if;
403 	struct xgbe_prv_data	*pdata = &sc->pdata;
404 	struct xgbe_channel     *channel = pdata->channel[txqid];
405 	struct xgbe_ring	*ring = channel->tx_ring;
406 	struct xgbe_ring_data	*rdata;
407 	int processed = 0;
408 
409 	axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
410 	    __func__, txqid, clear, ring->cur, ring->dirty);
411 
412 	if (__predict_false(ring->cur == ring->dirty)) {
413 		axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
414 		    __func__, ring->cur, ring->dirty);
415 		return (0);
416 	}
417 
418 	/* Check whether the first dirty descriptor is Tx complete */
419 	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
420 	if (!hw_if->tx_complete(rdata->rdesc)) {
421 		axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
422 		return (0);
423 	}
424 
425 	/*
426 	 * If clear is false just let the caller know that there
427 	 * are descriptors to reclaim
428 	 */
429 	if (!clear) {
430 		axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
431 		return (1);
432 	}
433 
434 	do {
435 		hw_if->tx_desc_reset(rdata);
436 		processed++;
437 		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);
438 
439 		/*
440 		 * tx_complete will return true for unused descriptors also.
441 		 * so, check tx_complete only until used descriptors.
442 		 */
443 		if (ring->cur == ring->dirty)
444 			break;
445 
446 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
447 	} while (hw_if->tx_complete(rdata->rdesc));
448 
449 	axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
450 	    processed, ring->cur, ring->dirty);
451 
452 	return (processed);
453 }
454 
455 static void
456 axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
457 {
458  	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
459 	struct xgbe_prv_data	*pdata = &sc->pdata;
460 	struct xgbe_channel     *channel = pdata->channel[iru->iru_qsidx];
461 	struct xgbe_ring	*ring = channel->rx_ring;
462 	struct xgbe_ring_data	*rdata;
463 	struct xgbe_ring_desc	*rdesc;
464 	unsigned int rx_usecs = pdata->rx_usecs;
465 	unsigned int rx_frames = pdata->rx_frames;
466 	unsigned int inte;
467 	uint8_t	count = iru->iru_count;
468 	int i, j;
469 
470 	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
471 	    "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
472 	    iru->iru_pidx, count, ring->cur, ring->dirty);
473 
474 	for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {
475 
476 		if (i == XGBE_RX_DESC_CNT_DEFAULT)
477 			i = 0;
478 
479 		rdata = XGBE_GET_DESC_DATA(ring, i);
480 		rdesc = rdata->rdesc;
481 
482 		if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
483 		    RX_NORMAL_DESC3, OWN))) {
484 			axgbe_error("%s: refill clash, cur %d dirty %d index %d"
485 			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
486 		}
487 
488 		/* Assuming split header is enabled */
489 		if (iru->iru_flidx == 0) {
490 
491 			/* Fill header/buffer1 address */
492 			rdesc->desc0 =
493 			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
494 			rdesc->desc1 =
495 			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
496 		} else {
497 
498 			/* Fill data/buffer2 address */
499 			rdesc->desc2 =
500 			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
501 			rdesc->desc3 =
502 			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
503 
504 			if (!rx_usecs && !rx_frames) {
505 				/* No coalescing, interrupt for every descriptor */
506 				inte = 1;
507 			} else {
508 				/* Set interrupt based on Rx frame coalescing setting */
509 				if (rx_frames &&
510 				    !(((ring->dirty + 1) &(ring->rdesc_count - 1)) % rx_frames))
511 					inte = 1;
512 				else
513 					inte = 0;
514 			}
515 
516 			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
517 
518 			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
519 
520 			wmb();
521 
522 			ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));
523 		}
524 	}
525 
526 	axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
527 	    channel->queue_index, ring->cur, ring->dirty);
528 }
529 
530 static void
531 axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
532 {
533  	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
534 	struct xgbe_prv_data	*pdata = &sc->pdata;
535 	struct xgbe_channel     *channel = pdata->channel[qsidx];
536 	struct xgbe_ring	*ring = channel->rx_ring;
537 	struct xgbe_ring_data 	*rdata;
538 
539 	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
540 	    __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);
541 
542 	if (flidx == 1) {
543 
544 		rdata = XGBE_GET_DESC_DATA(ring, pidx);
545 
546 		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
547 		    lower_32_bits(rdata->rdata_paddr));
548 	}
549 
550 	wmb();
551 }
552 
/*
 * iflib entry point: count how many complete packets are ready on the
 * Rx ring starting at ring->cur, without consuming them.
 *
 * A packet may span several descriptors; a descriptor run is counted as
 * one packet only once a descriptor with LD (last descriptor) is seen
 * and no context descriptor is still pending (CDA set without the
 * matching CTXT descriptor yet).  Scanning stops at the first
 * descriptor still owned by hardware.
 *
 * NOTE(review): the loop condition is "count <= budget", so this can
 * report up to budget + 1 packets - confirm against iflib's expectation
 * for ift_rxd_available before changing.
 */
static int
axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data 	*pdata = &sc->pdata;
	struct xgbe_channel     *channel = pdata->channel[qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data   *rdata;
	struct xgbe_ring_desc   *rdesc;
	unsigned int cur;
	int count;
	/* running is diagnostic only: set while inside a partial packet. */
	uint8_t incomplete = 1, context_next = 0, running = 0;

	axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
	    __func__, qsidx, idx, budget, ring->cur, ring->dirty);

	cur = ring->cur;
	for (count = 0; count <= budget; ) {

		rdata = XGBE_GET_DESC_DATA(ring, cur);
		rdesc = rdata->rdesc;

		/* Hardware still owns this descriptor - nothing further ready. */
		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
			break;

		running = 1;

		/* LD: this descriptor ends a packet. */
		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
			incomplete = 0;

		/* CDA: a context descriptor follows; wait for it. */
		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
			context_next = 1;

		/* CTXT: this is the awaited context descriptor itself. */
		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
			context_next = 0;

		cur = (cur + 1) & (ring->rdesc_count - 1);

		if (incomplete || context_next)
			continue;

		/* Increment pkt count & reset variables for next full packet */
		count++;
		incomplete = 1;
		context_next = 0;
		running = 0;
	}

	axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
	    "count %d\n", __func__, qsidx, cur, incomplete, context_next,
	    running, count);

	return (count);
}
607 
/*
 * Return the number of bytes the hardware placed in buffer 1 (the
 * header / free-list-0 buffer) of @rdata's descriptor.
 *
 * NOTE(review): the two hard-coded 256 values below appear to be the
 * free-list-0 (header) buffer size, while the adjacent debug messages
 * print pdata->rx_buf_size - confirm 256 matches the actual free-list-0
 * buffer size configured for this ring.
 */
static unsigned int
xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet)
{

	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
		axgbe_printf(1, "%s: Not First\n", __func__);
		return (0);
	}

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len) {
		axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
		return (rdata->rx.hdr_len);
	}

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__,
		    pdata->rx_buf_size);
		return (256);
	}

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
	    pdata->rx_buf_size);

	/* Single-descriptor packet: buffer 1 holds min(buf size, pkt len). */
	return (min_t(unsigned int, 256, rdata->rx.len));
}
642 
643 static unsigned int
644 xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
645     struct xgbe_packet_data *packet, unsigned int len)
646 {
647 
648 	/* Always the full buffer if not the last descriptor */
649 	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
650 		axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
651 		return (pdata->rx_buf_size);
652 	}
653 
654 	/* Last descriptor so calculate how much of the buffer was used
655 	 * for the last bit of data
656 	 */
657 	return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0);
658 }
659 
660 static inline void
661 axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
662     int pos, int flid)
663 {
664 	axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
665 	ri->iri_frags[pos].irf_flid = flid;
666 	ri->iri_frags[pos].irf_idx = idx;
667 	ri->iri_frags[pos].irf_len = len;
668 }
669 
/*
 * iflib entry point: consume the descriptors of one received packet and
 * fill in @ri (fragments, length, checksum/VLAN/RSS metadata).
 *
 * Each data descriptor contributes TWO fragments: the header buffer
 * from free list 0 and the data buffer from free list 1.  Descriptors
 * are consumed via hw_if->dev_read() until one carries the LAST
 * attribute with no context descriptor still pending.  Returns 0.
 */
static int
axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
 	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data 	*pdata = &sc->pdata;
	struct xgbe_hw_if	*hw_if = &pdata->hw_if;
	struct xgbe_channel     *channel = pdata->channel[ri->iri_qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct xgbe_ring_data	*rdata;
	unsigned int last, context_next, context;
	unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur;
	int i = 0;

	axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
	    ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);

	memset(packet, 0, sizeof(struct xgbe_packet_data));

	while (1) {

read_again:
		/* dev_read() parses the descriptor at ring->cur into *packet;
		 * nonzero means hardware still owns it. */
		if (hw_if->dev_read(channel)) {
			axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
		    	    __func__, ring->cur);
			break;
		}

		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		prev_cur = ring->cur;
		ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST);

		context_next = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);

		context = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT);

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
			len += buf2_len;
		} else
			buf1_len = buf2_len = 0;

		if (packet->errors)
			axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
			    "buf2 %d len %d frags %d error %d\n", __func__, last, context,
			    context_next, buf1_len, buf2_len, len, i, packet->errors);

		/* One fragment per buffer: header from fl 0, data from fl 1. */
		axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
		i++;
		axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
		i++;

		if (!last || context_next)
			goto read_again;

		break;
	}

	/* Hardware verified the IP header checksum for this packet. */
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
		axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
	}

	max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
		ri->iri_flags |= M_VLANTAG;
		ri->iri_vtag = packet->vlan_ctag;
		max_len += VLAN_HLEN;
		axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
		    ri->iri_flags, ri->iri_vtag);
	}


	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
		ri->iri_flowid = packet->rss_hash;
		ri->iri_rsstype = packet->rss_hash_type;
		axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
		    __func__, packet->rss_hash, ri->iri_flowid,
		    packet->rss_hash_type, ri->iri_rsstype);
	}

	/* Diagnostics only: log anomalies, but still hand the packet up. */
	if (__predict_false(len == 0))
		axgbe_error("%s: Zero len packet\n", __func__);

	if (__predict_false(len > max_len))
		axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len);

	if (__predict_false(packet->errors))
		axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
		    "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
		    ri->iri_cidx, ring->cur, ring->dirty, packet->errors);

	axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);

	ri->iri_len = len;
	ri->iri_nfrags = i;

	return (0);
}
778