/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "xgbe.h"
#include "xgbe-common.h"

/*
 * IFLIB interfaces
 */
static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);

struct if_txrx axgbe_txrx = {
	.ift_txd_encap = axgbe_isc_txd_encap,
	.ift_txd_flush = axgbe_isc_txd_flush,
	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
	.ift_rxd_available = axgbe_isc_rxd_available,
	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = axgbe_isc_rxd_refill,
	.ift_rxd_flush = axgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};
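
/*
 * Summary of the iflib contract implemented below (descriptive note):
 * txd_encap fills Tx descriptors for one packet, txd_flush rings the Tx
 * doorbell, txd_credits_update reclaims completed Tx descriptors,
 * rxd_available counts received frames ready for the stack, rxd_pkt_get
 * hands one frame's fragments to iflib, and rxd_refill/rxd_flush post
 * fresh Rx buffers and advance the Rx tail pointer.
 */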

static void
xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
{

	axgbe_printf(1, "------Packet Info Start------\n");
	axgbe_printf(1, "pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
	axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
	axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
	    pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
}

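/*
 * The hardware keeps the TSO MSS and VLAN tag in a separate context
 * descriptor. This helper writes one only when either value changed
 * since the last packet (ring->tx.cur_mss and ring->tx.cur_vlan_ctag
 * cache the values currently programmed) and returns true when a
 * context descriptor was consumed so the caller advances the ring.
 */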
static bool
axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
    if_pkt_info_t pi)
{
	struct xgbe_ring_desc	*rdesc;
	struct xgbe_ring_data	*rdata;
	bool inc_cur = false;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
	    pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);

	axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
	    pi->ipi_vtag, ring->tx.cur_vlan_ctag);

	if ((pi->ipi_csum_flags & CSUM_TSO) &&
	    (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
		/*
		 * Set TSO maximum segment size
		 * Mark as context descriptor
		 * Indicate this descriptor contains MSS
		 */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
		    MSS, pi->ipi_tso_segsz);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
		ring->tx.cur_mss = pi->ipi_tso_segsz;
		inc_cur = true;
	}

	if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
		/*
		 * Mark it as context descriptor
		 * Set the VLAN tag
		 * Indicate this descriptor contains the VLAN tag
		 */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
		    VT, pi->ipi_vtag);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
		ring->tx.cur_vlan_ctag = pi->ipi_vtag;
		inc_cur = true;
	}

	return (inc_cur);
}

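/*
 * Compute the number of wire frames and bytes this packet will produce.
 * For TSO the payload is walked in (max_len - hlen) chunks; e.g. with
 * assumed values MTU 1500 (max_len 1514), hlen 54 and a 2920-byte TCP
 * payload, the loop yields pkts = 2 and bytes = 2 * 1514 = 3028.
 */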
static uint16_t
axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
    struct xgbe_packet_data *packet)
{
	uint32_t tcp_payload_len = 0, bytes = 0;
	uint16_t max_len, hlen, payload_len, pkts = 0;

	packet->tx_packets = packet->tx_bytes = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	if (pi->ipi_csum_flags & CSUM_TSO) {
		tcp_payload_len = pi->ipi_len - hlen;
		axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
		    __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
		    pi->ipi_tcp_hlen);

		max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
		if (pi->ipi_vtag)
			max_len += VLAN_HLEN;

		while (tcp_payload_len) {
			payload_len = max_len - hlen;
			payload_len = min(payload_len, tcp_payload_len);
			tcp_payload_len -= payload_len;
			pkts++;
			bytes += (hlen + payload_len);
			axgbe_printf(1, "%s: max_len %d payload_len %d "
			    "tcp_len %d\n", __func__, max_len, payload_len,
			    tcp_payload_len);
		}
	} else {
		pkts = 1;
		bytes = pi->ipi_len;
	}

	packet->tx_packets = pkts;
	packet->tx_bytes = bytes;

	axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
	    packet->tx_packets, packet->tx_bytes, hlen);

	return (hlen);
}

static int
axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel;
	struct xgbe_ring	*ring;
	struct xgbe_ring_desc	*rdesc;
	struct xgbe_ring_data	*rdata;
	struct xgbe_packet_data *packet;
	unsigned int cur, start, tx_set_ic;
	uint16_t offset, hlen, datalen, tcp_payload_len = 0;
	int cur_seg = 0;

	xgbe_print_pkt_info(pdata, pi);

	channel = pdata->channel[pi->ipi_qsidx];
	ring = channel->tx_ring;
	packet = &ring->packet_data;
	cur = start = ring->cur;

	axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
	    __func__, pi->ipi_qsidx, ring->cur, ring->dirty);

	MPASS(pi->ipi_len != 0);
	if (__predict_false(pi->ipi_len == 0)) {
		axgbe_error("empty packet received from stack\n");
		return (0);
	}

	MPASS(ring->cur == pi->ipi_pidx);
	if (__predict_false(ring->cur != pi->ipi_pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pi->ipi_pidx);
	}

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	memset(packet, 0, sizeof(*packet));
	hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
	axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
	    __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);

	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
		tx_set_ic = 1;
	else
		tx_set_ic = 0;
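	/*
	 * Example (assumed values): with tx_frames = 25 and a burst whose
	 * tx_packets pushes coalesce_count across a multiple of 25, the
	 * modulo above drops below tx_packets and IC is set on this frame.
	 */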

	/* Add Context descriptor if needed (for TSO, VLAN cases) */
	if (axgbe_ctx_desc_setup(pdata, ring, pi))
		cur++;

	rdata = XGBE_GET_DESC_DATA(ring, cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
	    "ipi_len 0x%x\n", __func__, cur,
	    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));

	/* Update the buffer length */
	if (hlen == 0)
		hlen = pi->ipi_segs[cur_seg].ds_len;
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);

	/* VLAN tag insertion check */
	if (pi->ipi_vtag) {
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
		    TX_NORMAL_DESC2_VLAN_INSERT);
	}

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/*
	 * Set the OWN bit if this is not the first descriptor. For the
	 * first descriptor, the OWN bit is set last so that the hardware
	 * processes the chain only once the first descriptor is owned by
	 * hardware.
	 */
	if (cur != start)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);

		tcp_payload_len = pi->ipi_len - hlen;

		/* Set TCP payload length */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
		    tcp_payload_len);

		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
		    pi->ipi_tcp_hlen/4);

		axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
		    pi->ipi_tcp_hlen/4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (pi->ipi_csum_flags)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

		/* Set total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
	}

	cur++;

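	/*
	 * Map the remaining payload: for segment 0 the first hlen bytes
	 * were already posted by the first descriptor above, so only the
	 * tail of that DMA segment (if any) gets its own descriptor here.
	 */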
	for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {
		if (cur_seg == 0) {
			offset = hlen;
			datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
		} else {
			offset = 0;
			datalen = pi->ipi_segs[cur_seg].ds_len;
		}

		if (datalen) {
			rdata = XGBE_GET_DESC_DATA(ring, cur);
			rdesc = rdata->rdesc;

			/* Update buffer address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));

			/* Update the buffer length */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);

			/* Set OWN bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

			/* Mark it as NORMAL descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

			/* Enable HW CSUM */
			if (pi->ipi_csum_flags)
				XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

			axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
			    "ipi_len 0x%x\n", __func__, cur,
			    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

			cur++;
		}
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

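	/* The mask form of the wrap below assumes rdesc_count is a power of two. */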
	ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));

	axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
	    ring->dirty);

	return (0);
}

static void
axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[txqid];
	struct xgbe_ring	*ring = channel->tx_ring;
	struct xgbe_ring_data	*rdata = XGBE_GET_DESC_DATA(ring, pidx);

	axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
	    __func__, txqid, pidx, ring->cur, ring->dirty);

	MPASS(ring->cur == pidx);
	if (__predict_false(ring->cur != pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pidx);
	}

	wmb();

	/* Ring Doorbell */
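	/*
	 * The Tx tail pointer is written only when it differs from the
	 * current hardware value, skipping a redundant MMIO doorbell write.
	 */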
	if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) !=
	    lower_32_bits(rdata->rdata_paddr)) {
		XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
		    lower_32_bits(rdata->rdata_paddr));
	}
}

static int
axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_hw_if	*hw_if = &sc->pdata.hw_if;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel     *channel = pdata->channel[txqid];
	struct xgbe_ring	*ring = channel->tx_ring;
	struct xgbe_ring_data	*rdata;
	int processed = 0;

	axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
	    __func__, txqid, clear, ring->cur, ring->dirty);

	if (__predict_false(ring->cur == ring->dirty)) {
		axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
		    __func__, ring->cur, ring->dirty);
		return (0);
	}

	/* Check whether the first dirty descriptor is Tx complete */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	if (!hw_if->tx_complete(rdata->rdesc)) {
		axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
		return (0);
	}

	/*
	 * If clear is false just let the caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear) {
		axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
		return (1);
	}

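	/*
	 * Walk from dirty toward cur, resetting each completed descriptor;
	 * processed is returned to iflib as the number of Tx credits freed.
	 */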
	do {
		hw_if->tx_desc_reset(rdata);
		processed++;
		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);

		/*
		 * tx_complete will return true for unused descriptors as
		 * well, so check tx_complete only up to the last used
		 * descriptor.
		 */
		if (ring->cur == ring->dirty)
			break;

		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	} while (hw_if->tx_complete(rdata->rdesc));

	axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
	    processed, ring->cur, ring->dirty);

	return (processed);
}

static void
axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel     *channel = pdata->channel[iru->iru_qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data	*rdata;
	struct xgbe_ring_desc	*rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	uint8_t	count = iru->iru_count;
	int i, j;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
	    "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
	    iru->iru_pidx, count, ring->cur, ring->dirty);

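	/*
	 * With split header enabled each Rx slot uses two iflib free lists:
	 * flid 0 supplies the header buffer (desc0/desc1) and flid 1 the
	 * data buffer (desc2/desc3). OWN is handed to hardware only on the
	 * flid 1 pass, once both halves of the descriptor are filled.
	 */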
	for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {
		if (i == XGBE_RX_DESC_CNT_DEFAULT)
			i = 0;

		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
		    RX_NORMAL_DESC3, OWN))) {
			axgbe_error("%s: refill clash, cur %d dirty %d index %d "
			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
		}

		/* Assuming split header is enabled */
		if (iru->iru_flidx == 0) {
			/* Fill header/buffer1 address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
		} else {
			/* Fill data/buffer2 address */
			rdesc->desc2 =
			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
			rdesc->desc3 =
			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

			if (!rx_usecs && !rx_frames) {
				/* No coalescing, interrupt for every descriptor */
				inte = 1;
			} else {
				/* Set interrupt based on Rx frame coalescing setting */
				if (rx_frames &&
				    !(((ring->dirty + 1) & (ring->rdesc_count - 1)) % rx_frames))
					inte = 1;
				else
					inte = 0;
			}

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

			wmb();

			ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));
		}
	}

	axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
	    channel->queue_index, ring->cur, ring->dirty);
}

static void
axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel     *channel = pdata->channel[qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data	*rdata;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
	    __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);

	if (flidx == 1) {
		rdata = XGBE_GET_DESC_DATA(ring, pidx);

		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
		    lower_32_bits(rdata->rdata_paddr));
	}

	wmb();
}

static int
axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data 	*pdata = &sc->pdata;
	struct xgbe_channel     *channel = pdata->channel[qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data   *rdata;
	struct xgbe_ring_desc   *rdesc;
	unsigned int cur;
	int count;
	uint8_t incomplete = 1, context_next = 0, running = 0;

	axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
	    __func__, qsidx, idx, budget, ring->cur, ring->dirty);

	cur = ring->cur;
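	/*
	 * Count complete frames: a frame may span several descriptors and
	 * may be followed by a context descriptor, so count is bumped only
	 * once LD is seen with no further context descriptor pending.
	 */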
	for (count = 0; count <= budget; ) {
		rdata = XGBE_GET_DESC_DATA(ring, cur);
		rdesc = rdata->rdesc;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
			break;

		running = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
			incomplete = 0;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
			context_next = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
			context_next = 0;

		cur = (cur + 1) & (ring->rdesc_count - 1);

		if (incomplete || context_next)
			continue;

		/* Increment pkt count & reset variables for next full packet */
		count++;
		incomplete = 1;
		context_next = 0;
		running = 0;
	}

	axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
	    "count %d\n", __func__, qsidx, cur, incomplete, context_next,
	    running, count);

	return (count);
}

static unsigned int
xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet)
{

	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
		axgbe_printf(1, "%s: Not First\n", __func__);
		return (0);
	}

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len) {
		axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
		return (rdata->rx.hdr_len);
	}

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__,
		    pdata->rx_buf_size);
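		/*
		 * 256 here presumably corresponds to the fixed size of the
		 * header (buffer 1) allocation used with split header mode.
		 */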
		return (256);
	}

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
	    pdata->rx_buf_size);

	return (min_t(unsigned int, 256, rdata->rx.len));
}

static unsigned int
xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet, unsigned int len)
{

	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
		return (pdata->rx_buf_size);
	}

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return ((rdata->rx.len != 0) ? (rdata->rx.len - len) : 0);
}

static inline void
axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
    int pos, int flid)
{
	axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
	ri->iri_frags[pos].irf_flid = flid;
	ri->iri_frags[pos].irf_idx = idx;
	ri->iri_frags[pos].irf_len = len;
}

static int
axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct axgbe_if_softc   *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data 	*pdata = &sc->pdata;
	struct xgbe_hw_if	*hw_if = &pdata->hw_if;
	struct xgbe_channel     *channel = pdata->channel[ri->iri_qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct xgbe_ring_data	*rdata;
	unsigned int last, context_next, context;
	unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur;
	int i = 0;

	axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
	    ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);

	memset(packet, 0, sizeof(struct xgbe_packet_data));

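	/*
	 * Pull descriptors via dev_read() until the frame's last descriptor
	 * (and any trailing context descriptor) is consumed; each descriptor
	 * contributes its header buffer (flid 0) and data buffer (flid 1)
	 * as iflib fragments.
	 */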
	while (1) {

read_again:
		if (hw_if->dev_read(channel)) {
			axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
			    __func__, ring->cur);
			break;
		}

		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		prev_cur = ring->cur;
		ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST);

		context_next = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);

		context = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT);

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
			len += buf2_len;
		} else
			buf1_len = buf2_len = 0;

		if (packet->errors)
			axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
			    "buf2 %d len %d frags %d error %d\n", __func__, last, context,
			    context_next, buf1_len, buf2_len, len, i, packet->errors);

		axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
		i++;
		axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
		i++;

		if (!last || context_next)
			goto read_again;

		break;
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
		axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
	}

	max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
		ri->iri_flags |= M_VLANTAG;
		ri->iri_vtag = packet->vlan_ctag;
		max_len += VLAN_HLEN;
		axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
		    ri->iri_flags, ri->iri_vtag);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
		ri->iri_flowid = packet->rss_hash;
		ri->iri_rsstype = packet->rss_hash_type;
		axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
		    __func__, packet->rss_hash, ri->iri_flowid,
		    packet->rss_hash_type, ri->iri_rsstype);
	}

	if (__predict_false(len == 0))
		axgbe_error("%s: Zero len packet\n", __func__);

	if (__predict_false(len > max_len))
		axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len);

	if (__predict_false(packet->errors))
		axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
		    "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
		    ri->iri_cidx, ring->cur, ring->dirty, packet->errors);

	axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);

	ri->iri_len = len;
	ri->iri_nfrags = i;

	return (0);
}
777*7113afc8SEmmanuel Vadot }
778