/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "common/t4fw_interface.h"

struct fl_buf_info {
	int size;
	int type;
	uma_zone_t zone;
};

/* t4_sge_init will fill in the zones */
static struct fl_buf_info fl_buf_info[FL_BUF_SIZES] = {
	{ MCLBYTES, EXT_CLUSTER, NULL},
	{ MJUMPAGESIZE, EXT_JUMBOP, NULL},
	{ MJUM9BYTES, EXT_JUMBO9, NULL},
	{ MJUM16BYTES, EXT_JUMBO16, NULL}
};
#define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
#define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
#define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)
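
/*
 * The table above is expected to stay in ascending size order:
 * set_fl_tag_idx (below) presumably picks the smallest entry whose buffer
 * holds a whole frame at the current MTU.  The same four sizes are
 * programmed into SGE_FL_BUFFER_SIZE0-3 by t4_sge_init.
 */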

enum {
	FL_PKTSHIFT = 2
};

#define FL_ALIGN	min(CACHE_LINE_SIZE, 32)
#if CACHE_LINE_SIZE > 64
#define SPG_LEN		128
#else
#define SPG_LEN		64
#endif
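
/*
 * FL_ALIGN is the boundary ingress payloads are padded to, and SPG_LEN is
 * the size of an egress queue's status page.  SPG_LEN is bumped to 128
 * presumably so that the status page covers one full cache line on systems
 * with cache lines larger than 64 bytes.
 */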

/* Used to track a coalesced tx work request */
struct txpkts {
	uint64_t *flitp;	/* ptr to flit where next pkt should start */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t nflits;		/* # of flits used by this work request */
	uint16_t plen;		/* total payload (sum of all packets) */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
	int nflits;		/* # of flits needed for the SGL */
	bus_dma_segment_t seg[TX_SGL_SEGS];
};

static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
    int, iq_intr_handler_t *, char *);
static inline void init_fl(struct sge_fl *, int, char *);
static inline void init_txq(struct sge_txq *, int, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
    int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
static int alloc_iq(struct sge_iq *, int);
static int free_iq(struct sge_iq *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
static int free_rxq(struct port_info *, struct sge_rxq *);
static int alloc_txq(struct port_info *, struct sge_txq *, int);
static int free_txq(struct port_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
static inline void iq_next(struct sge_iq *);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static void refill_fl(struct sge_fl *, int);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct sge_fl *);
static int alloc_eq_maps(struct sge_eq *);
static void free_eq_maps(struct sge_eq *);
static struct mbuf *get_fl_sdesc_data(struct sge_fl *, int, int);
static void set_fl_tag_idx(struct sge_fl *, int);

static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
static int free_pkt_sgl(struct sge_txq *, struct sgl *);
static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
    struct sgl *);
static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
    struct mbuf *, struct sgl *);
static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
    struct txpkts *, struct mbuf *, struct sgl *);
static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_tx_db(struct adapter *, struct sge_eq *);
static int reclaim_tx_descs(struct sge_eq *, int, int);
static void write_eqflush_wr(struct sge_eq *);
static __be64 get_flit(bus_dma_segment_t *, int, int);

/**
 *	t4_sge_init - initialize SGE
 *	@sc: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here; instead, the top-level
 *	driver must request them individually.
 */
void
t4_sge_init(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	int i;

	FL_BUF_ZONE(0) = zone_clust;
	FL_BUF_ZONE(1) = zone_jumbop;
	FL_BUF_ZONE(2) = zone_jumbo9;
	FL_BUF_ZONE(3) = zone_jumbo16;

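	/*
	 * Field encodings worth noting: INGPADBOUNDARY stores
	 * log2(boundary) - 5 and HOSTPAGESIZEPF0 stores log2(page size) - 10,
	 * which is why ilog2(FL_ALIGN) - 5 and PAGE_SHIFT - 10 are written
	 * below.  FL_PKTSHIFT = 2 leaves the IP header 4 byte aligned after
	 * the 14 byte Ethernet header.
	 */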
	t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
			 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
			 F_EGRSTATUSPAGESIZE,
			 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
			 V_PKTSHIFT(FL_PKTSHIFT) |
			 F_RXPKTCPLMODE |
			 V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
	t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
			 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
			 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));

	for (i = 0; i < FL_BUF_SIZES; i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
		    FL_BUF_SIZE(i));
	}

	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
		     V_THRESHOLD_0(s->counter_val[0]) |
		     V_THRESHOLD_1(s->counter_val[1]) |
		     V_THRESHOLD_2(s->counter_val[2]) |
		     V_THRESHOLD_3(s->counter_val[3]));

	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
		     V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
		     V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
		     V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
		     V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
		     V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
		     V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue and the forwarded interrupt
 * queues, if any.  The adapter owns all these queues as they are not associated
 * with any particular port.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_iqs(struct adapter *sc)
{
	int i, rc;
	struct sge_iq *iq, *fwq;
	iq_intr_handler_t *handler;
	char name[16];

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	fwq = &sc->sge.fwq;
	if (sc->flags & INTR_FWD) {
		iq = &sc->sge.fiq[0];

		/*
		 * Forwarded interrupt queues - allocate 1 if there's only 1
		 * vector available, one less than the number of vectors
		 * otherwise (the first vector is reserved for the error
		 * interrupt in that case).
		 */
		i = sc->intr_count > 1 ? 1 : 0;
		for (; i < sc->intr_count; i++, iq++) {

			snprintf(name, sizeof(name), "%s fiq%d",
			    device_get_nameunit(sc->dev), i);
			init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
			    name);

			rc = alloc_iq(iq, i);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to create fwd intr queue %d: %d\n",
				    i, rc);
				return (rc);
			}
		}

		handler = t4_intr_evt;
		i = 0;	/* forward fwq's interrupt to the first fiq */
	} else {
		handler = NULL;
		i = 1;	/* fwq should use vector 1 (0 is used by error) */
	}

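	/*
	 * Recap of the vector layout: with INTR_FWD the fwq's events arrive
	 * via fiq[0]; without it, vector 0 is the error interrupt, the fwq
	 * takes vector 1, and the rx queues are given vector 2 onwards (see
	 * t4_setup_eth_queues).
	 */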
	snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
	rc = alloc_iq(fwq, i);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_iqs(struct adapter *sc)
{
	int i;
	struct sge_iq *iq;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	iq = &sc->sge.fwq;
	free_iq(iq);
	if (sc->flags & INTR_FWD) {
		for (i = 0; i < NFIQ(sc); i++) {
			iq = &sc->sge.fiq[i];
			free_iq(iq);
		}
	}

	return (0);
}

int
t4_setup_eth_queues(struct port_info *pi)
{
	int rc = 0, i, intr_idx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	char name[16];
	struct adapter *sc = pi->adapter;

	if (sysctl_ctx_init(&pi->ctx) == 0) {
		struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
		struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

		pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "rxq", CTLFLAG_RD, NULL, "rx queues");
		pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "txq", CTLFLAG_RD, NULL, "tx queues");
	}

	for_each_rxq(pi, i, rxq) {

		snprintf(name, sizeof(name), "%s rxq%d-iq",
		    device_get_nameunit(pi->dev), i);
		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
		    pi->qsize_rxq, RX_IQ_ESIZE,
		    sc->flags & INTR_FWD ? t4_intr_data : NULL, name);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(pi->dev), i);
		init_fl(&rxq->fl, pi->qsize_rxq / 8, name);

		if (sc->flags & INTR_FWD)
			intr_idx = (pi->first_rxq + i) % NFIQ(sc);
		else
			intr_idx = pi->first_rxq + i + 2;

		rc = alloc_rxq(pi, rxq, intr_idx, i);
		if (rc != 0)
			goto done;

		intr_idx++;
	}

	for_each_txq(pi, i, txq) {

		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(pi->dev), i);
		init_txq(txq, pi->qsize_txq, name);

		rc = alloc_txq(pi, txq, i);
		if (rc != 0)
			goto done;
	}

done:
	if (rc)
		t4_teardown_eth_queues(pi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_eth_queues(struct port_info *pi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;

	/* Do this before freeing the queues */
	if (pi->oid_txq || pi->oid_rxq) {
		sysctl_ctx_free(&pi->ctx);
		pi->oid_txq = pi->oid_rxq = NULL;
	}

	for_each_txq(pi, i, txq) {
		free_txq(pi, txq);
	}

	for_each_rxq(pi, i, rxq) {
		free_rxq(pi, rxq);
	}

	return (0);
}

/* Deals with errors and forwarded interrupts */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;

	t4_intr_err(arg);
	t4_intr_fwd(&sc->sge.fiq[0]);
}

/* Deals with forwarded interrupts on the given ingress queue */
void
t4_intr_fwd(void *arg)
{
	struct sge_iq *iq = arg, *q;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	int ndesc_pending = 0, ndesc_total = 0;
	int qid;

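	/*
	 * Each response here is an interrupt-forwarding entry; look up the
	 * ingress queue it refers to and run that queue's handler.  CIDX
	 * updates go back to the hardware in batches of qsize/4 so the queue
	 * never looks full to the SGE; the final write below re-arms the
	 * interrupt with this queue's holdoff parameters.
	 */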
	IQ_LOCK(iq);
	while (is_new_response(iq, &ctrl)) {

		rmb();

		/* Only interrupt muxing expected on this queue */
		KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
		    ("unexpected event on forwarded interrupt queue: %x",
		    G_RSPD_TYPE(ctrl->u.type_gen)));

		qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
		q = sc->sge.iqmap[qid];

		q->handler(q);

		ndesc_total++;
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}

		iq_next(iq);
	}
	IQ_UNLOCK(iq);

	if (ndesc_total > 0) {
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) |
		    V_SEINTARM(iq->intr_params));
	}
}

/* Deals with error interrupts */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	if (sc->intr_type == 1)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_slow_intr_handler(sc);
}

/* Deals with the firmware event queue */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	const struct rss_header *rss;
	int ndesc_pending = 0, ndesc_total = 0;

	KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));

	IQ_LOCK(iq);
	while (is_new_response(iq, &ctrl)) {

		rmb();

		rss = (const void *)iq->cdesc;

		/* Should only get CPL on this queue */
		KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
		    ("%s: unexpected type %d", __func__,
		    G_RSPD_TYPE(ctrl->u.type_gen)));

		switch (rss->opcode) {
		case CPL_FW4_MSG:
		case CPL_FW6_MSG: {
			const struct cpl_fw6_msg *cpl;

			cpl = (const void *)(rss + 1);
			if (cpl->type == FW6_TYPE_CMD_RPL)
				t4_handle_fw_rpl(sc, cpl->data);

			break;
			}
		case CPL_SGE_EGR_UPDATE: {
			const struct cpl_sge_egr_update *cpl;
			unsigned int qid;
			struct sge *s = &sc->sge;
			struct sge_txq *txq;

			cpl = (const void *)(rss + 1);
			qid = G_EGR_QID(ntohl(cpl->opcode_qid));
			txq = (void *)s->eqmap[qid - s->eq_start];
			txq->egr_update++;

			/* XXX: wake up stalled tx */

			break;
			}

		default:
			device_printf(sc->dev,
			    "can't handle CPL opcode %d.\n", rss->opcode);
		}

		ndesc_total++;
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}
		iq_next(iq);
	}
	IQ_UNLOCK(iq);

	if (ndesc_total > 0) {
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) |
		    V_SEINTARM(iq->intr_params));
	}
}

void
t4_intr_data(void *arg)
{
	struct sge_rxq *rxq = arg;
	struct sge_iq *iq = arg;
	struct rsp_ctrl *ctrl;
	struct sge_fl *fl = &rxq->fl;
	struct port_info *pi = rxq->port;
	struct ifnet *ifp = pi->ifp;
	struct adapter *sc = pi->adapter;
	const struct rss_header *rss;
	const struct cpl_rx_pkt *cpl;
	int ndescs = 0, rsp_type;
	uint32_t len;
	struct mbuf *m0, *m;
#ifdef INET
	struct lro_ctrl *lro = &rxq->lro;
	struct lro_entry *l;
#endif

	IQ_LOCK(iq);
	iq->intr_next = iq->intr_params;
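	/*
	 * intr_next is the SEINTARM value used on the way out.  It starts as
	 * the configured holdoff and is bumped to the last (presumably
	 * longest) timer if we run out of mbufs below, so this queue gets
	 * revisited soon rather than stalling.
	 */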
	while (is_new_response(iq, &ctrl)) {

		rmb();

		rss = (const void *)iq->cdesc;
		cpl = (const void *)(rss + 1);

		rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);

		if (__predict_false(rsp_type == X_RSPD_TYPE_CPL)) {
			const struct cpl_sge_egr_update *p = (const void *)cpl;
			unsigned int qid = G_EGR_QID(ntohl(p->opcode_qid));

			KASSERT(cpl->opcode == CPL_SGE_EGR_UPDATE,
			    ("unexpected opcode on data ingress queue: %x",
			    cpl->opcode));

			/* XXX: no one is waiting to be woken up... */
			wakeup(sc->sge.eqmap[qid - sc->sge.eq_start]);

			ndescs++;
			iq_next(iq);

			continue;
		}

		KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_FLBUF,
		    ("unexpected event on data ingress queue: %x",
		    G_RSPD_TYPE(ctrl->u.type_gen)));

		len = be32toh(ctrl->pldbuflen_qid);

		KASSERT(len & F_RSPD_NEWBUF,
		    ("%s: T4 misconfigured to pack buffers.", __func__));

		len = G_RSPD_LEN(len);
		m0 = get_fl_sdesc_data(fl, len, M_PKTHDR);
		if (m0 == NULL) {
			iq->intr_next = V_QINTR_TIMER_IDX(SGE_NTIMERS - 1);
			break;
		}

		len -= FL_PKTSHIFT;
		m0->m_len -= FL_PKTSHIFT;
		m0->m_data += FL_PKTSHIFT;

		m0->m_pkthdr.len = len;
		m0->m_pkthdr.rcvif = ifp;
		m0->m_flags |= M_FLOWID;
		m0->m_pkthdr.flowid = rss->hash_val;

		if (cpl->csum_calc && !cpl->err_vec &&
		    ifp->if_capenable & IFCAP_RXCSUM) {
			m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			if (cpl->ip_frag)
				m0->m_pkthdr.csum_data = be16toh(cpl->csum);
			else
				m0->m_pkthdr.csum_data = 0xffff;
			rxq->rxcsum++;
		}

		if (cpl->vlan_ex) {
			m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
			m0->m_flags |= M_VLANTAG;
			rxq->vlan_extraction++;
		}

		len -= m0->m_len;
		m = m0;
		while (len) {
			m->m_next = get_fl_sdesc_data(fl, len, 0);
			if (m->m_next == NULL)
				CXGBE_UNIMPLEMENTED("mbuf recovery");

			m = m->m_next;
			len -= m->m_len;
		}
#ifdef INET
		if (cpl->l2info & htobe32(F_RXF_LRO) &&
		    rxq->flags & RXQ_LRO_ENABLED &&
		    tcp_lro_rx(lro, m0, 0) == 0) {
			/* queued for LRO */
		} else
#endif
			(*ifp->if_input)(ifp, m0);

		FL_LOCK(fl);
		if (fl->needed >= 32) {
			refill_fl(fl, 64);
			if (fl->pending >= 32)
				ring_fl_db(sc, fl);
		}
		FL_UNLOCK(fl);

		ndescs++;
		iq_next(iq);

		if (ndescs > 32) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndescs) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndescs = 0;
		}
	}

#ifdef INET
	while (!SLIST_EMPTY(&lro->lro_active)) {
		l = SLIST_FIRST(&lro->lro_active);
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, l);
	}
#endif

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));

	IQ_UNLOCK(iq);

	FL_LOCK(fl);
	if (fl->needed) {
		refill_fl(fl, -1);
		if (fl->pending >= 8)
			ring_fl_db(sc, fl);
	}
	FL_UNLOCK(fl);
}

/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
#define TXPKTS_PKT_HDR ((\
    sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
#define TXPKTS_WR_HDR (\
    sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
    TXPKTS_PKT_HDR)

/* Header of a tx WR, before SGL of first packet (in flits) */
#define TXPKT_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )

/* Header of a tx LSO WR, before SGL of first packet (in flits) */
#define TXPKT_LSO_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_lso) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )
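
/*
 * A flit is one 8 byte (64 bit) word, which is why the header sizes above
 * divide by 8; the WR-building routines below count space in flits.
 */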

int
t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
{
	struct port_info *pi = (void *)ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	struct buf_ring *br = eq->br;
	struct mbuf *next;
	int rc, coalescing;
	struct txpkts txpkts;
	struct sgl sgl;

	TXQ_LOCK_ASSERT_OWNED(txq);
	KASSERT(m, ("%s: called with nothing to do.", __func__));

	txpkts.npkt = 0;	/* indicates there's nothing in txpkts */
	coalescing = 0;

	prefetch(&eq->sdesc[eq->pidx]);
	prefetch(&eq->desc[eq->pidx]);
	prefetch(&eq->maps[eq->map_pidx]);

	if (eq->avail < 8)
		reclaim_tx_descs(eq, 1, 8);
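
	/*
	 * The "avail < 8" checks here and below keep a small reserve of
	 * hardware descriptors on hand; 8 is presumably enough room for the
	 * largest work request this function writes in one shot.
	 */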

	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {

		if (eq->avail < 8)
			break;

		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (next || buf_ring_peek(br))
			coalescing = 1;

		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
		if (rc != 0) {
			if (rc == ENOMEM) {

				/* Short of resources, suspend tx */

				m->m_nextpkt = next;
				break;
			}

			/*
			 * Unrecoverable error for this packet, throw it away
			 * and move on to the next.  get_pkt_sgl may already
			 * have freed m (it will be NULL in that case and the
			 * m_freem here is still safe).
			 */

			m_freem(m);
			continue;
		}

		if (coalescing &&
		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {

			/* Successfully absorbed into txpkts */

			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
			goto doorbell;
		}

		/*
		 * We weren't coalescing to begin with, or the current frame
		 * could not be coalesced (add_to_txpkts flushes txpkts if a
		 * frame given to it can't be coalesced).  Either way there
		 * should be nothing in txpkts.
		 */
		KASSERT(txpkts.npkt == 0,
		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));

		/* We're sending out individual packets now */
		coalescing = 0;

		if (eq->avail < 8)
			reclaim_tx_descs(eq, 1, 8);
		rc = write_txpkt_wr(pi, txq, m, &sgl);
		if (rc != 0) {

			/* Short of hardware descriptors, suspend tx */

			/*
			 * This is an unlikely but expensive failure.  We've
			 * done all the hard work (DMA mappings etc.) and now we
			 * can't send out the packet.  What's worse, we have to
			 * spend even more time freeing up everything in sgl.
			 */
			txq->no_desc++;
			free_pkt_sgl(txq, &sgl);

			m->m_nextpkt = next;
			break;
		}

		ETHER_BPF_MTAP(ifp, m);
		if (sgl.nsegs == 0)
			m_freem(m);

doorbell:
		/* Fewer and fewer doorbells as the queue fills up */
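		/*
		 * (The threshold below works out to roughly the square root
		 * of the number of descriptors in use, so doorbells get
		 * rarer as the queue fills.)
		 */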
		if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
			ring_tx_db(sc, eq);
		reclaim_tx_descs(eq, 16, 32);
	}

	if (txpkts.npkt > 0)
		write_txpkts_wr(txq, &txpkts);

	/*
	 * m not NULL means there was an error but we haven't thrown it away.
	 * This can happen when we're short of tx descriptors (no_desc) or maybe
	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
	 * will get things going again.
	 *
	 * If eq->avail is already 0 we know a credit flush was requested in the
	 * WR that reduced it to 0 so we don't need another flush (we don't have
	 * any descriptor for a flush WR anyway, duh).
	 */
	if (m && eq->avail > 0)
		write_eqflush_wr(eq);
	txq->m = m;

	if (eq->pending)
		ring_tx_db(sc, eq);

	reclaim_tx_descs(eq, 16, eq->qsize);

	return (0);
}

void
t4_update_fl_bufsize(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_rxq *rxq;
	struct sge_fl *fl;
	int i;

	for_each_rxq(pi, i, rxq) {
		fl = &rxq->fl;

		FL_LOCK(fl);
		set_fl_tag_idx(fl, ifp->if_mtu);
		FL_UNLOCK(fl);
	}
}

/*
 * A non-NULL handler indicates this iq will not receive direct interrupts;
 * the handler will be invoked by a forwarded interrupt queue instead.
 */
static inline void
init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
    int qsize, int esize, iq_intr_handler_t *handler, char *name)
{
	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
	    ("%s: bad pktc_idx %d", __func__, pktc_idx));

	iq->flags = 0;
	iq->adapter = sc;
	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
	    V_QINTR_CNT_EN(pktc_idx >= 0);
	iq->intr_pktc_idx = pktc_idx;
	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
	iq->handler = handler;
	strlcpy(iq->lockname, name, sizeof(iq->lockname));
}

static inline void
init_fl(struct sge_fl *fl, int qsize, char *name)
{
	fl->qsize = qsize;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
}

static inline void
init_txq(struct sge_txq *txq, int qsize, char *name)
{
	txq->eq.qsize = qsize;
	strlcpy(txq->eq.lockname, name, sizeof(txq->eq.lockname));
}

static int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

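	/*
	 * 512 byte alignment for the ring itself; presumably this satisfies
	 * the SGE's base-address alignment requirement for descriptor rings.
	 */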
	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}

static int
free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t pa, void *va)
{
	if (pa)
		bus_dmamap_unload(tag, map);
	if (va)
		bus_dmamem_free(tag, va, map);
	if (tag)
		bus_dma_tag_destroy(tag);

	return (0);
}

/*
 * Allocates the ring for an ingress queue and an optional freelist.  If the
 * freelist is specified it will be allocated and then associated with the
 * ingress queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 *
 * If the ingress queue will take interrupts directly (iq->handler == NULL) then
 * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
 * the index of the queue to which its interrupts will be forwarded.
 */
static int
alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
    int intr_idx)
{
	int rc, i, cntxt_id;
	size_t len;
	struct fw_iq_cmd c;
	struct adapter *sc = iq->adapter;
	__be32 v = 0;

	/* The adapter queues are nominally allocated in port[0]'s name */
	if (pi == NULL)
		pi = sc->port[0];

	mtx_init(&iq->iq_lock, iq->lockname, NULL, MTX_DEF);

	len = iq->qsize * iq->esize;
	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
	    (void **)&iq->desc);
	if (rc != 0)
		return (rc);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));

	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));

	/* Special handling for firmware event queue */
	if (iq == &sc->sge.fwq)
		v |= F_FW_IQ_CMD_IQASYNCH;

	if (iq->handler) {
		KASSERT(intr_idx < NFIQ(sc),
		    ("%s: invalid indirect intr_idx %d", __func__, intr_idx));
		v |= F_FW_IQ_CMD_IQANDST;
		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id);
	} else {
		KASSERT(intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
	}

	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
	c.iqsize = htobe16(iq->qsize);
	c.iqaddr = htobe64(iq->ba);

	if (fl) {
		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);

		for (i = 0; i < FL_BUF_SIZES; i++) {

			/*
			 * A freelist buffer must be 16 byte aligned as the SGE
			 * uses the low 4 bits of the bus addr to figure out the
			 * buffer size.
			 */
			rc = bus_dma_tag_create(sc->dmat, 16, 0,
			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
			    NULL, NULL, &fl->tag[i]);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to create fl DMA tag[%d]: %d\n",
				    i, rc);
				return (rc);
			}
		}
		len = fl->qsize * RX_FL_ESIZE;
		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
		    &fl->ba, (void **)&fl->desc);
		if (rc)
			return (rc);

		/* Allocate space for one software descriptor per buffer. */
		fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
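		/*
		 * Reading of the line above: SPG_LEN / RX_FL_ESIZE hardware
		 * descriptors are set aside for the status page, and each
		 * remaining descriptor appears to hold 8 buffer pointers,
		 * hence the multiply by 8.
		 */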
1044*54e4ee71SNavdeep Parhar 		FL_LOCK(fl);
1045*54e4ee71SNavdeep Parhar 		set_fl_tag_idx(fl, pi->ifp->if_mtu);
1046*54e4ee71SNavdeep Parhar 		rc = alloc_fl_sdesc(fl);
1047*54e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
1048*54e4ee71SNavdeep Parhar 		if (rc != 0) {
1049*54e4ee71SNavdeep Parhar 			device_printf(sc->dev,
1050*54e4ee71SNavdeep Parhar 			    "failed to setup fl software descriptors: %d\n",
1051*54e4ee71SNavdeep Parhar 			    rc);
1052*54e4ee71SNavdeep Parhar 			return (rc);
1053*54e4ee71SNavdeep Parhar 		}
1054*54e4ee71SNavdeep Parhar 		fl->needed = fl->cap - 1; /* one less to avoid cidx = pidx */
1055*54e4ee71SNavdeep Parhar 
1056*54e4ee71SNavdeep Parhar 		c.iqns_to_fl0congen =
1057*54e4ee71SNavdeep Parhar 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE));
1058*54e4ee71SNavdeep Parhar 		c.fl0dcaen_to_fl0cidxfthresh =
1059*54e4ee71SNavdeep Parhar 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
1060*54e4ee71SNavdeep Parhar 			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
1061*54e4ee71SNavdeep Parhar 		c.fl0size = htobe16(fl->qsize);
1062*54e4ee71SNavdeep Parhar 		c.fl0addr = htobe64(fl->ba);
1063*54e4ee71SNavdeep Parhar 	}
1064*54e4ee71SNavdeep Parhar 
1065*54e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1066*54e4ee71SNavdeep Parhar 	if (rc != 0) {
1067*54e4ee71SNavdeep Parhar 		device_printf(sc->dev,
1068*54e4ee71SNavdeep Parhar 		    "failed to create ingress queue: %d\n", rc);
1069*54e4ee71SNavdeep Parhar 		return (rc);
1070*54e4ee71SNavdeep Parhar 	}
1071*54e4ee71SNavdeep Parhar 
1072*54e4ee71SNavdeep Parhar 	iq->cdesc = iq->desc;
1073*54e4ee71SNavdeep Parhar 	iq->cidx = 0;
1074*54e4ee71SNavdeep Parhar 	iq->gen = 1;
1075*54e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
1076*54e4ee71SNavdeep Parhar 	iq->cntxt_id = be16toh(c.iqid);
1077*54e4ee71SNavdeep Parhar 	iq->abs_id = be16toh(c.physiqid);
1078*54e4ee71SNavdeep Parhar 	iq->flags |= (IQ_ALLOCATED | IQ_STARTED);
1079*54e4ee71SNavdeep Parhar 
1080*54e4ee71SNavdeep Parhar 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1081*54e4ee71SNavdeep Parhar 	KASSERT(cntxt_id < sc->sge.niq,
1082*54e4ee71SNavdeep Parhar 	    ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1083*54e4ee71SNavdeep Parhar 	    cntxt_id, sc->sge.niq - 1));
1084*54e4ee71SNavdeep Parhar 	sc->sge.iqmap[cntxt_id] = iq;
1085*54e4ee71SNavdeep Parhar 
1086*54e4ee71SNavdeep Parhar 	if (fl) {
1087*54e4ee71SNavdeep Parhar 		fl->cntxt_id = be16toh(c.fl0id);
1088*54e4ee71SNavdeep Parhar 		fl->pidx = fl->cidx = 0;
1089*54e4ee71SNavdeep Parhar 
1090*54e4ee71SNavdeep Parhar 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1091*54e4ee71SNavdeep Parhar 		KASSERT(cntxt_id < sc->sge.neq,
1092*54e4ee71SNavdeep Parhar 		    ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__,
1093*54e4ee71SNavdeep Parhar 		    cntxt_id, sc->sge.neq - 1));
1094*54e4ee71SNavdeep Parhar 		sc->sge.eqmap[cntxt_id] = (void *)fl;
1095*54e4ee71SNavdeep Parhar 
1096*54e4ee71SNavdeep Parhar 		FL_LOCK(fl);
1097*54e4ee71SNavdeep Parhar 		refill_fl(fl, -1);
1098*54e4ee71SNavdeep Parhar 		if (fl->pending >= 8)
1099*54e4ee71SNavdeep Parhar 			ring_fl_db(sc, fl);
1100*54e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
1101*54e4ee71SNavdeep Parhar 	}
1102*54e4ee71SNavdeep Parhar 
1103*54e4ee71SNavdeep Parhar 	/* Enable IQ interrupts */
1104*54e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
1105*54e4ee71SNavdeep Parhar 	    V_INGRESSQID(iq->cntxt_id));
1106*54e4ee71SNavdeep Parhar 
1107*54e4ee71SNavdeep Parhar 	return (0);
1108*54e4ee71SNavdeep Parhar }
1109*54e4ee71SNavdeep Parhar 
1110*54e4ee71SNavdeep Parhar /*
1111*54e4ee71SNavdeep Parhar  * This can be called with the iq/fl in any state - fully allocated and
1112*54e4ee71SNavdeep Parhar  * functional, partially allocated, even all-zeroed out.
1113*54e4ee71SNavdeep Parhar  */
1114*54e4ee71SNavdeep Parhar static int
1115*54e4ee71SNavdeep Parhar free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
1116*54e4ee71SNavdeep Parhar {
1117*54e4ee71SNavdeep Parhar 	int i, rc;
1118*54e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
1119*54e4ee71SNavdeep Parhar 	device_t dev;
1120*54e4ee71SNavdeep Parhar 
1121*54e4ee71SNavdeep Parhar 	if (sc == NULL)
1122*54e4ee71SNavdeep Parhar 		return (0);	/* nothing to do */
1123*54e4ee71SNavdeep Parhar 
1124*54e4ee71SNavdeep Parhar 	dev = pi ? pi->dev : sc->dev;
1125*54e4ee71SNavdeep Parhar 
1126*54e4ee71SNavdeep Parhar 	if (iq->flags & IQ_STARTED) {
1127*54e4ee71SNavdeep Parhar 		rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0,
1128*54e4ee71SNavdeep Parhar 		    iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
1129*54e4ee71SNavdeep Parhar 		if (rc != 0) {
1130*54e4ee71SNavdeep Parhar 			device_printf(dev,
1131*54e4ee71SNavdeep Parhar 			    "failed to stop queue %p: %d\n", iq, rc);
1132*54e4ee71SNavdeep Parhar 			return (rc);
1133*54e4ee71SNavdeep Parhar 		}
1134*54e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_STARTED;
1135*54e4ee71SNavdeep Parhar 	}
1136*54e4ee71SNavdeep Parhar 
1137*54e4ee71SNavdeep Parhar 	if (iq->flags & IQ_ALLOCATED) {
1138*54e4ee71SNavdeep Parhar 
1139*54e4ee71SNavdeep Parhar 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
1140*54e4ee71SNavdeep Parhar 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
1141*54e4ee71SNavdeep Parhar 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
1142*54e4ee71SNavdeep Parhar 		if (rc != 0) {
1143*54e4ee71SNavdeep Parhar 			device_printf(dev,
1144*54e4ee71SNavdeep Parhar 			    "failed to free queue %p: %d\n", iq, rc);
1145*54e4ee71SNavdeep Parhar 			return (rc);
1146*54e4ee71SNavdeep Parhar 		}
1147*54e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_ALLOCATED;
1148*54e4ee71SNavdeep Parhar 	}
1149*54e4ee71SNavdeep Parhar 
1150*54e4ee71SNavdeep Parhar 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
1151*54e4ee71SNavdeep Parhar 
1152*54e4ee71SNavdeep Parhar 	if (mtx_initialized(&iq->iq_lock))
1153*54e4ee71SNavdeep Parhar 		mtx_destroy(&iq->iq_lock);
1154*54e4ee71SNavdeep Parhar 
1155*54e4ee71SNavdeep Parhar 	bzero(iq, sizeof(*iq));
1156*54e4ee71SNavdeep Parhar 
1157*54e4ee71SNavdeep Parhar 	if (fl) {
1158*54e4ee71SNavdeep Parhar 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
1159*54e4ee71SNavdeep Parhar 		    fl->desc);
1160*54e4ee71SNavdeep Parhar 
1161*54e4ee71SNavdeep Parhar 		if (fl->sdesc) {
1162*54e4ee71SNavdeep Parhar 			FL_LOCK(fl);
1163*54e4ee71SNavdeep Parhar 			free_fl_sdesc(fl);
1164*54e4ee71SNavdeep Parhar 			FL_UNLOCK(fl);
1165*54e4ee71SNavdeep Parhar 		}
1166*54e4ee71SNavdeep Parhar 
1167*54e4ee71SNavdeep Parhar 		if (mtx_initialized(&fl->fl_lock))
1168*54e4ee71SNavdeep Parhar 			mtx_destroy(&fl->fl_lock);
1169*54e4ee71SNavdeep Parhar 
1170*54e4ee71SNavdeep Parhar 		for (i = 0; i < FL_BUF_SIZES; i++) {
1171*54e4ee71SNavdeep Parhar 			if (fl->tag[i])
1172*54e4ee71SNavdeep Parhar 				bus_dma_tag_destroy(fl->tag[i]);
1173*54e4ee71SNavdeep Parhar 		}
1174*54e4ee71SNavdeep Parhar 
1175*54e4ee71SNavdeep Parhar 		bzero(fl, sizeof(*fl));
1176*54e4ee71SNavdeep Parhar 	}
1177*54e4ee71SNavdeep Parhar 
1178*54e4ee71SNavdeep Parhar 	return (0);
1179*54e4ee71SNavdeep Parhar }
1180*54e4ee71SNavdeep Parhar 
1181*54e4ee71SNavdeep Parhar static int
1182*54e4ee71SNavdeep Parhar alloc_iq(struct sge_iq *iq, int intr_idx)
1183*54e4ee71SNavdeep Parhar {
1184*54e4ee71SNavdeep Parhar 	return alloc_iq_fl(NULL, iq, NULL, intr_idx);
1185*54e4ee71SNavdeep Parhar }
1186*54e4ee71SNavdeep Parhar 
1187*54e4ee71SNavdeep Parhar static int
1188*54e4ee71SNavdeep Parhar free_iq(struct sge_iq *iq)
1189*54e4ee71SNavdeep Parhar {
1190*54e4ee71SNavdeep Parhar 	return free_iq_fl(NULL, iq, NULL);
1191*54e4ee71SNavdeep Parhar }
1192*54e4ee71SNavdeep Parhar 
1193*54e4ee71SNavdeep Parhar static int
1194*54e4ee71SNavdeep Parhar alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
1195*54e4ee71SNavdeep Parhar {
1196*54e4ee71SNavdeep Parhar 	int rc;
1197*54e4ee71SNavdeep Parhar 	struct sysctl_oid *oid;
1198*54e4ee71SNavdeep Parhar 	struct sysctl_oid_list *children;
1199*54e4ee71SNavdeep Parhar 	char name[16];
1200*54e4ee71SNavdeep Parhar 
1201*54e4ee71SNavdeep Parhar 	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx);
1202*54e4ee71SNavdeep Parhar 	if (rc != 0)
1203*54e4ee71SNavdeep Parhar 		return (rc);
1204*54e4ee71SNavdeep Parhar 
1205*54e4ee71SNavdeep Parhar #ifdef INET
1206*54e4ee71SNavdeep Parhar 	rc = tcp_lro_init(&rxq->lro);
1207*54e4ee71SNavdeep Parhar 	if (rc != 0)
1208*54e4ee71SNavdeep Parhar 		return (rc);
1209*54e4ee71SNavdeep Parhar 	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
1210*54e4ee71SNavdeep Parhar 
1211*54e4ee71SNavdeep Parhar 	if (pi->ifp->if_capenable & IFCAP_LRO)
1212*54e4ee71SNavdeep Parhar 		rxq->flags |= RXQ_LRO_ENABLED;
1213*54e4ee71SNavdeep Parhar #endif
1214*54e4ee71SNavdeep Parhar 	rxq->port = pi;
1215*54e4ee71SNavdeep Parhar 
1216*54e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(pi->oid_rxq);
1217*54e4ee71SNavdeep Parhar 
1218*54e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
1219*54e4ee71SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1220*54e4ee71SNavdeep Parhar 	    NULL, "rx queue");
1221*54e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
1222*54e4ee71SNavdeep Parhar 
1223*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
1224*54e4ee71SNavdeep Parhar 	    &rxq->lro.lro_queued, 0, NULL);
1225*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
1226*54e4ee71SNavdeep Parhar 	    &rxq->lro.lro_flushed, 0, NULL);
1227*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
1228*54e4ee71SNavdeep Parhar 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
1229*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
1230*54e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &rxq->vlan_extraction,
1231*54e4ee71SNavdeep Parhar 	    "# of times hardware extracted 802.1Q tag");
1232*54e4ee71SNavdeep Parhar 
1233*54e4ee71SNavdeep Parhar 	return (rc);
1234*54e4ee71SNavdeep Parhar }
1235*54e4ee71SNavdeep Parhar 
1236*54e4ee71SNavdeep Parhar static int
1237*54e4ee71SNavdeep Parhar free_rxq(struct port_info *pi, struct sge_rxq *rxq)
1238*54e4ee71SNavdeep Parhar {
1239*54e4ee71SNavdeep Parhar 	int rc;
1240*54e4ee71SNavdeep Parhar 
1241*54e4ee71SNavdeep Parhar #ifdef INET
1242*54e4ee71SNavdeep Parhar 	if (rxq->lro.ifp) {
1243*54e4ee71SNavdeep Parhar 		tcp_lro_free(&rxq->lro);
1244*54e4ee71SNavdeep Parhar 		rxq->lro.ifp = NULL;
1245*54e4ee71SNavdeep Parhar 	}
1246*54e4ee71SNavdeep Parhar #endif
1247*54e4ee71SNavdeep Parhar 
1248*54e4ee71SNavdeep Parhar 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
1249*54e4ee71SNavdeep Parhar 	if (rc == 0)
1250*54e4ee71SNavdeep Parhar 		bzero(rxq, sizeof(*rxq));
1251*54e4ee71SNavdeep Parhar 
1252*54e4ee71SNavdeep Parhar 	return (rc);
1253*54e4ee71SNavdeep Parhar }
1254*54e4ee71SNavdeep Parhar 
1255*54e4ee71SNavdeep Parhar static int
1256*54e4ee71SNavdeep Parhar alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
1257*54e4ee71SNavdeep Parhar {
1258*54e4ee71SNavdeep Parhar 	int rc, cntxt_id;
1259*54e4ee71SNavdeep Parhar 	size_t len;
1260*54e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
1261*54e4ee71SNavdeep Parhar 	struct fw_eq_eth_cmd c;
1262*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1263*54e4ee71SNavdeep Parhar 	char name[16];
1264*54e4ee71SNavdeep Parhar 	struct sysctl_oid *oid;
1265*54e4ee71SNavdeep Parhar 	struct sysctl_oid_list *children;
1266*54e4ee71SNavdeep Parhar 
1267*54e4ee71SNavdeep Parhar 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
1268*54e4ee71SNavdeep Parhar 
1269*54e4ee71SNavdeep Parhar 	len = eq->qsize * TX_EQ_ESIZE;
1270*54e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
1271*54e4ee71SNavdeep Parhar 	    &eq->ba, (void **)&eq->desc);
1272*54e4ee71SNavdeep Parhar 	if (rc)
1273*54e4ee71SNavdeep Parhar 		return (rc);
1274*54e4ee71SNavdeep Parhar 
1275*54e4ee71SNavdeep Parhar 	eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
1276*54e4ee71SNavdeep Parhar 	eq->spg = (void *)&eq->desc[eq->cap];
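	/*
	 * Assuming SPG_LEN equals TX_EQ_ESIZE (64 bytes), the last ring
	 * entry is set aside as the status page and cap counts the usable
	 * hardware descriptors in front of it.
	 */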
1277*54e4ee71SNavdeep Parhar 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
1278*54e4ee71SNavdeep Parhar 	eq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
1279*54e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
1280*54e4ee71SNavdeep Parhar 	eq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
1281*54e4ee71SNavdeep Parhar 
1282*54e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
1283*54e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
1284*54e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &eq->tx_tag);
1285*54e4ee71SNavdeep Parhar 	if (rc != 0) {
1286*54e4ee71SNavdeep Parhar 		device_printf(sc->dev,
1287*54e4ee71SNavdeep Parhar 		    "failed to create tx DMA tag: %d\n", rc);
1288*54e4ee71SNavdeep Parhar 		return (rc);
1289*54e4ee71SNavdeep Parhar 	}
1290*54e4ee71SNavdeep Parhar 
1291*54e4ee71SNavdeep Parhar 	rc = alloc_eq_maps(eq);
1292*54e4ee71SNavdeep Parhar 	if (rc != 0) {
1293*54e4ee71SNavdeep Parhar 		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
1294*54e4ee71SNavdeep Parhar 		return (rc);
1295*54e4ee71SNavdeep Parhar 	}
1296*54e4ee71SNavdeep Parhar 
1297*54e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
1298*54e4ee71SNavdeep Parhar 
1299*54e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
1300*54e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
1301*54e4ee71SNavdeep Parhar 	    V_FW_EQ_ETH_CMD_VFN(0));
1302*54e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
1303*54e4ee71SNavdeep Parhar 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
1304*54e4ee71SNavdeep Parhar 	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
1305*54e4ee71SNavdeep Parhar 	c.fetchszm_to_iqid =
1306*54e4ee71SNavdeep Parhar 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1307*54e4ee71SNavdeep Parhar 		V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
1308*54e4ee71SNavdeep Parhar 		V_FW_EQ_ETH_CMD_IQID(sc->sge.rxq[pi->first_rxq].iq.cntxt_id));
1309*54e4ee71SNavdeep Parhar 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1310*54e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1311*54e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1312*54e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
1313*54e4ee71SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
1314*54e4ee71SNavdeep Parhar 
1315*54e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1316*54e4ee71SNavdeep Parhar 	if (rc != 0) {
1317*54e4ee71SNavdeep Parhar 		device_printf(pi->dev,
1318*54e4ee71SNavdeep Parhar 		    "failed to create egress queue: %d\n", rc);
1319*54e4ee71SNavdeep Parhar 		return (rc);
1320*54e4ee71SNavdeep Parhar 	}
1321*54e4ee71SNavdeep Parhar 
1322*54e4ee71SNavdeep Parhar 	eq->pidx = eq->cidx = 0;
1323*54e4ee71SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
1324*54e4ee71SNavdeep Parhar 	eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
1325*54e4ee71SNavdeep Parhar 
1326*54e4ee71SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1327*54e4ee71SNavdeep Parhar 	KASSERT(cntxt_id < sc->sge.neq,
1328*54e4ee71SNavdeep Parhar 	    ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1329*54e4ee71SNavdeep Parhar 	    cntxt_id, sc->sge.neq - 1));
1330*54e4ee71SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
1331*54e4ee71SNavdeep Parhar 
1332*54e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(pi->oid_txq);
1333*54e4ee71SNavdeep Parhar 
1334*54e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
1335*54e4ee71SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1336*54e4ee71SNavdeep Parhar 	    NULL, "tx queue");
1337*54e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
1338*54e4ee71SNavdeep Parhar 
1339*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
1340*54e4ee71SNavdeep Parhar 	    &txq->txcsum, "# of times hardware assisted with checksum");
1341*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
1342*54e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &txq->vlan_insertion,
1343*54e4ee71SNavdeep Parhar 	    "# of times hardware inserted 802.1Q tag");
1344*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
1345*54e4ee71SNavdeep Parhar 	    &txq->tso_wrs, "# of IPv4 TSO work requests");
1346*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
1347*54e4ee71SNavdeep Parhar 	    &txq->imm_wrs, "# of work requests with immediate data");
1348*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
1349*54e4ee71SNavdeep Parhar 	    &txq->sgl_wrs, "# of work requests with direct SGL");
1350*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
1351*54e4ee71SNavdeep Parhar 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
1352*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
1353*54e4ee71SNavdeep Parhar 	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
1354*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
1355*54e4ee71SNavdeep Parhar 	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
1356*54e4ee71SNavdeep Parhar 
1357*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
1358*54e4ee71SNavdeep Parhar 	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
1359*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
1360*54e4ee71SNavdeep Parhar 	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
1361*54e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
1362*54e4ee71SNavdeep Parhar 	    &txq->egr_update, 0, "egress update notifications from the SGE");
1363*54e4ee71SNavdeep Parhar 
1364*54e4ee71SNavdeep Parhar 	return (rc);
1365*54e4ee71SNavdeep Parhar }
1366*54e4ee71SNavdeep Parhar 
1367*54e4ee71SNavdeep Parhar static int
1368*54e4ee71SNavdeep Parhar free_txq(struct port_info *pi, struct sge_txq *txq)
1369*54e4ee71SNavdeep Parhar {
1370*54e4ee71SNavdeep Parhar 	int rc;
1371*54e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
1372*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1373*54e4ee71SNavdeep Parhar 
1374*54e4ee71SNavdeep Parhar 	if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
1375*54e4ee71SNavdeep Parhar 		rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
1376*54e4ee71SNavdeep Parhar 		if (rc != 0) {
1377*54e4ee71SNavdeep Parhar 			device_printf(pi->dev,
1378*54e4ee71SNavdeep Parhar 			    "failed to free egress queue %p: %d\n", eq, rc);
1379*54e4ee71SNavdeep Parhar 			return (rc);
1380*54e4ee71SNavdeep Parhar 		}
1381*54e4ee71SNavdeep Parhar 		eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
1382*54e4ee71SNavdeep Parhar 	}
1383*54e4ee71SNavdeep Parhar 
1384*54e4ee71SNavdeep Parhar 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
1385*54e4ee71SNavdeep Parhar 
1386*54e4ee71SNavdeep Parhar 	free(eq->sdesc, M_CXGBE);
1387*54e4ee71SNavdeep Parhar 
1388*54e4ee71SNavdeep Parhar 	if (eq->maps)
1389*54e4ee71SNavdeep Parhar 		free_eq_maps(eq);
1390*54e4ee71SNavdeep Parhar 
1391*54e4ee71SNavdeep Parhar 	buf_ring_free(eq->br, M_CXGBE);
1392*54e4ee71SNavdeep Parhar 
1393*54e4ee71SNavdeep Parhar 	if (eq->tx_tag)
1394*54e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(eq->tx_tag);
1395*54e4ee71SNavdeep Parhar 
1396*54e4ee71SNavdeep Parhar 	if (mtx_initialized(&eq->eq_lock))
1397*54e4ee71SNavdeep Parhar 		mtx_destroy(&eq->eq_lock);
1398*54e4ee71SNavdeep Parhar 
1399*54e4ee71SNavdeep Parhar 	bzero(txq, sizeof(*txq));
1400*54e4ee71SNavdeep Parhar 	return (0);
1401*54e4ee71SNavdeep Parhar }
1402*54e4ee71SNavdeep Parhar 
1403*54e4ee71SNavdeep Parhar static void
1404*54e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1405*54e4ee71SNavdeep Parhar {
1406*54e4ee71SNavdeep Parhar 	bus_addr_t *ba = arg;
1407*54e4ee71SNavdeep Parhar 
1408*54e4ee71SNavdeep Parhar 	KASSERT(nseg == 1,
1409*54e4ee71SNavdeep Parhar 	    ("%s meant for single segment mappings only.", __func__));
1410*54e4ee71SNavdeep Parhar 
1411*54e4ee71SNavdeep Parhar 	*ba = error ? 0 : segs->ds_addr;
1412*54e4ee71SNavdeep Parhar }
1413*54e4ee71SNavdeep Parhar 
1414*54e4ee71SNavdeep Parhar static inline bool
1415*54e4ee71SNavdeep Parhar is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
1416*54e4ee71SNavdeep Parhar {
1417*54e4ee71SNavdeep Parhar 	*ctrl = (void *)((uintptr_t)iq->cdesc +
1418*54e4ee71SNavdeep Parhar 	    (iq->esize - sizeof(struct rsp_ctrl)));
1419*54e4ee71SNavdeep Parhar 
1420*54e4ee71SNavdeep Parhar 	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
1421*54e4ee71SNavdeep Parhar }
1422*54e4ee71SNavdeep Parhar 
1423*54e4ee71SNavdeep Parhar static inline void
1424*54e4ee71SNavdeep Parhar iq_next(struct sge_iq *iq)
1425*54e4ee71SNavdeep Parhar {
1426*54e4ee71SNavdeep Parhar 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
1427*54e4ee71SNavdeep Parhar 	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
1428*54e4ee71SNavdeep Parhar 		iq->cidx = 0;
1429*54e4ee71SNavdeep Parhar 		iq->gen ^= 1;
1430*54e4ee71SNavdeep Parhar 		iq->cdesc = iq->desc;
1431*54e4ee71SNavdeep Parhar 	}
1432*54e4ee71SNavdeep Parhar }
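
/*
 * Together these implement the generation-bit handshake: iq->gen flips each
 * time cidx wraps, so a descriptor written on the previous lap carries the
 * stale gen bit and is_new_response() keeps rejecting it until the hardware
 * rewrites it with the current generation.
 */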
1433*54e4ee71SNavdeep Parhar 
1434*54e4ee71SNavdeep Parhar static inline void
1435*54e4ee71SNavdeep Parhar ring_fl_db(struct adapter *sc, struct sge_fl *fl)
1436*54e4ee71SNavdeep Parhar {
1437*54e4ee71SNavdeep Parhar 	int ndesc = fl->pending / 8;
1438*54e4ee71SNavdeep Parhar 
1439*54e4ee71SNavdeep Parhar 	/* Caller responsible for ensuring there's something useful to do */
1440*54e4ee71SNavdeep Parhar 	KASSERT(ndesc > 0, ("%s called with no useful work to do.", __func__));
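
	/*
	 * The hardware PIDX advances in units of 8-buffer descriptors: e.g.
	 * with fl->pending = 27 buffers this rings 27 / 8 = 3 descriptors
	 * and leaves 27 & 7 = 3 buffers pending for a later ring.
	 */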
1441*54e4ee71SNavdeep Parhar 
1442*54e4ee71SNavdeep Parhar 	wmb();
1443*54e4ee71SNavdeep Parhar 
1444*54e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
1445*54e4ee71SNavdeep Parhar 	    V_QID(fl->cntxt_id) | V_PIDX(ndesc));
1446*54e4ee71SNavdeep Parhar 
1447*54e4ee71SNavdeep Parhar 	fl->pending &= 7;
1448*54e4ee71SNavdeep Parhar }
1449*54e4ee71SNavdeep Parhar 
1450*54e4ee71SNavdeep Parhar static void
1451*54e4ee71SNavdeep Parhar refill_fl(struct sge_fl *fl, int nbufs)
1452*54e4ee71SNavdeep Parhar {
1453*54e4ee71SNavdeep Parhar 	__be64 *d = &fl->desc[fl->pidx];
1454*54e4ee71SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
1455*54e4ee71SNavdeep Parhar 	bus_dma_tag_t tag;
1456*54e4ee71SNavdeep Parhar 	bus_addr_t pa;
1457*54e4ee71SNavdeep Parhar 	caddr_t cl;
1458*54e4ee71SNavdeep Parhar 	int rc;
1459*54e4ee71SNavdeep Parhar 
1460*54e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
1461*54e4ee71SNavdeep Parhar 
1462*54e4ee71SNavdeep Parhar 	if (nbufs < 0 || nbufs > fl->needed)
1463*54e4ee71SNavdeep Parhar 		nbufs = fl->needed;
1464*54e4ee71SNavdeep Parhar 
1465*54e4ee71SNavdeep Parhar 	while (nbufs--) {
1466*54e4ee71SNavdeep Parhar 
1467*54e4ee71SNavdeep Parhar 		if (sd->cl != NULL) {
1468*54e4ee71SNavdeep Parhar 
1469*54e4ee71SNavdeep Parhar 			/*
1470*54e4ee71SNavdeep Parhar 			 * This happens when a frame small enough to fit
1471*54e4ee71SNavdeep Parhar 			 * entirely in an mbuf was received in cl last time.
1472*54e4ee71SNavdeep Parhar 			 * We held on to cl and can reuse it now.  Note that
1473*54e4ee71SNavdeep Parhar 			 * we reuse a cluster of the old size if fl->tag_idx is
1474*54e4ee71SNavdeep Parhar 			 * no longer the same as sd->tag_idx.
1475*54e4ee71SNavdeep Parhar 			 */
1476*54e4ee71SNavdeep Parhar 
1477*54e4ee71SNavdeep Parhar 			KASSERT(*d == sd->ba_tag,
1478*54e4ee71SNavdeep Parhar 			    ("%s: recycling problem at pidx %d",
1479*54e4ee71SNavdeep Parhar 			    __func__, fl->pidx));
1480*54e4ee71SNavdeep Parhar 
1481*54e4ee71SNavdeep Parhar 			d++;
1482*54e4ee71SNavdeep Parhar 			goto recycled;
1483*54e4ee71SNavdeep Parhar 		}
1484*54e4ee71SNavdeep Parhar 
1486*54e4ee71SNavdeep Parhar 		if (fl->tag_idx != sd->tag_idx) {
1487*54e4ee71SNavdeep Parhar 			bus_dmamap_t map;
1488*54e4ee71SNavdeep Parhar 			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
1489*54e4ee71SNavdeep Parhar 			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
1490*54e4ee71SNavdeep Parhar 
1491*54e4ee71SNavdeep Parhar 			/*
1492*54e4ee71SNavdeep Parhar 			 * An MTU change can get us here.  Discard the old map
1493*54e4ee71SNavdeep Parhar 			 * which was created with the old tag, but only if
1494*54e4ee71SNavdeep Parhar 			 * we're able to get a new one.
1495*54e4ee71SNavdeep Parhar 			 */
1496*54e4ee71SNavdeep Parhar 			rc = bus_dmamap_create(newtag, 0, &map);
1497*54e4ee71SNavdeep Parhar 			if (rc == 0) {
1498*54e4ee71SNavdeep Parhar 				bus_dmamap_destroy(oldtag, sd->map);
1499*54e4ee71SNavdeep Parhar 				sd->map = map;
1500*54e4ee71SNavdeep Parhar 				sd->tag_idx = fl->tag_idx;
1501*54e4ee71SNavdeep Parhar 			}
1502*54e4ee71SNavdeep Parhar 		}
1503*54e4ee71SNavdeep Parhar 
1504*54e4ee71SNavdeep Parhar 		tag = fl->tag[sd->tag_idx];
1505*54e4ee71SNavdeep Parhar 
1506*54e4ee71SNavdeep Parhar 		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
1507*54e4ee71SNavdeep Parhar 		if (cl == NULL)
1508*54e4ee71SNavdeep Parhar 			break;
1509*54e4ee71SNavdeep Parhar 
1510*54e4ee71SNavdeep Parhar 		rc = bus_dmamap_load(tag, sd->map, cl,
1511*54e4ee71SNavdeep Parhar 		    FL_BUF_SIZE(sd->tag_idx), oneseg_dma_callback,
1512*54e4ee71SNavdeep Parhar 		    &pa, 0);
1513*54e4ee71SNavdeep Parhar 		if (rc != 0 || pa == 0) {
1514*54e4ee71SNavdeep Parhar 			fl->dmamap_failed++;
1515*54e4ee71SNavdeep Parhar 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
1516*54e4ee71SNavdeep Parhar 			break;
1517*54e4ee71SNavdeep Parhar 		}
1518*54e4ee71SNavdeep Parhar 
1519*54e4ee71SNavdeep Parhar 		sd->cl = cl;
1520*54e4ee71SNavdeep Parhar 		*d++ = htobe64(pa | sd->tag_idx);
1521*54e4ee71SNavdeep Parhar 
1522*54e4ee71SNavdeep Parhar #ifdef INVARIANTS
1523*54e4ee71SNavdeep Parhar 		sd->ba_tag = htobe64(pa | sd->tag_idx);
1524*54e4ee71SNavdeep Parhar #endif
1525*54e4ee71SNavdeep Parhar 
1526*54e4ee71SNavdeep Parhar recycled:	fl->pending++;
1527*54e4ee71SNavdeep Parhar 		fl->needed--;
1528*54e4ee71SNavdeep Parhar 		sd++;
1529*54e4ee71SNavdeep Parhar 		if (++fl->pidx == fl->cap) {
1530*54e4ee71SNavdeep Parhar 			fl->pidx = 0;
1531*54e4ee71SNavdeep Parhar 			sd = fl->sdesc;
1532*54e4ee71SNavdeep Parhar 			d = fl->desc;
1533*54e4ee71SNavdeep Parhar 		}
1534*54e4ee71SNavdeep Parhar 
1535*54e4ee71SNavdeep Parhar 		/* No harm if gethdr fails, we'll retry after rx */
1536*54e4ee71SNavdeep Parhar 		if (sd->m == NULL)
1537*54e4ee71SNavdeep Parhar 			sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
1538*54e4ee71SNavdeep Parhar 	}
1539*54e4ee71SNavdeep Parhar }
1540*54e4ee71SNavdeep Parhar 
1541*54e4ee71SNavdeep Parhar static int
1542*54e4ee71SNavdeep Parhar alloc_fl_sdesc(struct sge_fl *fl)
1543*54e4ee71SNavdeep Parhar {
1544*54e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
1545*54e4ee71SNavdeep Parhar 	bus_dma_tag_t tag;
1546*54e4ee71SNavdeep Parhar 	int i, rc;
1547*54e4ee71SNavdeep Parhar 
1548*54e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
1549*54e4ee71SNavdeep Parhar 
1550*54e4ee71SNavdeep Parhar 	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
1551*54e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
1552*54e4ee71SNavdeep Parhar 
1553*54e4ee71SNavdeep Parhar 	tag = fl->tag[fl->tag_idx];
1554*54e4ee71SNavdeep Parhar 	sd = fl->sdesc;
1555*54e4ee71SNavdeep Parhar 	for (i = 0; i < fl->cap; i++, sd++) {
1556*54e4ee71SNavdeep Parhar 
1557*54e4ee71SNavdeep Parhar 		sd->tag_idx = fl->tag_idx;
1558*54e4ee71SNavdeep Parhar 		rc = bus_dmamap_create(tag, 0, &sd->map);
1559*54e4ee71SNavdeep Parhar 		if (rc != 0)
1560*54e4ee71SNavdeep Parhar 			goto failed;
1561*54e4ee71SNavdeep Parhar 
1562*54e4ee71SNavdeep Parhar 		/* Doesn't matter if this succeeds or not */
1563*54e4ee71SNavdeep Parhar 		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
1564*54e4ee71SNavdeep Parhar 	}
1565*54e4ee71SNavdeep Parhar 
1566*54e4ee71SNavdeep Parhar 	return (0);
1567*54e4ee71SNavdeep Parhar failed:
1568*54e4ee71SNavdeep Parhar 	while (--i >= 0) {
1569*54e4ee71SNavdeep Parhar 		sd--;
1570*54e4ee71SNavdeep Parhar 		bus_dmamap_destroy(tag, sd->map);
1571*54e4ee71SNavdeep Parhar 		if (sd->m) {
1572*54e4ee71SNavdeep Parhar 			m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0);
1573*54e4ee71SNavdeep Parhar 			m_free(sd->m);
1574*54e4ee71SNavdeep Parhar 			sd->m = NULL;
1575*54e4ee71SNavdeep Parhar 		}
1576*54e4ee71SNavdeep Parhar 	}
1577*54e4ee71SNavdeep Parhar 	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
1578*54e4ee71SNavdeep Parhar 
1579*54e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
1580*54e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
1581*54e4ee71SNavdeep Parhar 
1582*54e4ee71SNavdeep Parhar 	return (rc);
1583*54e4ee71SNavdeep Parhar }
1584*54e4ee71SNavdeep Parhar 
1585*54e4ee71SNavdeep Parhar static void
1586*54e4ee71SNavdeep Parhar free_fl_sdesc(struct sge_fl *fl)
1587*54e4ee71SNavdeep Parhar {
1588*54e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
1589*54e4ee71SNavdeep Parhar 	int i;
1590*54e4ee71SNavdeep Parhar 
1591*54e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
1592*54e4ee71SNavdeep Parhar 
1593*54e4ee71SNavdeep Parhar 	sd = fl->sdesc;
1594*54e4ee71SNavdeep Parhar 	for (i = 0; i < fl->cap; i++, sd++) {
1595*54e4ee71SNavdeep Parhar 
1596*54e4ee71SNavdeep Parhar 		if (sd->m) {
1597*54e4ee71SNavdeep Parhar 			m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0);
1598*54e4ee71SNavdeep Parhar 			m_free(sd->m);
1599*54e4ee71SNavdeep Parhar 			sd->m = NULL;
1600*54e4ee71SNavdeep Parhar 		}
1601*54e4ee71SNavdeep Parhar 
1602*54e4ee71SNavdeep Parhar 		if (sd->cl) {
1603*54e4ee71SNavdeep Parhar 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
1604*54e4ee71SNavdeep Parhar 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
1605*54e4ee71SNavdeep Parhar 			sd->cl = NULL;
1606*54e4ee71SNavdeep Parhar 		}
1607*54e4ee71SNavdeep Parhar 
1608*54e4ee71SNavdeep Parhar 		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
1609*54e4ee71SNavdeep Parhar 	}
1610*54e4ee71SNavdeep Parhar 
1611*54e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
1612*54e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
1613*54e4ee71SNavdeep Parhar }
1614*54e4ee71SNavdeep Parhar 
1615*54e4ee71SNavdeep Parhar static int
1616*54e4ee71SNavdeep Parhar alloc_eq_maps(struct sge_eq *eq)
1617*54e4ee71SNavdeep Parhar {
1618*54e4ee71SNavdeep Parhar 	struct tx_map *txm;
1619*54e4ee71SNavdeep Parhar 	int i, rc, count;
1620*54e4ee71SNavdeep Parhar 
1621*54e4ee71SNavdeep Parhar 	/*
1622*54e4ee71SNavdeep Parhar 	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
1623*54e4ee71SNavdeep Parhar 	 * limit for any WR).  txq->no_dmamap events shouldn't occur if the map
1624*54e4ee71SNavdeep Parhar 	 * array is sized for the worst case.
1625*54e4ee71SNavdeep Parhar 	 */
1626*54e4ee71SNavdeep Parhar 	count = eq->qsize * 10 / 8;
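	/* For example, a 1024-entry eq gets 1024 * 10 / 8 = 1280 tx maps. */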
1627*54e4ee71SNavdeep Parhar 	eq->map_total = eq->map_avail = count;
1628*54e4ee71SNavdeep Parhar 	eq->map_cidx = eq->map_pidx = 0;
1629*54e4ee71SNavdeep Parhar 
1630*54e4ee71SNavdeep Parhar 	eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
1631*54e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
1632*54e4ee71SNavdeep Parhar 
1633*54e4ee71SNavdeep Parhar 	txm = eq->maps;
1634*54e4ee71SNavdeep Parhar 	for (i = 0; i < count; i++, txm++) {
1635*54e4ee71SNavdeep Parhar 		rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map);
1636*54e4ee71SNavdeep Parhar 		if (rc != 0)
1637*54e4ee71SNavdeep Parhar 			goto failed;
1638*54e4ee71SNavdeep Parhar 	}
1639*54e4ee71SNavdeep Parhar 
1640*54e4ee71SNavdeep Parhar 	return (0);
1641*54e4ee71SNavdeep Parhar failed:
1642*54e4ee71SNavdeep Parhar 	while (--i >= 0) {
1643*54e4ee71SNavdeep Parhar 		txm--;
1644*54e4ee71SNavdeep Parhar 		bus_dmamap_destroy(eq->tx_tag, txm->map);
1645*54e4ee71SNavdeep Parhar 	}
1646*54e4ee71SNavdeep Parhar 	KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__));
1647*54e4ee71SNavdeep Parhar 
1648*54e4ee71SNavdeep Parhar 	free(eq->maps, M_CXGBE);
1649*54e4ee71SNavdeep Parhar 	eq->maps = NULL;
1650*54e4ee71SNavdeep Parhar 
1651*54e4ee71SNavdeep Parhar 	return (rc);
1652*54e4ee71SNavdeep Parhar }
1653*54e4ee71SNavdeep Parhar 
1654*54e4ee71SNavdeep Parhar static void
1655*54e4ee71SNavdeep Parhar free_eq_maps(struct sge_eq *eq)
1656*54e4ee71SNavdeep Parhar {
1657*54e4ee71SNavdeep Parhar 	struct tx_map *txm;
1658*54e4ee71SNavdeep Parhar 	int i;
1659*54e4ee71SNavdeep Parhar 
1660*54e4ee71SNavdeep Parhar 	txm = eq->maps;
1661*54e4ee71SNavdeep Parhar 	for (i = 0; i < eq->map_total; i++, txm++) {
1662*54e4ee71SNavdeep Parhar 
1663*54e4ee71SNavdeep Parhar 		if (txm->m) {
1664*54e4ee71SNavdeep Parhar 			bus_dmamap_unload(eq->tx_tag, txm->map);
1665*54e4ee71SNavdeep Parhar 			m_freem(txm->m);
1666*54e4ee71SNavdeep Parhar 			txm->m = NULL;
1667*54e4ee71SNavdeep Parhar 		}
1668*54e4ee71SNavdeep Parhar 
1669*54e4ee71SNavdeep Parhar 		bus_dmamap_destroy(eq->tx_tag, txm->map);
1670*54e4ee71SNavdeep Parhar 	}
1671*54e4ee71SNavdeep Parhar 
1672*54e4ee71SNavdeep Parhar 	free(eq->maps, M_CXGBE);
1673*54e4ee71SNavdeep Parhar 	eq->maps = NULL;
1674*54e4ee71SNavdeep Parhar }
1675*54e4ee71SNavdeep Parhar 
1676*54e4ee71SNavdeep Parhar /*
1677*54e4ee71SNavdeep Parhar  * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
1678*54e4ee71SNavdeep Parhar  * willing to use up to 2 hardware descriptors, which means a maximum of 96
1679*54e4ee71SNavdeep Parhar  * bytes of immediate data.
1680*54e4ee71SNavdeep Parhar  */
1681*54e4ee71SNavdeep Parhar #define IMM_LEN ( \
1682*54e4ee71SNavdeep Parhar       2 * TX_EQ_ESIZE \
1683*54e4ee71SNavdeep Parhar     - sizeof(struct fw_eth_tx_pkt_wr) \
1684*54e4ee71SNavdeep Parhar     - sizeof(struct cpl_tx_pkt_core))
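
/*
 * A worked instance, assuming the 64-byte tx descriptor and 16-byte
 * fw_eth_tx_pkt_wr and cpl_tx_pkt_core headers: IMM_LEN = 2 * 64 - 16 - 16 =
 * 96 bytes, matching the figure in the comment above.
 */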
1685*54e4ee71SNavdeep Parhar 
1686*54e4ee71SNavdeep Parhar /*
1687*54e4ee71SNavdeep Parhar  * Returns non-zero on failure; no need to clean up anything in that case.
1688*54e4ee71SNavdeep Parhar  *
1689*54e4ee71SNavdeep Parhar  * Note 1: We always try to defrag the mbuf if required, and return EFBIG only
1690*54e4ee71SNavdeep Parhar  * if the resulting chain still needs more than TX_SGL_SEGS DMA segments.
1691*54e4ee71SNavdeep Parhar  *
1692*54e4ee71SNavdeep Parhar  * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
1693*54e4ee71SNavdeep Parhar  * does not have the TCP header in it.
1694*54e4ee71SNavdeep Parhar  */
1695*54e4ee71SNavdeep Parhar static int
1696*54e4ee71SNavdeep Parhar get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
1697*54e4ee71SNavdeep Parhar     int sgl_only)
1698*54e4ee71SNavdeep Parhar {
1699*54e4ee71SNavdeep Parhar 	struct mbuf *m = *fp;
1700*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1701*54e4ee71SNavdeep Parhar 	struct tx_map *txm;
1702*54e4ee71SNavdeep Parhar 	int rc, defragged = 0, n;
1703*54e4ee71SNavdeep Parhar 
1704*54e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
1705*54e4ee71SNavdeep Parhar 
1706*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz)
1707*54e4ee71SNavdeep Parhar 		sgl_only = 1;	/* Do not allow immediate data with LSO */
1708*54e4ee71SNavdeep Parhar 
1709*54e4ee71SNavdeep Parhar start:	sgl->nsegs = 0;
1710*54e4ee71SNavdeep Parhar 
1711*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
1712*54e4ee71SNavdeep Parhar 		return (0);	/* nsegs = 0 tells caller to use imm. tx */
1713*54e4ee71SNavdeep Parhar 
1714*54e4ee71SNavdeep Parhar 	if (eq->map_avail == 0) {
1715*54e4ee71SNavdeep Parhar 		txq->no_dmamap++;
1716*54e4ee71SNavdeep Parhar 		return (ENOMEM);
1717*54e4ee71SNavdeep Parhar 	}
1718*54e4ee71SNavdeep Parhar 	txm = &eq->maps[eq->map_pidx];
1719*54e4ee71SNavdeep Parhar 
1720*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
1721*54e4ee71SNavdeep Parhar 		*fp = m_pullup(m, 50);
1722*54e4ee71SNavdeep Parhar 		m = *fp;
1723*54e4ee71SNavdeep Parhar 		if (m == NULL)
1724*54e4ee71SNavdeep Parhar 			return (ENOBUFS);
1725*54e4ee71SNavdeep Parhar 	}
1726*54e4ee71SNavdeep Parhar 
1727*54e4ee71SNavdeep Parhar 	rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg,
1728*54e4ee71SNavdeep Parhar 	    &sgl->nsegs, BUS_DMA_NOWAIT);
1729*54e4ee71SNavdeep Parhar 	if (rc == EFBIG && defragged == 0) {
1730*54e4ee71SNavdeep Parhar 		m = m_defrag(m, M_DONTWAIT);
1731*54e4ee71SNavdeep Parhar 		if (m == NULL)
1732*54e4ee71SNavdeep Parhar 			return (EFBIG);
1733*54e4ee71SNavdeep Parhar 
1734*54e4ee71SNavdeep Parhar 		defragged = 1;
1735*54e4ee71SNavdeep Parhar 		*fp = m;
1736*54e4ee71SNavdeep Parhar 		goto start;
1737*54e4ee71SNavdeep Parhar 	}
1738*54e4ee71SNavdeep Parhar 	if (rc != 0)
1739*54e4ee71SNavdeep Parhar 		return (rc);
1740*54e4ee71SNavdeep Parhar 
1741*54e4ee71SNavdeep Parhar 	txm->m = m;
1742*54e4ee71SNavdeep Parhar 	eq->map_avail--;
1743*54e4ee71SNavdeep Parhar 	if (++eq->map_pidx == eq->map_total)
1744*54e4ee71SNavdeep Parhar 		eq->map_pidx = 0;
1745*54e4ee71SNavdeep Parhar 
1746*54e4ee71SNavdeep Parhar 	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
1747*54e4ee71SNavdeep Parhar 	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
1748*54e4ee71SNavdeep Parhar 
1749*54e4ee71SNavdeep Parhar 	/*
1750*54e4ee71SNavdeep Parhar 	 * Store the # of flits required to hold this frame's SGL in nflits.  An
1751*54e4ee71SNavdeep Parhar 	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
1752*54e4ee71SNavdeep Parhar 	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
1753*54e4ee71SNavdeep Parhar 	 * then len1 must be set to 0.
1754*54e4ee71SNavdeep Parhar 	 */
1755*54e4ee71SNavdeep Parhar 	n = sgl->nsegs - 1;
1756*54e4ee71SNavdeep Parhar 	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
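	/*
	 * Spot-check of the formula: nsegs = 1 gives n = 0 and nflits = 2
	 * (the ULPTX header + len0 flit plus addr0); nsegs = 3 gives n = 2
	 * and nflits = 3 + 0 + 2 = 5 (header + len0, addr0, one len1/len2
	 * flit, then addr1 and addr2).
	 */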
1757*54e4ee71SNavdeep Parhar 
1758*54e4ee71SNavdeep Parhar 	return (0);
1759*54e4ee71SNavdeep Parhar }
1760*54e4ee71SNavdeep Parhar 
1762*54e4ee71SNavdeep Parhar /*
1763*54e4ee71SNavdeep Parhar  * Releases all the txq resources used up in the specified sgl.
1764*54e4ee71SNavdeep Parhar  */
1765*54e4ee71SNavdeep Parhar static int
1766*54e4ee71SNavdeep Parhar free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
1767*54e4ee71SNavdeep Parhar {
1768*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1769*54e4ee71SNavdeep Parhar 	struct tx_map *txm;
1770*54e4ee71SNavdeep Parhar 
1771*54e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
1772*54e4ee71SNavdeep Parhar 
1773*54e4ee71SNavdeep Parhar 	if (sgl->nsegs == 0)
1774*54e4ee71SNavdeep Parhar 		return (0);	/* didn't use any map */
1775*54e4ee71SNavdeep Parhar 
1776*54e4ee71SNavdeep Parhar 	/* 1 pkt uses exactly 1 map, back it out */
1777*54e4ee71SNavdeep Parhar 
1778*54e4ee71SNavdeep Parhar 	eq->map_avail++;
1779*54e4ee71SNavdeep Parhar 	if (eq->map_pidx > 0)
1780*54e4ee71SNavdeep Parhar 		eq->map_pidx--;
1781*54e4ee71SNavdeep Parhar 	else
1782*54e4ee71SNavdeep Parhar 		eq->map_pidx = eq->map_total - 1;
1783*54e4ee71SNavdeep Parhar 
1784*54e4ee71SNavdeep Parhar 	txm = &eq->maps[eq->map_pidx];
1785*54e4ee71SNavdeep Parhar 	bus_dmamap_unload(eq->tx_tag, txm->map);
1786*54e4ee71SNavdeep Parhar 	txm->m = NULL;
1787*54e4ee71SNavdeep Parhar 
1788*54e4ee71SNavdeep Parhar 	return (0);
1789*54e4ee71SNavdeep Parhar }
1790*54e4ee71SNavdeep Parhar 
1791*54e4ee71SNavdeep Parhar static int
1792*54e4ee71SNavdeep Parhar write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
1793*54e4ee71SNavdeep Parhar     struct sgl *sgl)
1794*54e4ee71SNavdeep Parhar {
1795*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1796*54e4ee71SNavdeep Parhar 	struct fw_eth_tx_pkt_wr *wr;
1797*54e4ee71SNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
1798*54e4ee71SNavdeep Parhar 	uint32_t ctrl;	/* used in many unrelated places */
1799*54e4ee71SNavdeep Parhar 	uint64_t ctrl1;
1800*54e4ee71SNavdeep Parhar 	int nflits, ndesc;
1801*54e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
1802*54e4ee71SNavdeep Parhar 	caddr_t dst;
1803*54e4ee71SNavdeep Parhar 
1804*54e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
1805*54e4ee71SNavdeep Parhar 
1806*54e4ee71SNavdeep Parhar 	/*
1807*54e4ee71SNavdeep Parhar 	 * Do we have enough flits to send this frame out?
1808*54e4ee71SNavdeep Parhar 	 */
1809*54e4ee71SNavdeep Parhar 	ctrl = sizeof(struct cpl_tx_pkt_core);
1810*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz) {
1811*54e4ee71SNavdeep Parhar 		nflits = TXPKT_LSO_WR_HDR;
1812*54e4ee71SNavdeep Parhar 		ctrl += sizeof(struct cpl_tx_pkt_lso);
1813*54e4ee71SNavdeep Parhar 	} else
1814*54e4ee71SNavdeep Parhar 		nflits = TXPKT_WR_HDR;
1815*54e4ee71SNavdeep Parhar 	if (sgl->nsegs > 0)
1816*54e4ee71SNavdeep Parhar 		nflits += sgl->nflits;
1817*54e4ee71SNavdeep Parhar 	else {
1818*54e4ee71SNavdeep Parhar 		nflits += howmany(m->m_pkthdr.len, 8);
1819*54e4ee71SNavdeep Parhar 		ctrl += m->m_pkthdr.len;
1820*54e4ee71SNavdeep Parhar 	}
1821*54e4ee71SNavdeep Parhar 	ndesc = howmany(nflits, 8);
1822*54e4ee71SNavdeep Parhar 	if (ndesc > eq->avail)
1823*54e4ee71SNavdeep Parhar 		return (ENOMEM);
1824*54e4ee71SNavdeep Parhar 
1825*54e4ee71SNavdeep Parhar 	/* Firmware work request header */
1826*54e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
1827*54e4ee71SNavdeep Parhar 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
1828*54e4ee71SNavdeep Parhar 	    V_FW_WR_IMMDLEN(ctrl));
1829*54e4ee71SNavdeep Parhar 	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
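	/* A flit is 8 bytes, so LEN16 is the flit count halved, rounded up. */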
1830*54e4ee71SNavdeep Parhar 	if (eq->avail == ndesc)
1831*54e4ee71SNavdeep Parhar 		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
1832*54e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(ctrl);
1833*54e4ee71SNavdeep Parhar 	wr->r3 = 0;
1834*54e4ee71SNavdeep Parhar 
1835*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz) {
1836*54e4ee71SNavdeep Parhar 		struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
1837*54e4ee71SNavdeep Parhar 		struct ether_header *eh;
1838*54e4ee71SNavdeep Parhar 		struct ip *ip;
1839*54e4ee71SNavdeep Parhar 		struct tcphdr *tcp;
1840*54e4ee71SNavdeep Parhar 
1841*54e4ee71SNavdeep Parhar 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
1842*54e4ee71SNavdeep Parhar 		    F_LSO_LAST_SLICE;
1843*54e4ee71SNavdeep Parhar 
1844*54e4ee71SNavdeep Parhar 		eh = mtod(m, struct ether_header *);
1845*54e4ee71SNavdeep Parhar 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1846*54e4ee71SNavdeep Parhar 			ctrl |= V_LSO_ETHHDR_LEN(1);
1847*54e4ee71SNavdeep Parhar 			ip = (void *)((struct ether_vlan_header *)eh + 1);
1848*54e4ee71SNavdeep Parhar 		} else
1849*54e4ee71SNavdeep Parhar 			ip = (void *)(eh + 1);
1850*54e4ee71SNavdeep Parhar 
1851*54e4ee71SNavdeep Parhar 		tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
1852*54e4ee71SNavdeep Parhar 		ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
1853*54e4ee71SNavdeep Parhar 		    V_LSO_TCPHDR_LEN(tcp->th_off);
1854*54e4ee71SNavdeep Parhar 
1855*54e4ee71SNavdeep Parhar 		lso->lso_ctrl = htobe32(ctrl);
1856*54e4ee71SNavdeep Parhar 		lso->ipid_ofst = htobe16(0);
1857*54e4ee71SNavdeep Parhar 		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
1858*54e4ee71SNavdeep Parhar 		lso->seqno_offset = htobe32(0);
1859*54e4ee71SNavdeep Parhar 		lso->len = htobe32(m->m_pkthdr.len);
1860*54e4ee71SNavdeep Parhar 
1861*54e4ee71SNavdeep Parhar 		cpl = (void *)(lso + 1);
1862*54e4ee71SNavdeep Parhar 
1863*54e4ee71SNavdeep Parhar 		txq->tso_wrs++;
1864*54e4ee71SNavdeep Parhar 	} else
1865*54e4ee71SNavdeep Parhar 		cpl = (void *)(wr + 1);
1866*54e4ee71SNavdeep Parhar 
1867*54e4ee71SNavdeep Parhar 	/* Checksum offload */
1868*54e4ee71SNavdeep Parhar 	ctrl1 = 0;
1869*54e4ee71SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
1870*54e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
1871*54e4ee71SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
1872*54e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
1873*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
1874*54e4ee71SNavdeep Parhar 		txq->txcsum++;	/* some hardware assistance provided */
1875*54e4ee71SNavdeep Parhar 
1876*54e4ee71SNavdeep Parhar 	/* VLAN tag insertion */
1877*54e4ee71SNavdeep Parhar 	if (m->m_flags & M_VLANTAG) {
1878*54e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
1879*54e4ee71SNavdeep Parhar 		txq->vlan_insertion++;
1880*54e4ee71SNavdeep Parhar 	}
1881*54e4ee71SNavdeep Parhar 
1882*54e4ee71SNavdeep Parhar 	/* CPL header */
1883*54e4ee71SNavdeep Parhar 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
1884*54e4ee71SNavdeep Parhar 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
1885*54e4ee71SNavdeep Parhar 	cpl->pack = 0;
1886*54e4ee71SNavdeep Parhar 	cpl->len = htobe16(m->m_pkthdr.len);
1887*54e4ee71SNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl1);
1888*54e4ee71SNavdeep Parhar 
1889*54e4ee71SNavdeep Parhar 	/* Software descriptor */
1890*54e4ee71SNavdeep Parhar 	txsd = &eq->sdesc[eq->pidx];
1891*54e4ee71SNavdeep Parhar 	txsd->desc_used = ndesc;
1892*54e4ee71SNavdeep Parhar 
1893*54e4ee71SNavdeep Parhar 	eq->pending += ndesc;
1894*54e4ee71SNavdeep Parhar 	eq->avail -= ndesc;
1895*54e4ee71SNavdeep Parhar 	eq->pidx += ndesc;
1896*54e4ee71SNavdeep Parhar 	if (eq->pidx >= eq->cap)
1897*54e4ee71SNavdeep Parhar 		eq->pidx -= eq->cap;
1898*54e4ee71SNavdeep Parhar 
1899*54e4ee71SNavdeep Parhar 	/* SGL */
1900*54e4ee71SNavdeep Parhar 	dst = (void *)(cpl + 1);
1901*54e4ee71SNavdeep Parhar 	if (sgl->nsegs > 0) {
1902*54e4ee71SNavdeep Parhar 		txsd->map_used = 1;
1903*54e4ee71SNavdeep Parhar 		txq->sgl_wrs++;
1904*54e4ee71SNavdeep Parhar 		write_sgl_to_txd(eq, sgl, &dst);
1905*54e4ee71SNavdeep Parhar 	} else {
1906*54e4ee71SNavdeep Parhar 		txsd->map_used = 0;
1907*54e4ee71SNavdeep Parhar 		txq->imm_wrs++;
1908*54e4ee71SNavdeep Parhar 		for (; m; m = m->m_next) {
1909*54e4ee71SNavdeep Parhar 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
1910*54e4ee71SNavdeep Parhar 		}
1911*54e4ee71SNavdeep Parhar 	}
1912*54e4ee71SNavdeep Parhar 
1913*54e4ee71SNavdeep Parhar 	txq->txpkt_wrs++;
1914*54e4ee71SNavdeep Parhar 	return (0);
1915*54e4ee71SNavdeep Parhar }
1916*54e4ee71SNavdeep Parhar 
1917*54e4ee71SNavdeep Parhar /*
1918*54e4ee71SNavdeep Parhar  * Returns 0 to indicate that m has been accepted into a coalesced tx work
1919*54e4ee71SNavdeep Parhar  * request.  It has either been folded into txpkts or txpkts was flushed and m
1920*54e4ee71SNavdeep Parhar  * has started a new coalesced work request (as the first frame in a fresh
1921*54e4ee71SNavdeep Parhar  * txpkts).
1922*54e4ee71SNavdeep Parhar  *
1923*54e4ee71SNavdeep Parhar  * Returns non-zero to indicate a failure; the caller is responsible for
1924*54e4ee71SNavdeep Parhar  * transmitting m, and if there was anything in txpkts it has been flushed.
1925*54e4ee71SNavdeep Parhar  */
1926*54e4ee71SNavdeep Parhar static int
1927*54e4ee71SNavdeep Parhar add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
1928*54e4ee71SNavdeep Parhar     struct mbuf *m, struct sgl *sgl)
1929*54e4ee71SNavdeep Parhar {
1930*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1931*54e4ee71SNavdeep Parhar 	int can_coalesce;
1932*54e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
1933*54e4ee71SNavdeep Parhar 	int flits;
1934*54e4ee71SNavdeep Parhar 
1935*54e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
1936*54e4ee71SNavdeep Parhar 
1937*54e4ee71SNavdeep Parhar 	if (txpkts->npkt > 0) {
1938*54e4ee71SNavdeep Parhar 		flits = TXPKTS_PKT_HDR + sgl->nflits;
1939*54e4ee71SNavdeep Parhar 		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
1940*54e4ee71SNavdeep Parhar 		    txpkts->nflits + flits <= TX_WR_FLITS &&
1941*54e4ee71SNavdeep Parhar 		    txpkts->nflits + flits <= eq->avail * 8 &&
1942*54e4ee71SNavdeep Parhar 		    txpkts->plen + m->m_pkthdr.len < 65536;
1943*54e4ee71SNavdeep Parhar 
1944*54e4ee71SNavdeep Parhar 		if (can_coalesce) {
1945*54e4ee71SNavdeep Parhar 			txpkts->npkt++;
1946*54e4ee71SNavdeep Parhar 			txpkts->nflits += flits;
1947*54e4ee71SNavdeep Parhar 			txpkts->plen += m->m_pkthdr.len;
1948*54e4ee71SNavdeep Parhar 
1949*54e4ee71SNavdeep Parhar 			txsd = &eq->sdesc[eq->pidx];
1950*54e4ee71SNavdeep Parhar 			txsd->map_used++;
1951*54e4ee71SNavdeep Parhar 
1952*54e4ee71SNavdeep Parhar 			return (0);
1953*54e4ee71SNavdeep Parhar 		}
1954*54e4ee71SNavdeep Parhar 
1955*54e4ee71SNavdeep Parhar 		/*
1956*54e4ee71SNavdeep Parhar 		 * Couldn't coalesce m into txpkts.  The first order of business
1957*54e4ee71SNavdeep Parhar 		 * is to send txpkts on its way.  Then we'll revisit m.
1958*54e4ee71SNavdeep Parhar 		 */
1959*54e4ee71SNavdeep Parhar 		write_txpkts_wr(txq, txpkts);
1960*54e4ee71SNavdeep Parhar 	}
1961*54e4ee71SNavdeep Parhar 
1962*54e4ee71SNavdeep Parhar 	/*
1963*54e4ee71SNavdeep Parhar 	 * Check if we can start a new coalesced tx work request with m as
1964*54e4ee71SNavdeep Parhar 	 * the first packet in it.
1965*54e4ee71SNavdeep Parhar 	 */
1966*54e4ee71SNavdeep Parhar 
1967*54e4ee71SNavdeep Parhar 	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
1968*54e4ee71SNavdeep Parhar 
1969*54e4ee71SNavdeep Parhar 	flits = TXPKTS_WR_HDR + sgl->nflits;
1970*54e4ee71SNavdeep Parhar 	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
1971*54e4ee71SNavdeep Parhar 	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
1972*54e4ee71SNavdeep Parhar 
1973*54e4ee71SNavdeep Parhar 	if (can_coalesce == 0)
1974*54e4ee71SNavdeep Parhar 		return (EINVAL);
1975*54e4ee71SNavdeep Parhar 
1976*54e4ee71SNavdeep Parhar 	/*
1977*54e4ee71SNavdeep Parhar 	 * Start a fresh coalesced tx WR with m as the first frame in it.
1978*54e4ee71SNavdeep Parhar 	 */
1979*54e4ee71SNavdeep Parhar 	txpkts->npkt = 1;
1980*54e4ee71SNavdeep Parhar 	txpkts->nflits = flits;
1981*54e4ee71SNavdeep Parhar 	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
1982*54e4ee71SNavdeep Parhar 	txpkts->plen = m->m_pkthdr.len;
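	/*
	 * flitp starts at flit[2] because the first 16 bytes of the
	 * descriptor are reserved for the fw_eth_tx_pkts_wr header that
	 * write_txpkts_wr() fills in when the WR is finally sent.
	 */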
1983*54e4ee71SNavdeep Parhar 
1984*54e4ee71SNavdeep Parhar 	txsd = &eq->sdesc[eq->pidx];
1985*54e4ee71SNavdeep Parhar 	txsd->map_used = 1;
1986*54e4ee71SNavdeep Parhar 
1987*54e4ee71SNavdeep Parhar 	return (0);
1988*54e4ee71SNavdeep Parhar }
1989*54e4ee71SNavdeep Parhar 
1990*54e4ee71SNavdeep Parhar /*
1991*54e4ee71SNavdeep Parhar  * Note that write_txpkts_wr can never run out of hardware descriptors (but
1992*54e4ee71SNavdeep Parhar  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
1993*54e4ee71SNavdeep Parhar  * coalescing only if sufficient hardware descriptors are available.
1994*54e4ee71SNavdeep Parhar  */
1995*54e4ee71SNavdeep Parhar static void
1996*54e4ee71SNavdeep Parhar write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
1997*54e4ee71SNavdeep Parhar {
1998*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1999*54e4ee71SNavdeep Parhar 	struct fw_eth_tx_pkts_wr *wr;
2000*54e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
2001*54e4ee71SNavdeep Parhar 	uint32_t ctrl;
2002*54e4ee71SNavdeep Parhar 	int ndesc;
2003*54e4ee71SNavdeep Parhar 
2004*54e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
2005*54e4ee71SNavdeep Parhar 
2006*54e4ee71SNavdeep Parhar 	ndesc = howmany(txpkts->nflits, 8);
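	/* 8 flits (of 8 bytes each) fit in one 64-byte hardware descriptor. */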
2007*54e4ee71SNavdeep Parhar 
2008*54e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
2009*54e4ee71SNavdeep Parhar 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
2010*54e4ee71SNavdeep Parhar 	    V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
2011*54e4ee71SNavdeep Parhar 	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
2012*54e4ee71SNavdeep Parhar 	if (eq->avail == ndesc)
2013*54e4ee71SNavdeep Parhar 		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2014*54e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(ctrl);
2015*54e4ee71SNavdeep Parhar 	wr->plen = htobe16(txpkts->plen);
2016*54e4ee71SNavdeep Parhar 	wr->npkt = txpkts->npkt;
2017*54e4ee71SNavdeep Parhar 	wr->r3 = wr->r4 = 0;
2018*54e4ee71SNavdeep Parhar 
2019*54e4ee71SNavdeep Parhar 	/* Everything else already written */
2020*54e4ee71SNavdeep Parhar 
2021*54e4ee71SNavdeep Parhar 	txsd = &eq->sdesc[eq->pidx];
2022*54e4ee71SNavdeep Parhar 	txsd->desc_used = ndesc;
2023*54e4ee71SNavdeep Parhar 
2024*54e4ee71SNavdeep Parhar 	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
2025*54e4ee71SNavdeep Parhar 
2026*54e4ee71SNavdeep Parhar 	eq->pending += ndesc;
2027*54e4ee71SNavdeep Parhar 	eq->avail -= ndesc;
2028*54e4ee71SNavdeep Parhar 	eq->pidx += ndesc;
2029*54e4ee71SNavdeep Parhar 	if (eq->pidx >= eq->cap)
2030*54e4ee71SNavdeep Parhar 		eq->pidx -= eq->cap;
2031*54e4ee71SNavdeep Parhar 
2032*54e4ee71SNavdeep Parhar 	txq->txpkts_pkts += txpkts->npkt;
2033*54e4ee71SNavdeep Parhar 	txq->txpkts_wrs++;
2034*54e4ee71SNavdeep Parhar 	txpkts->npkt = 0;	/* emptied */
2035*54e4ee71SNavdeep Parhar }
2036*54e4ee71SNavdeep Parhar 
2037*54e4ee71SNavdeep Parhar static inline void
2038*54e4ee71SNavdeep Parhar write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
2039*54e4ee71SNavdeep Parhar     struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
2040*54e4ee71SNavdeep Parhar {
2041*54e4ee71SNavdeep Parhar 	struct ulp_txpkt *ulpmc;
2042*54e4ee71SNavdeep Parhar 	struct ulptx_idata *ulpsc;
2043*54e4ee71SNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
2044*54e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
2045*54e4ee71SNavdeep Parhar 	uintptr_t flitp, start, end;
2046*54e4ee71SNavdeep Parhar 	uint64_t ctrl;
2047*54e4ee71SNavdeep Parhar 	caddr_t dst;
2048*54e4ee71SNavdeep Parhar 
2049*54e4ee71SNavdeep Parhar 	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
2050*54e4ee71SNavdeep Parhar 
2051*54e4ee71SNavdeep Parhar 	start = (uintptr_t)eq->desc;
2052*54e4ee71SNavdeep Parhar 	end = (uintptr_t)eq->spg;
2053*54e4ee71SNavdeep Parhar 
2054*54e4ee71SNavdeep Parhar 	/* Checksum offload */
2055*54e4ee71SNavdeep Parhar 	ctrl = 0;
2056*54e4ee71SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2057*54e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_IPCSUM_DIS;
2058*54e4ee71SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2059*54e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_L4CSUM_DIS;
2060*54e4ee71SNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2061*54e4ee71SNavdeep Parhar 		txq->txcsum++;	/* some hardware assistance provided */
2062*54e4ee71SNavdeep Parhar 
2063*54e4ee71SNavdeep Parhar 	/* VLAN tag insertion */
2064*54e4ee71SNavdeep Parhar 	if (m->m_flags & M_VLANTAG) {
2065*54e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2066*54e4ee71SNavdeep Parhar 		txq->vlan_insertion++;
2067*54e4ee71SNavdeep Parhar 	}
2068*54e4ee71SNavdeep Parhar 
2069*54e4ee71SNavdeep Parhar 	/*
2070*54e4ee71SNavdeep Parhar 	 * The previous packet's SGL must have ended at a 16-byte boundary (this
2071*54e4ee71SNavdeep Parhar 	 * is required by the firmware/hardware).  It follows that flitp cannot
2072*54e4ee71SNavdeep Parhar 	 * wrap around between the ULPTX master command and ULPTX subcommand (8
2073*54e4ee71SNavdeep Parhar 	 * bytes each), and that it cannot wrap around in the middle of the
2074*54e4ee71SNavdeep Parhar 	 * cpl_tx_pkt_core either.
2075*54e4ee71SNavdeep Parhar 	 */
2076*54e4ee71SNavdeep Parhar 	flitp = (uintptr_t)txpkts->flitp;
2077*54e4ee71SNavdeep Parhar 	KASSERT((flitp & 0xf) == 0,
2078*54e4ee71SNavdeep Parhar 	    ("%s: last SGL did not end at 16 byte boundary: %p",
2079*54e4ee71SNavdeep Parhar 	    __func__, txpkts->flitp));
2080*54e4ee71SNavdeep Parhar 
2081*54e4ee71SNavdeep Parhar 	/* ULP master command */
2082*54e4ee71SNavdeep Parhar 	ulpmc = (void *)flitp;
2083*54e4ee71SNavdeep Parhar 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
2084*54e4ee71SNavdeep Parhar 	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
2085*54e4ee71SNavdeep Parhar 	    sizeof(*cpl) + 8 * sgl->nflits, 16));
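	/*
	 * The length is in 16-byte units: the 8-byte master command, the
	 * 8-byte subcommand, the cpl_tx_pkt_core, and 8 bytes per SGL flit.
	 */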
2086*54e4ee71SNavdeep Parhar 
2087*54e4ee71SNavdeep Parhar 	/* ULP subcommand */
2088*54e4ee71SNavdeep Parhar 	ulpsc = (void *)(ulpmc + 1);
2089*54e4ee71SNavdeep Parhar 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
2090*54e4ee71SNavdeep Parhar 	    F_ULP_TX_SC_MORE);
2091*54e4ee71SNavdeep Parhar 	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
2092*54e4ee71SNavdeep Parhar 
2093*54e4ee71SNavdeep Parhar 	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
2094*54e4ee71SNavdeep Parhar 	if (flitp == end)
2095*54e4ee71SNavdeep Parhar 		flitp = start;
2096*54e4ee71SNavdeep Parhar 
2097*54e4ee71SNavdeep Parhar 	/* CPL_TX_PKT */
2098*54e4ee71SNavdeep Parhar 	cpl = (void *)flitp;
2099*54e4ee71SNavdeep Parhar 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2100*54e4ee71SNavdeep Parhar 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2101*54e4ee71SNavdeep Parhar 	cpl->pack = 0;
2102*54e4ee71SNavdeep Parhar 	cpl->len = htobe16(m->m_pkthdr.len);
2103*54e4ee71SNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl);
2104*54e4ee71SNavdeep Parhar 
2105*54e4ee71SNavdeep Parhar 	flitp += sizeof(*cpl);
2106*54e4ee71SNavdeep Parhar 	if (flitp == end)
2107*54e4ee71SNavdeep Parhar 		flitp = start;
2108*54e4ee71SNavdeep Parhar 
2109*54e4ee71SNavdeep Parhar 	/* SGL for this frame */
2110*54e4ee71SNavdeep Parhar 	dst = (caddr_t)flitp;
2111*54e4ee71SNavdeep Parhar 	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
2112*54e4ee71SNavdeep Parhar 	txpkts->flitp = (void *)dst;
2113*54e4ee71SNavdeep Parhar 
2114*54e4ee71SNavdeep Parhar 	KASSERT(((uintptr_t)dst & 0xf) == 0,
2115*54e4ee71SNavdeep Parhar 	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
2116*54e4ee71SNavdeep Parhar }
2117*54e4ee71SNavdeep Parhar 
/*
 * Writes the packet's SGL into the descriptor ring.  If the SGL ends at an
 * address that is not 16-byte aligned, a zero-filled pad flit is appended.
 * Returns 1 if a pad flit was added, 0 otherwise.
 */
2122*54e4ee71SNavdeep Parhar static int
2123*54e4ee71SNavdeep Parhar write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
2124*54e4ee71SNavdeep Parhar {
2125*54e4ee71SNavdeep Parhar 	__be64 *flitp, *end;
2126*54e4ee71SNavdeep Parhar 	struct ulptx_sgl *usgl;
2127*54e4ee71SNavdeep Parhar 	bus_dma_segment_t *seg;
2128*54e4ee71SNavdeep Parhar 	int i, padded;
2129*54e4ee71SNavdeep Parhar 
2130*54e4ee71SNavdeep Parhar 	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
2131*54e4ee71SNavdeep Parhar 	    ("%s: bad SGL - nsegs=%d, nflits=%d",
2132*54e4ee71SNavdeep Parhar 	    __func__, sgl->nsegs, sgl->nflits));
2133*54e4ee71SNavdeep Parhar 
2134*54e4ee71SNavdeep Parhar 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
2135*54e4ee71SNavdeep Parhar 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
2136*54e4ee71SNavdeep Parhar 
2137*54e4ee71SNavdeep Parhar 	flitp = (__be64 *)(*to);
2138*54e4ee71SNavdeep Parhar 	end = flitp + sgl->nflits;
2139*54e4ee71SNavdeep Parhar 	seg = &sgl->seg[0];
2140*54e4ee71SNavdeep Parhar 	usgl = (void *)flitp;
2141*54e4ee71SNavdeep Parhar 
	/*
	 * We start at a 16-byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */
2147*54e4ee71SNavdeep Parhar 
2148*54e4ee71SNavdeep Parhar 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
2149*54e4ee71SNavdeep Parhar 	    V_ULPTX_NSGE(sgl->nsegs));
2150*54e4ee71SNavdeep Parhar 	usgl->len0 = htobe32(seg->ds_len);
2151*54e4ee71SNavdeep Parhar 	usgl->addr0 = htobe64(seg->ds_addr);
2152*54e4ee71SNavdeep Parhar 	seg++;
2153*54e4ee71SNavdeep Parhar 
2154*54e4ee71SNavdeep Parhar 	if ((uintptr_t)end <= (uintptr_t)eq->spg) {
2155*54e4ee71SNavdeep Parhar 
2156*54e4ee71SNavdeep Parhar 		/* Won't wrap around at all */
2157*54e4ee71SNavdeep Parhar 
2158*54e4ee71SNavdeep Parhar 		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
2159*54e4ee71SNavdeep Parhar 			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
2160*54e4ee71SNavdeep Parhar 			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
2161*54e4ee71SNavdeep Parhar 		}
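		/* Odd segment count: zero the unused length in the last pair. */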
2162*54e4ee71SNavdeep Parhar 		if (i & 1)
2163*54e4ee71SNavdeep Parhar 			usgl->sge[i / 2].len[1] = htobe32(0);
2164*54e4ee71SNavdeep Parhar 	} else {
2165*54e4ee71SNavdeep Parhar 
2166*54e4ee71SNavdeep Parhar 		/* Will wrap somewhere in the rest of the SGL */
2167*54e4ee71SNavdeep Parhar 
2168*54e4ee71SNavdeep Parhar 		/* 2 flits already written, write the rest flit by flit */
2169*54e4ee71SNavdeep Parhar 		flitp = (void *)(usgl + 1);
2170*54e4ee71SNavdeep Parhar 		for (i = 0; i < sgl->nflits - 2; i++) {
2171*54e4ee71SNavdeep Parhar 			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
2172*54e4ee71SNavdeep Parhar 				flitp = (void *)eq->desc;
2173*54e4ee71SNavdeep Parhar 			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
2174*54e4ee71SNavdeep Parhar 		}
2175*54e4ee71SNavdeep Parhar 		end = flitp;
2176*54e4ee71SNavdeep Parhar 	}
2177*54e4ee71SNavdeep Parhar 
2178*54e4ee71SNavdeep Parhar 	if ((uintptr_t)end & 0xf) {
2179*54e4ee71SNavdeep Parhar 		*(uint64_t *)end = 0;
2180*54e4ee71SNavdeep Parhar 		end++;
2181*54e4ee71SNavdeep Parhar 		padded = 1;
2182*54e4ee71SNavdeep Parhar 	} else
2183*54e4ee71SNavdeep Parhar 		padded = 0;
2184*54e4ee71SNavdeep Parhar 
2185*54e4ee71SNavdeep Parhar 	if ((uintptr_t)end == (uintptr_t)eq->spg)
2186*54e4ee71SNavdeep Parhar 		*to = (void *)eq->desc;
2187*54e4ee71SNavdeep Parhar 	else
2188*54e4ee71SNavdeep Parhar 		*to = (void *)end;
2189*54e4ee71SNavdeep Parhar 
2190*54e4ee71SNavdeep Parhar 	return (padded);
2191*54e4ee71SNavdeep Parhar }
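#if 0
/*
 * Illustrative sketch (not compiled into the driver): the flit count of the
 * DSGL written above, as a function of the segment count.  The header
 * (cmd_nsge, len0, addr0) is 2 flits; every full pair of additional segments
 * adds 3 flits (two lengths share one flit, each address takes its own); a
 * leftover segment adds 2.  The helper name is hypothetical.
 */
static int
sgl_nflits(int nsegs)
{
	int extra = nsegs - 1;

	return (2 + 3 * (extra / 2) + 2 * (extra & 1));
}
#endif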
2192*54e4ee71SNavdeep Parhar 
2193*54e4ee71SNavdeep Parhar static inline void
2194*54e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
2195*54e4ee71SNavdeep Parhar {
2196*54e4ee71SNavdeep Parhar 	if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
2197*54e4ee71SNavdeep Parhar 		bcopy(from, *to, len);
2198*54e4ee71SNavdeep Parhar 		(*to) += len;
2199*54e4ee71SNavdeep Parhar 	} else {
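		/*
		 * The copy would run into the status page: copy up to it,
		 * then wrap to the start of the ring for the rest.
		 */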
2200*54e4ee71SNavdeep Parhar 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
2201*54e4ee71SNavdeep Parhar 
2202*54e4ee71SNavdeep Parhar 		bcopy(from, *to, portion);
2203*54e4ee71SNavdeep Parhar 		from += portion;
2204*54e4ee71SNavdeep Parhar 		portion = len - portion;	/* remaining */
2205*54e4ee71SNavdeep Parhar 		bcopy(from, (void *)eq->desc, portion);
2206*54e4ee71SNavdeep Parhar 		(*to) = (caddr_t)eq->desc + portion;
2207*54e4ee71SNavdeep Parhar 	}
2208*54e4ee71SNavdeep Parhar }
2209*54e4ee71SNavdeep Parhar 
2210*54e4ee71SNavdeep Parhar static inline void
2211*54e4ee71SNavdeep Parhar ring_tx_db(struct adapter *sc, struct sge_eq *eq)
2212*54e4ee71SNavdeep Parhar {
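	/* Make the descriptor writes visible before ringing the doorbell. */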
2213*54e4ee71SNavdeep Parhar 	wmb();
2214*54e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
2215*54e4ee71SNavdeep Parhar 	    V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
2216*54e4ee71SNavdeep Parhar 	eq->pending = 0;
2217*54e4ee71SNavdeep Parhar }
2218*54e4ee71SNavdeep Parhar 
2219*54e4ee71SNavdeep Parhar static int
2220*54e4ee71SNavdeep Parhar reclaim_tx_descs(struct sge_eq *eq, int atleast, int howmany)
2221*54e4ee71SNavdeep Parhar {
2222*54e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
2223*54e4ee71SNavdeep Parhar 	struct tx_map *txm, *next_txm;
2224*54e4ee71SNavdeep Parhar 	unsigned int cidx, can_reclaim, reclaimed, maps, next_map_cidx;
2225*54e4ee71SNavdeep Parhar 
2226*54e4ee71SNavdeep Parhar 	EQ_LOCK_ASSERT_OWNED(eq);
2227*54e4ee71SNavdeep Parhar 
2228*54e4ee71SNavdeep Parhar 	cidx = eq->spg->cidx;	/* stable snapshot */
2229*54e4ee71SNavdeep Parhar 	cidx = be16_to_cpu(cidx);
2230*54e4ee71SNavdeep Parhar 
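	/*
	 * The hardware's cidx may have wrapped around the ring relative to
	 * ours; e.g. with cap = 1024, eq->cidx = 1000, and hw cidx = 8,
	 * can_reclaim = 8 + 1024 - 1000 = 32.
	 */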
2231*54e4ee71SNavdeep Parhar 	if (cidx >= eq->cidx)
2232*54e4ee71SNavdeep Parhar 		can_reclaim = cidx - eq->cidx;
2233*54e4ee71SNavdeep Parhar 	else
2234*54e4ee71SNavdeep Parhar 		can_reclaim = cidx + eq->cap - eq->cidx;
2235*54e4ee71SNavdeep Parhar 
2236*54e4ee71SNavdeep Parhar 	if (can_reclaim < atleast)
2237*54e4ee71SNavdeep Parhar 		return (0);
2238*54e4ee71SNavdeep Parhar 
2239*54e4ee71SNavdeep Parhar 	next_map_cidx = eq->map_cidx;
2240*54e4ee71SNavdeep Parhar 	next_txm = txm = &eq->maps[next_map_cidx];
2241*54e4ee71SNavdeep Parhar 	prefetch(txm);
2242*54e4ee71SNavdeep Parhar 
2243*54e4ee71SNavdeep Parhar 	maps = reclaimed = 0;
2244*54e4ee71SNavdeep Parhar 	do {
2245*54e4ee71SNavdeep Parhar 		int ndesc;
2246*54e4ee71SNavdeep Parhar 
2247*54e4ee71SNavdeep Parhar 		txsd = &eq->sdesc[eq->cidx];
2248*54e4ee71SNavdeep Parhar 		ndesc = txsd->desc_used;
2249*54e4ee71SNavdeep Parhar 
2250*54e4ee71SNavdeep Parhar 		/* Firmware doesn't return "partial" credits. */
2251*54e4ee71SNavdeep Parhar 		KASSERT(can_reclaim >= ndesc,
2252*54e4ee71SNavdeep Parhar 		    ("%s: unexpected number of credits: %d, %d",
2253*54e4ee71SNavdeep Parhar 		    __func__, can_reclaim, ndesc));
2254*54e4ee71SNavdeep Parhar 
2255*54e4ee71SNavdeep Parhar 		maps += txsd->map_used;
2256*54e4ee71SNavdeep Parhar 		reclaimed += ndesc;
2257*54e4ee71SNavdeep Parhar 
2258*54e4ee71SNavdeep Parhar 		eq->cidx += ndesc;
2259*54e4ee71SNavdeep Parhar 		if (eq->cidx >= eq->cap)
2260*54e4ee71SNavdeep Parhar 			eq->cidx -= eq->cap;
2261*54e4ee71SNavdeep Parhar 
2262*54e4ee71SNavdeep Parhar 		can_reclaim -= ndesc;
2263*54e4ee71SNavdeep Parhar 
2264*54e4ee71SNavdeep Parhar 	} while (can_reclaim && reclaimed < howmany);
2265*54e4ee71SNavdeep Parhar 
2266*54e4ee71SNavdeep Parhar 	eq->avail += reclaimed;
2267*54e4ee71SNavdeep Parhar 	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
2268*54e4ee71SNavdeep Parhar 	    ("%s: too many descriptors available", __func__));
2269*54e4ee71SNavdeep Parhar 
2270*54e4ee71SNavdeep Parhar 	eq->map_avail += maps;
2271*54e4ee71SNavdeep Parhar 	KASSERT(eq->map_avail <= eq->map_total,
2272*54e4ee71SNavdeep Parhar 	    ("%s: too many maps available", __func__));
2273*54e4ee71SNavdeep Parhar 
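	/* Unload and free the consumed maps, prefetching one mbuf ahead. */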
2274*54e4ee71SNavdeep Parhar 	prefetch(txm->m);
2275*54e4ee71SNavdeep Parhar 	while (maps--) {
2276*54e4ee71SNavdeep Parhar 		next_txm++;
2277*54e4ee71SNavdeep Parhar 		if (++next_map_cidx == eq->map_total) {
2278*54e4ee71SNavdeep Parhar 			next_map_cidx = 0;
2279*54e4ee71SNavdeep Parhar 			next_txm = eq->maps;
2280*54e4ee71SNavdeep Parhar 		}
2281*54e4ee71SNavdeep Parhar 		prefetch(next_txm->m);
2282*54e4ee71SNavdeep Parhar 
2283*54e4ee71SNavdeep Parhar 		bus_dmamap_unload(eq->tx_tag, txm->map);
2284*54e4ee71SNavdeep Parhar 		m_freem(txm->m);
2285*54e4ee71SNavdeep Parhar 		txm->m = NULL;
2286*54e4ee71SNavdeep Parhar 
2287*54e4ee71SNavdeep Parhar 		txm = next_txm;
2288*54e4ee71SNavdeep Parhar 	}
2289*54e4ee71SNavdeep Parhar 	eq->map_cidx = next_map_cidx;
2290*54e4ee71SNavdeep Parhar 
2291*54e4ee71SNavdeep Parhar 	return (reclaimed);
2292*54e4ee71SNavdeep Parhar }
2293*54e4ee71SNavdeep Parhar 
2294*54e4ee71SNavdeep Parhar static void
2295*54e4ee71SNavdeep Parhar write_eqflush_wr(struct sge_eq *eq)
2296*54e4ee71SNavdeep Parhar {
2297*54e4ee71SNavdeep Parhar 	struct fw_eq_flush_wr *wr;
2298*54e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
2299*54e4ee71SNavdeep Parhar 
2300*54e4ee71SNavdeep Parhar 	EQ_LOCK_ASSERT_OWNED(eq);
2301*54e4ee71SNavdeep Parhar 	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
2302*54e4ee71SNavdeep Parhar 
2303*54e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
2304*54e4ee71SNavdeep Parhar 	bzero(wr, sizeof(*wr));
2305*54e4ee71SNavdeep Parhar 	wr->opcode = FW_EQ_FLUSH_WR;
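	/* EQUEQ/EQUIQ request an equeue update and an interrupt from fw. */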
2306*54e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
2307*54e4ee71SNavdeep Parhar 	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
2308*54e4ee71SNavdeep Parhar 
2309*54e4ee71SNavdeep Parhar 	txsd = &eq->sdesc[eq->pidx];
2310*54e4ee71SNavdeep Parhar 	txsd->desc_used = 1;
2311*54e4ee71SNavdeep Parhar 	txsd->map_used = 0;
2312*54e4ee71SNavdeep Parhar 
2313*54e4ee71SNavdeep Parhar 	eq->pending++;
2314*54e4ee71SNavdeep Parhar 	eq->avail--;
2315*54e4ee71SNavdeep Parhar 	if (++eq->pidx == eq->cap)
2316*54e4ee71SNavdeep Parhar 		eq->pidx = 0;
2317*54e4ee71SNavdeep Parhar }
2318*54e4ee71SNavdeep Parhar 
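/*
 * Returns flit 'idx' of the SGL tail (the part after len0/addr0).  Every
 * three flits describe two segments: the two lengths share the first flit
 * and each address takes a flit of its own.  e.g. for extra segments A, B,
 * C: idx 0 = lenA|lenB, idx 1 = addrA, idx 2 = addrB, idx 3 = lenC|0,
 * idx 4 = addrC.
 */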
2319*54e4ee71SNavdeep Parhar static __be64
2320*54e4ee71SNavdeep Parhar get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
2321*54e4ee71SNavdeep Parhar {
2322*54e4ee71SNavdeep Parhar 	int i = (idx / 3) * 2;
2323*54e4ee71SNavdeep Parhar 
2324*54e4ee71SNavdeep Parhar 	switch (idx % 3) {
	case 0: {
		uint64_t rc;

		/* Two 32-bit lengths packed into one flit, endian-safely. */
		rc = (uint64_t)sgl[i].ds_len << 32;
		if (i + 1 < nsegs)
			rc |= (uint64_t)sgl[i + 1].ds_len;

		return (htobe64(rc));
	}
	case 1:
		return (htobe64(sgl[i].ds_addr));
	case 2:
		return (htobe64(sgl[i + 1].ds_addr));
2338*54e4ee71SNavdeep Parhar 	}
2339*54e4ee71SNavdeep Parhar 
2340*54e4ee71SNavdeep Parhar 	return (0);
2341*54e4ee71SNavdeep Parhar }
2342*54e4ee71SNavdeep Parhar 
2343*54e4ee71SNavdeep Parhar static struct mbuf *
2344*54e4ee71SNavdeep Parhar get_fl_sdesc_data(struct sge_fl *fl, int len, int flags)
2345*54e4ee71SNavdeep Parhar {
2346*54e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
2347*54e4ee71SNavdeep Parhar 	struct mbuf *m;
2348*54e4ee71SNavdeep Parhar 
2349*54e4ee71SNavdeep Parhar 	sd = &fl->sdesc[fl->cidx];
2350*54e4ee71SNavdeep Parhar 	FL_LOCK(fl);
2351*54e4ee71SNavdeep Parhar 	if (++fl->cidx == fl->cap)
2352*54e4ee71SNavdeep Parhar 		fl->cidx = 0;
2353*54e4ee71SNavdeep Parhar 	fl->needed++;
2354*54e4ee71SNavdeep Parhar 	FL_UNLOCK(fl);
2355*54e4ee71SNavdeep Parhar 
2356*54e4ee71SNavdeep Parhar 	m = sd->m;
2357*54e4ee71SNavdeep Parhar 	if (m == NULL) {
2358*54e4ee71SNavdeep Parhar 		m = m_gethdr(M_NOWAIT, MT_NOINIT);
2359*54e4ee71SNavdeep Parhar 		if (m == NULL)
2360*54e4ee71SNavdeep Parhar 			return (NULL);
2361*54e4ee71SNavdeep Parhar 	}
2362*54e4ee71SNavdeep Parhar 	sd->m = NULL;	/* consumed */
2363*54e4ee71SNavdeep Parhar 
2364*54e4ee71SNavdeep Parhar 	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
2365*54e4ee71SNavdeep Parhar 	m_init(m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, flags);
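	/*
	 * Copy the payload into the mbuf itself if it fits in the mbuf's own
	 * storage; otherwise attach the cluster to the mbuf.
	 */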
2366*54e4ee71SNavdeep Parhar 	if ((flags && len < MINCLSIZE) || (!flags && len <= MLEN))
2367*54e4ee71SNavdeep Parhar 		bcopy(sd->cl, mtod(m, caddr_t), len);
2368*54e4ee71SNavdeep Parhar 	else {
2369*54e4ee71SNavdeep Parhar 		bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
2370*54e4ee71SNavdeep Parhar 		m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
2371*54e4ee71SNavdeep Parhar 		sd->cl = NULL;	/* consumed */
2372*54e4ee71SNavdeep Parhar 	}
2373*54e4ee71SNavdeep Parhar 
2374*54e4ee71SNavdeep Parhar 	m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
2375*54e4ee71SNavdeep Parhar 
2376*54e4ee71SNavdeep Parhar 	return (m);
2377*54e4ee71SNavdeep Parhar }
2378*54e4ee71SNavdeep Parhar 
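/*
 * Picks the smallest free list buffer size that can hold a frame of the
 * given MTU plus the FL_PKTSHIFT bytes the chip prepends, falling back to
 * the largest size if nothing is big enough.
 */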
2379*54e4ee71SNavdeep Parhar static void
2380*54e4ee71SNavdeep Parhar set_fl_tag_idx(struct sge_fl *fl, int mtu)
2381*54e4ee71SNavdeep Parhar {
2382*54e4ee71SNavdeep Parhar 	int i;
2383*54e4ee71SNavdeep Parhar 
2384*54e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
2385*54e4ee71SNavdeep Parhar 
2386*54e4ee71SNavdeep Parhar 	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
2387*54e4ee71SNavdeep Parhar 		if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
2388*54e4ee71SNavdeep Parhar 			break;
2389*54e4ee71SNavdeep Parhar 	}
2390*54e4ee71SNavdeep Parhar 
2391*54e4ee71SNavdeep Parhar 	fl->tag_idx = i;
2392*54e4ee71SNavdeep Parhar }
2393