/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"

struct fl_buf_info {
	int size;
	int type;
	uma_zone_t zone;
};

/* Filled up by t4_sge_modload */
static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];

#define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
#define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
#define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)

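/*
 * Frames with a payload shorter than this are copied into a freshly
 * allocated mbuf and the freelist buffer is recycled (see get_fl_payload);
 * longer frames hand the underlying cluster itself up the stack.
 */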
#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
static int fl_pktshift = 2;
TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  Any power of 2, from 32 to 4096 (both inclusive) is a valid value.
 */
static int fl_pad = -1;
TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
static int spg_len = -1;
TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);

/* Used to track a coalesced tx work request */
struct txpkts {
	uint64_t *flitp;	/* ptr to flit where next pkt should start */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t nflits;		/* # of flits used by this work request */
	uint16_t plen;		/* total payload (sum of all packets) */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
	int nflits;		/* # of flits needed for the SGL */
	bus_dma_segment_t seg[TX_SGL_SEGS];
};

static int service_iq(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t,
    int *);
static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
    int);
static inline void init_fl(struct sge_fl *, int, int, char *);
static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
    char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_mgmtq(struct adapter *);
static int free_mgmtq(struct adapter *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct port_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
#ifdef TCP_OFFLOAD
static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct port_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct port_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
static inline void iq_next(struct sge_iq *);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct sge_fl *);
static void set_fl_tag_idx(struct sge_fl *, int);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
static int free_pkt_sgl(struct sge_txq *, struct sgl *);
static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
    struct sgl *);
static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
    struct mbuf *, struct sgl *);
static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
    struct txpkts *, struct mbuf *, struct sgl *);
static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *);
static inline int reclaimable(struct sge_eq *);
static int reclaim_tx_descs(struct sge_txq *, int, int);
static void write_eqflush_wr(struct sge_eq *);
static __be64 get_flit(bus_dma_segment_t *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);

#if defined(__i386__) || defined(__amd64__)
extern u_int cpu_clflush_line_size;
#endif

/*
 * Called on MOD_LOAD.  Fills up fl_buf_info[] and validates/calculates the SGE
 * tunables.
 */
void
t4_sge_modload(void)
{
	int i;
	int bufsize[FL_BUF_SIZES] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	for (i = 0; i < FL_BUF_SIZES; i++) {
		FL_BUF_SIZE(i) = bufsize[i];
		FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
		FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
	}

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 2 instead.\n", fl_pktshift);
		fl_pktshift = 2;
	}

	if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) {
		int pad;

#if defined(__i386__) || defined(__amd64__)
		pad = max(cpu_clflush_line_size, 32);
#else
		pad = max(CACHE_LINE_SIZE, 32);
#endif
		pad = min(pad, 4096);

		if (fl_pad != -1) {
			printf("Invalid hw.cxgbe.fl_pad value (%d),"
			    " using %d instead.\n", fl_pad, pad);
		}
		fl_pad = pad;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}
}

void
t4_init_sge_cpl_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
}

/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE |
	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_INGPADBOUNDARY(ilog2(fl_pad) - 5) |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	for (i = 0; i < FL_BUF_SIZES; i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
		    FL_BUF_SIZE(i));
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
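
	/*
	 * Clip timers that exceed timer_max: each offending value is
	 * repeatedly averaged with its (smaller) predecessor until it fits,
	 * and the very last timer is simply clamped to timer_max.
	 */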
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (cong_drop == 0) {
		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
		    F_TUNNELCNGDROP3;
		t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0);
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	m = v = F_TDDPTAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	int i, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE |
	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_INGPADBOUNDARY(ilog2(fl_pad) - 5) |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	r = t4_read_reg(sc, A_SGE_CONTROL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
	if (r != v) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	for (i = 0; i < FL_BUF_SIZES; i++) {
		v = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
		if (v != FL_BUF_SIZE(i)) {
			device_printf(sc->dev,
			    "invalid SGE_FL_BUFFER_SIZE[%d](0x%x)\n", i, v);
			rc = EINVAL;
		}
	}

	r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = G_THRESHOLD_0(r);
	s->counter_val[1] = G_THRESHOLD_1(r);
	s->counter_val[2] = G_THRESHOLD_2(r);
	s->counter_val[3] = G_THRESHOLD_3(r);

	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
	s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc);
	s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc);
	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
	s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc);
	s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc);
	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc);
	s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc);

	if (cong_drop == 0) {
		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
		    F_TUNNELCNGDROP3;
		r = t4_read_reg(sc, A_TP_PARA_REG3);
		if (r & m) {
			device_printf(sc->dev,
			    "invalid TP_PARA_REG3(0x%x)\n", r);
			rc = EINVAL;
		}
	}

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	r = t4_read_reg(sc, A_SGE_CONM_CTRL);
	s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;

	if (is_t5(sc)) {
		r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
		r >>= S_QUEUESPERPAGEPF0 +
		    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
		s->s_qpp = r & M_QUEUESPERPAGEPF0;
	}

	t4_init_tp_params(sc);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

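/*
 * Export the (read-only) SGE tunables under the given sysctl tree, where they
 * can be inspected at runtime, e.g. with something like
 * "sysctl dev.t4nex.0.fl_pktshift" (a sketch; the exact node depends on the
 * oid list the caller passes in).
 */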
502 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
503     struct sysctl_oid_list *children)
504 {
505 
506 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
507 	    NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)");
508 
509 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
510 	    NULL, fl_pad, "payload pad boundary (bytes)");
511 
512 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
513 	    NULL, spg_len, "status page size (bytes)");
514 
515 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
516 	    NULL, cong_drop, "congestion drop setting");
517 }
518 
519 int
520 t4_destroy_dma_tag(struct adapter *sc)
521 {
522 	if (sc->dmat)
523 		bus_dma_tag_destroy(sc->dmat);
524 
525 	return (0);
526 }
527 
528 /*
529  * Allocate and initialize the firmware event queue and the management queue.
530  *
531  * Returns errno on failure.  Resources allocated up to that point may still be
532  * allocated.  Caller is responsible for cleanup in case this function fails.
533  */
534 int
535 t4_setup_adapter_queues(struct adapter *sc)
536 {
537 	int rc;
538 
539 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
540 
541 	sysctl_ctx_init(&sc->ctx);
542 	sc->flags |= ADAP_SYSCTL_CTX;
543 
544 	/*
545 	 * Firmware event queue
546 	 */
547 	rc = alloc_fwq(sc);
548 	if (rc != 0)
549 		return (rc);
550 
551 	/*
552 	 * Management queue.  This is just a control queue that uses the fwq as
553 	 * its associated iq.
554 	 */
555 	rc = alloc_mgmtq(sc);
556 
557 	return (rc);
558 }
559 
560 /*
561  * Idempotent
562  */
563 int
564 t4_teardown_adapter_queues(struct adapter *sc)
565 {
566 
567 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
568 
569 	/* Do this before freeing the queue */
570 	if (sc->flags & ADAP_SYSCTL_CTX) {
571 		sysctl_ctx_free(&sc->ctx);
572 		sc->flags &= ~ADAP_SYSCTL_CTX;
573 	}
574 
575 	free_mgmtq(sc);
576 	free_fwq(sc);
577 
578 	return (0);
579 }
580 
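/*
 * Returns the index of the first interrupt vector reserved for this port's
 * queues: the adapter-wide vectors (T4_EXTRA_INTR) come first, followed by
 * the vectors of every port that precedes this one.
 */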
static inline int
first_vector(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int rc = T4_EXTRA_INTR, i;

	if (sc->intr_count == 1)
		return (0);

	for_each_port(sc, i) {
		struct port_info *p = sc->port[i];

		if (i == pi->port_id)
			break;

#ifdef TCP_OFFLOAD
		if (sc->flags & INTR_DIRECT)
			rc += p->nrxq + p->nofldrxq;
		else
			rc += max(p->nrxq, p->nofldrxq);
#else
		/*
		 * Not compiled with offload support and intr_count > 1.  Only
		 * NIC queues exist and they'd better be taking direct
		 * interrupts.
		 */
		KASSERT(sc->flags & INTR_DIRECT,
		    ("%s: intr_count %d, !INTR_DIRECT", __func__,
		    sc->intr_count));

		rc += p->nrxq;
#endif
	}

	return (rc);
}

/*
 * Given an arbitrary "index," come up with an iq that can be used by other
 * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
 * The iq returned is guaranteed to be something that takes direct interrupts.
 */
static struct sge_iq *
port_intr_iq(struct port_info *pi, int idx)
{
	struct adapter *sc = pi->adapter;
	struct sge *s = &sc->sge;
	struct sge_iq *iq = NULL;

	if (sc->intr_count == 1)
		return (&sc->sge.fwq);

#ifdef TCP_OFFLOAD
	if (sc->flags & INTR_DIRECT) {
		idx %= pi->nrxq + pi->nofldrxq;

		if (idx >= pi->nrxq) {
			idx -= pi->nrxq;
			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
		} else
			iq = &s->rxq[pi->first_rxq + idx].iq;

	} else {
		idx %= max(pi->nrxq, pi->nofldrxq);

		if (pi->nrxq >= pi->nofldrxq)
			iq = &s->rxq[pi->first_rxq + idx].iq;
		else
			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
	}
#else
	/*
	 * Not compiled with offload support and intr_count > 1.  Only NIC
	 * queues exist and they'd better be taking direct interrupts.
	 */
	KASSERT(sc->flags & INTR_DIRECT,
	    ("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count));

	idx %= pi->nrxq;
	iq = &s->rxq[pi->first_rxq + idx].iq;
#endif

	KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__));
	return (iq);
}
static inline int
mtu_to_bufsize(int mtu)
{
	int bufsize;

	/* large enough for a frame even when VLAN extraction is disabled */
	bufsize = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + mtu;
	bufsize = roundup2(bufsize + fl_pktshift, fl_pad);

	return (bufsize);
}

#ifdef TCP_OFFLOAD
static inline int
mtu_to_bufsize_toe(struct adapter *sc, int mtu)
{

	if (sc->tt.rx_coalesce)
		return (G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)));

	return (mtu);
}
#endif

int
t4_setup_port_queues(struct port_info *pi)
{
	int rc = 0, i, j, intr_idx, iqid;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_wrq *ctrlq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
	struct sysctl_oid *oid2 = NULL;
#endif
	char name[16];
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int bufsize;

	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD,
	    NULL, "rx queues");

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
		    CTLFLAG_RD, NULL,
		    "rx queues for offloaded TCP connections");
	}
#endif

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = first_vector(pi);

	/*
	 * First pass over all rx queues (NIC and TOE):
	 * a) initialize iq and fl
	 * b) allocate queue iff it will take direct interrupts.
	 */
	bufsize = mtu_to_bufsize(ifp->if_mtu);
	for_each_rxq(pi, i, rxq) {

		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq,
		    RX_IQ_ESIZE);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(pi->dev), i);
		init_fl(&rxq->fl, pi->qsize_rxq / 8, bufsize, name);

		if (sc->flags & INTR_DIRECT
#ifdef TCP_OFFLOAD
		    || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
#endif
		   ) {
			rxq->iq.flags |= IQ_INTR;
			rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
			if (rc != 0)
				goto done;
			intr_idx++;
		}
	}

#ifdef TCP_OFFLOAD
	bufsize = mtu_to_bufsize_toe(sc, ifp->if_mtu);
	for_each_ofld_rxq(pi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
		    pi->qsize_rxq, RX_IQ_ESIZE);

		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
		    device_get_nameunit(pi->dev), i);
		init_fl(&ofld_rxq->fl, pi->qsize_rxq / 8, bufsize, name);

		if (sc->flags & INTR_DIRECT ||
		    (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
			ofld_rxq->iq.flags |= IQ_INTR;
			rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
			if (rc != 0)
				goto done;
			intr_idx++;
		}
	}
#endif

	/*
	 * Second pass over all rx queues (NIC and TOE).  The queues forwarding
	 * their interrupts are allocated now.
	 */
	j = 0;
	for_each_rxq(pi, i, rxq) {
		if (rxq->iq.flags & IQ_INTR)
			continue;

		intr_idx = port_intr_iq(pi, j)->abs_id;

		rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
		if (rc != 0)
			goto done;
		j++;
	}

#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if (ofld_rxq->iq.flags & IQ_INTR)
			continue;

		intr_idx = port_intr_iq(pi, j)->abs_id;

		rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
		if (rc != 0)
			goto done;
		j++;
	}
#endif

	/*
	 * Now the tx queues.  Only one pass needed.
	 */
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
	    NULL, "tx queues");
	j = 0;
	for_each_txq(pi, i, txq) {
		uint16_t iqid;

		iqid = port_intr_iq(pi, j)->cntxt_id;

		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(pi->dev), i);
		init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
		    name);

		rc = alloc_txq(pi, txq, i, oid);
		if (rc != 0)
			goto done;
		j++;
	}

#ifdef TCP_OFFLOAD
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
	for_each_ofld_txq(pi, i, ofld_txq) {
		uint16_t iqid;

		iqid = port_intr_iq(pi, j)->cntxt_id;

		snprintf(name, sizeof(name), "%s ofld_txq%d",
		    device_get_nameunit(pi->dev), i);
		init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
		    iqid, name);

		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD, NULL, "offload tx queue");

		rc = alloc_wrq(sc, pi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
		j++;
	}
#endif

	/*
	 * Finally, the control queue.
	 */
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
	    NULL, "ctrl queue");
	ctrlq = &sc->sge.ctrlq[pi->port_id];
	iqid = port_intr_iq(pi, 0)->cntxt_id;
	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev));
	init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
	rc = alloc_wrq(sc, pi, ctrlq, oid);

done:
	if (rc)
		t4_teardown_port_queues(pi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_port_queues(struct port_info *pi)
{
	int i;
	struct adapter *sc = pi->adapter;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif

	/* Do this before freeing the queues */
	if (pi->flags & PORT_SYSCTL_CTX) {
		sysctl_ctx_free(&pi->ctx);
		pi->flags &= ~PORT_SYSCTL_CTX;
	}

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

	for_each_txq(pi, i, txq) {
		free_txq(pi, txq);
	}

#ifdef TCP_OFFLOAD
	for_each_ofld_txq(pi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues that forward their interrupts, as they
	 * reference other rx queues.
	 */

	for_each_rxq(pi, i, rxq) {
		if ((rxq->iq.flags & IQ_INTR) == 0)
			free_rxq(pi, rxq);
	}

#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
			free_ofld_rxq(pi, ofld_rxq);
	}
#endif

	/*
	 * Then take down the rx queues that take direct interrupts.
	 */

	for_each_rxq(pi, i, rxq) {
		if (rxq->iq.flags & IQ_INTR)
			free_rxq(pi, rxq);
	}

#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if (ofld_rxq->iq.flags & IQ_INTR)
			free_ofld_rxq(pi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Deals with errors and the firmware event queue.  All data rx queues forward
 * their interrupt to the firmware event queue.
 */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;
	struct sge_iq *fwq = &sc->sge.fwq;

	t4_intr_err(arg);
	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(fwq, 0);
		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
	}
}

/* Deals with error interrupts */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
	t4_slow_intr_handler(sc);
}

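/*
 * An ingress queue is claimed with an atomic IDLE -> BUSY transition before
 * it is serviced, so at most one thread runs service_iq on a given iq at any
 * time; losing the race simply means another thread is already on it.
 */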
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

/*
 * Deals with anything and everything on the given ingress queue.
 */
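/*
 * Returns 0 if the queue was serviced to completion, or EINPROGRESS if the
 * descriptor budget ran out first (only possible when budget > 0).
 */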
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	const struct rss_header *rss;
	int ndescs = 0, limit, fl_bufs_used = 0;
	int rsp_type;
	uint32_t lq;
	struct mbuf *m0;
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);

	limit = budget ? budget : iq->qsize / 8;

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while (is_new_response(iq, &ctrl)) {

			rmb();

			m0 = NULL;
			rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
			lq = be32toh(ctrl->pldbuflen_qid);
			rss = (const void *)iq->cdesc;

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:

				KASSERT(iq->flags & IQ_HAS_FL,
				    ("%s: data for an iq (%p) with no freelist",
				    __func__, iq));

				m0 = get_fl_payload(sc, fl, lq, &fl_bufs_used);
#ifdef T4_PKT_TIMESTAMP
				/*
				 * 60 bit timestamp for the payload is
				 * *(uint64_t *)m0->m_pktdat.  Note that it is
				 * in the leading free-space in the mbuf.  The
				 * kernel can clobber it during a pullup,
				 * m_copymdata, etc.  You need to make sure that
				 * the mbuf reaches you unmolested if you care
				 * about the timestamp.
				 */
				*(uint64_t *)m0->m_pktdat =
				    be64toh(ctrl->u.last_flit) &
				    0xfffffffffffffff;
#endif

				/* fall through */

			case X_RSPD_TYPE_CPL:
				KASSERT(rss->opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    rss->opcode));
				sc->cpl_handler[rss->opcode](iq, rss, m0);
				break;

			case X_RSPD_TYPE_INTR:

				/*
				 * Interrupts should be forwarded only to queues
				 * that are not forwarding their interrupts.
				 * This means service_iq can recurse but only 1
				 * level deep.
				 */
				KASSERT(budget == 0,
				    ("%s: budget %u, rsp_type %u", __func__,
				    budget, rsp_type));

				q = sc->sge.iqmap[lq - sc->sge.iq_start];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq(q, q->qsize / 8) == 0) {
						atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				sc->an_handler(iq, ctrl);
				break;
			}

			iq_next(iq);
			if (++ndescs == limit) {
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

				if (fl_bufs_used > 0) {
					FL_LOCK(fl);
					fl->needed += fl_bufs_used;
					refill_fl(sc, fl, fl->cap / 8);
					FL_UNLOCK(fl);
					fl_bufs_used = 0;
				}

				if (budget)
					return (EINPROGRESS);
			}
		}

		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq(q, q->qsize / 8) == 0)
			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		struct lro_ctrl *lro = &rxq->lro;
		struct lro_entry *l;

		while (!SLIST_EMPTY(&lro->lro_active)) {
			l = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, l);
		}
	}
#endif

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	if (iq->flags & IQ_HAS_FL) {
		int starved;

		FL_LOCK(fl);
		fl->needed += fl_bufs_used;
		starved = refill_fl(sc, fl, fl->cap / 4);
		FL_UNLOCK(fl);
		if (__predict_false(starved != 0))
			add_fl_to_sfl(sc, fl);
	}

	return (0);
}

static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf,
    int *fl_bufs_used)
{
	struct mbuf *m0, *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	unsigned int nbuf, len;

	/*
	 * No assertion for the fl lock because we don't need it.  This routine
	 * is called only from the rx interrupt handler and it only updates
	 * fl->cidx.  (Contrast that with fl->pidx/fl->needed which could be
	 * updated in the rx interrupt handler or the starvation helper routine.
	 * That's why code that manipulates fl->pidx/fl->needed needs the fl
	 * lock but this routine does not).
	 */

	if (__predict_false((len_newbuf & F_RSPD_NEWBUF) == 0))
		panic("%s: cannot handle packed frames", __func__);
	len = G_RSPD_LEN(len_newbuf);

	m0 = sd->m;
	sd->m = NULL;	/* consumed */

	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
	m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
#ifdef T4_PKT_TIMESTAMP
	/* Leave room for a timestamp */
	m0->m_data += 8;
#endif

	if (len < RX_COPY_THRESHOLD) {
		/* copy data to mbuf, buffer will be recycled */
		bcopy(sd->cl, mtod(m0, caddr_t), len);
		m0->m_len = len;
	} else {
		bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
		m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
		sd->cl = NULL;	/* consumed */
		m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
	}
	m0->m_pkthdr.len = len;

	sd++;
	if (__predict_false(++fl->cidx == fl->cap)) {
		sd = fl->sdesc;
		fl->cidx = 0;
	}

	m = m0;
	len -= m->m_len;
	nbuf = 1;	/* # of fl buffers used */

	while (len > 0) {
		m->m_next = sd->m;
		sd->m = NULL;	/* consumed */
		m = m->m_next;

		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
		    BUS_DMASYNC_POSTREAD);

		m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
		if (len <= MLEN) {
			bcopy(sd->cl, mtod(m, caddr_t), len);
			m->m_len = len;
		} else {
			bus_dmamap_unload(fl->tag[sd->tag_idx],
			    sd->map);
			m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
			sd->cl = NULL;	/* consumed */
			m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
		}

		sd++;
		if (__predict_false(++fl->cidx == fl->cap)) {
			sd = fl->sdesc;
			fl->cidx = 0;
		}

		len -= m->m_len;
		nbuf++;
	}

	(*fl_bufs_used) += nbuf;

	return (m0);
}

static int
t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct ifnet *ifp = rxq->ifp;
	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	m0->m_pkthdr.len -= fl_pktshift;
	m0->m_len -= fl_pktshift;
	m0->m_data += fl_pktshift;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_flags |= M_FLOWID;
	m0->m_pkthdr.flowid = rss->hash_val;

	if (cpl->csum_calc && !cpl->err_vec) {
		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    cpl->l2info & htobe32(F_RXF_IP)) {
			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
		    cpl->l2info & htobe32(F_RXF_IP6)) {
			m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
			    CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		}

		if (__predict_false(cpl->ip_frag))
			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
		else
			m0->m_pkthdr.csum_data = 0xffff;
	}

	if (cpl->vlan_ex) {
		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
		m0->m_flags |= M_VLANTAG;
		rxq->vlan_extraction++;
	}

#if defined(INET) || defined(INET6)
	if (cpl->l2info & htobe32(F_RXF_LRO) &&
	    iq->flags & IQ_LRO_ENABLED &&
	    tcp_lro_rx(lro, m0, 0) == 0) {
		/* queued for LRO */
	} else
#endif
	ifp->if_input(ifp, m0);

	return (0);
}

/*
 * Doesn't fail.  Holds on to work requests it can't send right away.
 */
void
t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
{
	struct sge_eq *eq = &wrq->eq;
	int can_reclaim;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(wrq);
#ifdef TCP_OFFLOAD
	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
	    (eq->flags & EQ_TYPEMASK) == EQ_CTRL,
	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
#else
	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL,
	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
#endif

	if (__predict_true(wr != NULL))
		STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);

	can_reclaim = reclaimable(eq);
	if (__predict_false(eq->flags & EQ_STALLED)) {
		if (can_reclaim < tx_resume_threshold(eq))
			return;
		eq->flags &= ~EQ_STALLED;
		eq->unstalled++;
	}
	eq->cidx += can_reclaim;
	eq->avail += can_reclaim;
	if (__predict_false(eq->cidx >= eq->cap))
		eq->cidx -= eq->cap;

	while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) {
		int ndesc;

		if (__predict_false(wr->wr_len < 0 ||
		    wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) {

#ifdef INVARIANTS
			panic("%s: work request with length %d", __func__,
			    wr->wr_len);
#endif
#ifdef KDB
			kdb_backtrace();
#endif
			log(LOG_ERR, "%s: %s work request with length %d",
			    device_get_nameunit(sc->dev), __func__, wr->wr_len);
			STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
			free_wrqe(wr);
			continue;
		}

		ndesc = howmany(wr->wr_len, EQ_ESIZE);
		if (eq->avail < ndesc) {
			wrq->no_desc++;
			break;
		}

		dst = (void *)&eq->desc[eq->pidx];
		copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len);

		eq->pidx += ndesc;
		eq->avail -= ndesc;
		if (__predict_false(eq->pidx >= eq->cap))
			eq->pidx -= eq->cap;

		eq->pending += ndesc;
		if (eq->pending >= 8)
			ring_eq_db(sc, eq);

		wrq->tx_wrs++;
		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
		free_wrqe(wr);

		if (eq->avail < 8) {
			can_reclaim = reclaimable(eq);
			eq->cidx += can_reclaim;
			eq->avail += can_reclaim;
			if (__predict_false(eq->cidx >= eq->cap))
				eq->cidx -= eq->cap;
		}
	}

	if (eq->pending)
		ring_eq_db(sc, eq);

	if (wr != NULL) {
		eq->flags |= EQ_STALLED;
		if (callout_pending(&eq->tx_callout) == 0)
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
	}
}

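/*
 * The WR header sizes below are expressed in flits.  A flit is 8 bytes (one
 * 64-bit word), the unit in which the SGE measures work request headers and
 * descriptors; hence the divisions by 8.
 */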
/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
#define TXPKTS_PKT_HDR ((\
    sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
#define TXPKTS_WR_HDR (\
    sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
    TXPKTS_PKT_HDR)

/* Header of a tx WR, before SGL of first packet (in flits) */
#define TXPKT_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

/* Header of a tx LSO WR, before SGL of first packet (in flits) */
#define TXPKT_LSO_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_lso_core) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

int
t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
{
	struct port_info *pi = (void *)ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	struct buf_ring *br = txq->br;
	struct mbuf *next;
	int rc, coalescing, can_reclaim;
	struct txpkts txpkts;
	struct sgl sgl;

	TXQ_LOCK_ASSERT_OWNED(txq);
	KASSERT(m, ("%s: called with nothing to do.", __func__));
	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH,
	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));

	prefetch(&eq->desc[eq->pidx]);
	prefetch(&txq->sdesc[eq->pidx]);

	txpkts.npkt = 0;	/* indicates there's nothing in txpkts */
	coalescing = 0;

	can_reclaim = reclaimable(eq);
	if (__predict_false(eq->flags & EQ_STALLED)) {
		if (can_reclaim < tx_resume_threshold(eq)) {
			txq->m = m;
			return (0);
		}
		eq->flags &= ~EQ_STALLED;
		eq->unstalled++;
	}

	if (__predict_false(eq->flags & EQ_DOOMED)) {
		m_freem(m);
		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
			m_freem(m);
		return (ENETDOWN);
	}

	if (eq->avail < 8 && can_reclaim)
		reclaim_tx_descs(txq, can_reclaim, 32);

	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {

		if (eq->avail < 8)
			break;

		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (next || buf_ring_peek(br))
			coalescing = 1;

		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
		if (rc != 0) {
			if (rc == ENOMEM) {

				/* Short of resources, suspend tx */

				m->m_nextpkt = next;
				break;
			}

			/*
			 * Unrecoverable error for this packet, throw it away
			 * and move on to the next.  get_pkt_sgl may already
			 * have freed m (it will be NULL in that case and the
			 * m_freem here is still safe).
			 */

			m_freem(m);
			continue;
		}

		if (coalescing &&
		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {

			/* Successfully absorbed into txpkts */

			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
			goto doorbell;
		}

		/*
		 * We weren't coalescing to begin with, or current frame could
		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
		 * given to it can't be coalesced).  Either way there should be
		 * nothing in txpkts.
		 */
		KASSERT(txpkts.npkt == 0,
		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));

		/* We're sending out individual packets now */
		coalescing = 0;

		if (eq->avail < 8)
			reclaim_tx_descs(txq, 0, 8);
		rc = write_txpkt_wr(pi, txq, m, &sgl);
		if (rc != 0) {

			/* Short of hardware descriptors, suspend tx */

			/*
			 * This is an unlikely but expensive failure.  We've
			 * done all the hard work (DMA mappings etc.) and now we
			 * can't send out the packet.  What's worse, we have to
			 * spend even more time freeing up everything in sgl.
			 */
			txq->no_desc++;
			free_pkt_sgl(txq, &sgl);

			m->m_nextpkt = next;
			break;
		}

		ETHER_BPF_MTAP(ifp, m);
		if (sgl.nsegs == 0)
			m_freem(m);
doorbell:
		if (eq->pending >= 8)
			ring_eq_db(sc, eq);

		can_reclaim = reclaimable(eq);
		if (can_reclaim >= 32)
			reclaim_tx_descs(txq, can_reclaim, 64);
	}

	if (txpkts.npkt > 0)
		write_txpkts_wr(txq, &txpkts);

	/*
	 * m not NULL means there was an error but we haven't thrown it away.
	 * This can happen when we're short of tx descriptors (no_desc) or maybe
	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
	 * will get things going again.
	 */
	if (m && !(eq->flags & EQ_CRFLUSHED)) {
		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];

		/*
		 * If EQ_CRFLUSHED is not set then we know we have at least one
		 * available descriptor because any WR that reduces eq->avail to
		 * 0 also sets EQ_CRFLUSHED.
		 */
		KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__));

		txsd->desc_used = 1;
		txsd->credits = 0;
		write_eqflush_wr(eq);
	}
	txq->m = m;

	if (eq->pending)
		ring_eq_db(sc, eq);

	reclaim_tx_descs(txq, 0, 128);

	if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0)
		callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);

	return (0);
}

void
t4_update_fl_bufsize(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	struct sge_fl *fl;
	int i, bufsize;

	bufsize = mtu_to_bufsize(ifp->if_mtu);
	for_each_rxq(pi, i, rxq) {
		fl = &rxq->fl;

		FL_LOCK(fl);
		set_fl_tag_idx(fl, bufsize);
		FL_UNLOCK(fl);
	}
#ifdef TCP_OFFLOAD
	bufsize = mtu_to_bufsize_toe(pi->adapter, ifp->if_mtu);
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		fl = &ofld_rxq->fl;

		FL_LOCK(fl);
		set_fl_tag_idx(fl, bufsize);
		FL_UNLOCK(fl);
	}
#endif
}

int
can_resume_tx(struct sge_eq *eq)
{
	return (reclaimable(eq) >= tx_resume_threshold(eq));
}

static inline void
init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
    int qsize, int esize)
{
	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
	    ("%s: bad pktc_idx %d", __func__, pktc_idx));

	iq->flags = 0;
	iq->adapter = sc;
	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
	if (pktc_idx >= 0) {
		iq->intr_params |= F_QINTR_CNT_EN;
		iq->intr_pktc_idx = pktc_idx;
	}
	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
}

static inline void
init_fl(struct sge_fl *fl, int qsize, int bufsize, char *name)
{
	fl->qsize = qsize;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
	set_fl_tag_idx(fl, bufsize);
}

static inline void
init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan,
    uint16_t iqid, char *name)
{
	KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan));
	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));

	eq->flags = eqtype & EQ_TYPEMASK;
	eq->tx_chan = tx_chan;
	eq->iqid = iqid;
	eq->qsize = qsize;
	strlcpy(eq->lockname, name, sizeof(eq->lockname));

	TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq);
	callout_init(&eq->tx_callout, CALLOUT_MPSAFE);
}

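/*
 * Allocates a DMA ring of the requested length: creates a tag, allocates
 * zeroed bus-coherent memory for the descriptors, and loads the map to
 * obtain the ring's bus address.  Cleans up after itself on failure.
 */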
static int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}

static int
free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t pa, void *va)
{
	if (pa)
		bus_dmamap_unload(tag, map);
	if (va)
		bus_dmamem_free(tag, va, map);
	if (tag)
		bus_dma_tag_destroy(tag);

	return (0);
}

/*
 * Allocates the ring for an ingress queue and an optional freelist.  If the
 * freelist is specified it will be allocated and then associated with the
 * ingress queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 *
 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
 * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
 * the abs_id of the ingress queue to which its interrupts should be forwarded.
 */
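/*
 * A negative "cong" disables congestion feedback for this queue; a
 * non-negative value enables it and is used as the congestion channel map
 * for the freelist.
 */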
1728 static int
1729 alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
1730     int intr_idx, int cong)
1731 {
1732 	int rc, i, cntxt_id;
1733 	size_t len;
1734 	struct fw_iq_cmd c;
1735 	struct adapter *sc = iq->adapter;
1736 	__be32 v = 0;
1737 
1738 	len = iq->qsize * iq->esize;
1739 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
1740 	    (void **)&iq->desc);
1741 	if (rc != 0)
1742 		return (rc);
1743 
1744 	bzero(&c, sizeof(c));
1745 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1746 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
1747 	    V_FW_IQ_CMD_VFN(0));
1748 
1749 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1750 	    FW_LEN16(c));
1751 
1752 	/* Special handling for firmware event queue */
1753 	if (iq == &sc->sge.fwq)
1754 		v |= F_FW_IQ_CMD_IQASYNCH;
1755 
1756 	if (iq->flags & IQ_INTR) {
1757 		KASSERT(intr_idx < sc->intr_count,
1758 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
1759 	} else
1760 		v |= F_FW_IQ_CMD_IQANDST;
1761 	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
1762 
1763 	c.type_to_iqandstindex = htobe32(v |
1764 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1765 	    V_FW_IQ_CMD_VIID(pi->viid) |
1766 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
1767 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
1768 	    F_FW_IQ_CMD_IQGTSMODE |
1769 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
1770 	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
1771 	c.iqsize = htobe16(iq->qsize);
1772 	c.iqaddr = htobe64(iq->ba);
1773 	if (cong >= 0)
1774 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
1775 
1776 	if (fl) {
1777 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
1778 
1779 		for (i = 0; i < FL_BUF_SIZES; i++) {
1780 
1781 			/*
1782 			 * A freelist buffer must be 16 byte aligned as the SGE
1783 			 * uses the low 4 bits of the bus addr to figure out the
1784 			 * buffer size.
1785 			 */
1786 			rc = bus_dma_tag_create(sc->dmat, 16, 0,
1787 			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1788 			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
1789 			    NULL, NULL, &fl->tag[i]);
1790 			if (rc != 0) {
1791 				device_printf(sc->dev,
1792 				    "failed to create fl DMA tag[%d]: %d\n",
1793 				    i, rc);
1794 				return (rc);
1795 			}
1796 		}
1797 		len = fl->qsize * RX_FL_ESIZE;
1798 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
1799 		    &fl->ba, (void **)&fl->desc);
1800 		if (rc)
1801 			return (rc);
1802 
1803 		/* Allocate space for one software descriptor per buffer. */
1804 		fl->cap = (fl->qsize - spg_len / RX_FL_ESIZE) * 8;
1805 		rc = alloc_fl_sdesc(fl);
1806 		if (rc != 0) {
1807 			device_printf(sc->dev,
1808 			    "failed to setup fl software descriptors: %d\n",
1809 			    rc);
1810 			return (rc);
1811 		}
1812 		fl->needed = fl->cap;
1813 		fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8);
1814 
1815 		c.iqns_to_fl0congen |=
1816 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1817 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1818 			F_FW_IQ_CMD_FL0PADEN);
1819 		if (cong >= 0) {
1820 			c.iqns_to_fl0congen |=
1821 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1822 				    F_FW_IQ_CMD_FL0CONGCIF |
1823 				    F_FW_IQ_CMD_FL0CONGEN);
1824 		}
1825 		c.fl0dcaen_to_fl0cidxfthresh =
1826 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
1827 			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
1828 		c.fl0size = htobe16(fl->qsize);
1829 		c.fl0addr = htobe64(fl->ba);
1830 	}
1831 
1832 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1833 	if (rc != 0) {
1834 		device_printf(sc->dev,
1835 		    "failed to create ingress queue: %d\n", rc);
1836 		return (rc);
1837 	}
1838 
1839 	iq->cdesc = iq->desc;
1840 	iq->cidx = 0;
1841 	iq->gen = 1;
1842 	iq->intr_next = iq->intr_params;
1843 	iq->cntxt_id = be16toh(c.iqid);
1844 	iq->abs_id = be16toh(c.physiqid);
1845 	iq->flags |= IQ_ALLOCATED;
1846 
1847 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1848 	if (cntxt_id >= sc->sge.niq) {
1849 		panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1850 		    cntxt_id, sc->sge.niq - 1);
1851 	}
1852 	sc->sge.iqmap[cntxt_id] = iq;
1853 
1854 	if (fl) {
1855 		fl->cntxt_id = be16toh(c.fl0id);
1856 		fl->pidx = fl->cidx = 0;
1857 
1858 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1859 		if (cntxt_id >= sc->sge.neq) {
1860 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
1861 			    __func__, cntxt_id, sc->sge.neq - 1);
1862 		}
1863 		sc->sge.eqmap[cntxt_id] = (void *)fl;
1864 
1865 		FL_LOCK(fl);
1866 		/* Enough to make sure the SGE doesn't think it's starved */
1867 		refill_fl(sc, fl, fl->lowat);
1868 		FL_UNLOCK(fl);
1869 
1870 		iq->flags |= IQ_HAS_FL;
1871 	}
1872 
1873 	if (is_t5(sc) && cong >= 0) {
1874 		uint32_t param, val;
1875 
1876 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1877 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1878 		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
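		/*
		 * The congestion manager context is built by hand here.
		 * Going by the CNGTPMODE/CNGCHMAP names that later revisions
		 * of this driver use for these fields: bits 19:18 select the
		 * response to congestion (1 appears to mean a per-queue
		 * response, 2 a channel-map based one), and in the latter
		 * case each congested channel i sets the low bit of nibble i
		 * in the 16-bit channel map.
		 */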
1879 		if (cong == 0)
1880 			val = 1 << 19;
1881 		else {
1882 			val = 2 << 19;
1883 			for (i = 0; i < 4; i++) {
1884 				if (cong & (1 << i))
1885 					val |= 1 << (i << 2);
1886 			}
1887 		}
1888 
1889 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1890 		if (rc != 0) {
1891 			/* report error but carry on */
1892 			device_printf(sc->dev,
1893 			    "failed to set congestion manager context for "
1894 			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
1895 		}
1896 	}
1897 
1898 	/* Enable IQ interrupts */
1899 	atomic_store_rel_int(&iq->state, IQS_IDLE);
1900 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
1901 	    V_INGRESSQID(iq->cntxt_id));
1902 
1903 	return (0);
1904 }
1905 
1906 static int
1907 free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
1908 {
1909 	int i, rc;
1910 	struct adapter *sc = iq->adapter;
1911 	device_t dev;
1912 
1913 	if (sc == NULL)
1914 		return (0);	/* nothing to do */
1915 
1916 	dev = pi ? pi->dev : sc->dev;
1917 
1918 	if (iq->flags & IQ_ALLOCATED) {
1919 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
1920 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
1921 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
1922 		if (rc != 0) {
1923 			device_printf(dev,
1924 			    "failed to free queue %p: %d\n", iq, rc);
1925 			return (rc);
1926 		}
1927 		iq->flags &= ~IQ_ALLOCATED;
1928 	}
1929 
1930 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
1931 
1932 	bzero(iq, sizeof(*iq));
1933 
1934 	if (fl) {
1935 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
1936 		    fl->desc);
1937 
1938 		if (fl->sdesc)
1939 			free_fl_sdesc(fl);
1940 
1941 		if (mtx_initialized(&fl->fl_lock))
1942 			mtx_destroy(&fl->fl_lock);
1943 
1944 		for (i = 0; i < FL_BUF_SIZES; i++) {
1945 			if (fl->tag[i])
1946 				bus_dma_tag_destroy(fl->tag[i]);
1947 		}
1948 
1949 		bzero(fl, sizeof(*fl));
1950 	}
1951 
1952 	return (0);
1953 }
1954 
1955 static int
1956 alloc_fwq(struct adapter *sc)
1957 {
1958 	int rc, intr_idx;
1959 	struct sge_iq *fwq = &sc->sge.fwq;
1960 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
1961 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
1962 
1963 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE);
1964 	fwq->flags |= IQ_INTR;	/* always */
1965 	intr_idx = sc->intr_count > 1 ? 1 : 0;
1966 	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
1967 	if (rc != 0) {
1968 		device_printf(sc->dev,
1969 		    "failed to create firmware event queue: %d\n", rc);
1970 		return (rc);
1971 	}
1972 
1973 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD,
1974 	    NULL, "firmware event queue");
1975 	children = SYSCTL_CHILDREN(oid);
1976 
1977 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
1978 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
1979 	    "absolute id of the queue");
1980 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
1981 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
1982 	    "SGE context id of the queue");
1983 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
1984 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
1985 	    "consumer index");
1986 
1987 	return (0);
1988 }
1989 
1990 static int
1991 free_fwq(struct adapter *sc)
1992 {
1993 	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
1994 }
1995 
1996 static int
1997 alloc_mgmtq(struct adapter *sc)
1998 {
1999 	int rc;
2000 	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
2001 	char name[16];
2002 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
2003 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2004 
2005 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
2006 	    NULL, "management queue");
2007 
2008 	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
2009 	init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
2010 	    sc->sge.fwq.cntxt_id, name);
2011 	rc = alloc_wrq(sc, NULL, mgmtq, oid);
2012 	if (rc != 0) {
2013 		device_printf(sc->dev,
2014 		    "failed to create management queue: %d\n", rc);
2015 		return (rc);
2016 	}
2017 
2018 	return (0);
2019 }
2020 
2021 static int
2022 free_mgmtq(struct adapter *sc)
2023 {
2024 
2025 	return free_wrq(sc, &sc->sge.mgmtq);
2026 }
2027 
2028 static inline int
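/*
 * Translate the cong_drop tunable into the "cong" argument of alloc_iq_fl:
 * -1 disables congestion feedback, cong_drop == 1 maps to 0 (per-queue
 * congestion handling), and anything else maps to a one-hot map of this
 * port's tx channel.
 */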
2029 tnl_cong(struct port_info *pi)
2030 {
2031 
2032 	if (cong_drop == -1)
2033 		return (-1);
2034 	else if (cong_drop == 1)
2035 		return (0);
2036 	else
2037 		return (1 << pi->tx_chan);
2038 }
2039 
2040 static int
2041 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
2042     struct sysctl_oid *oid)
2043 {
2044 	int rc;
2045 	struct sysctl_oid_list *children;
2046 	char name[16];
2047 
2048 	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi));
2049 	if (rc != 0)
2050 		return (rc);
2051 
2052 	FL_LOCK(&rxq->fl);
2053 	refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8);
2054 	FL_UNLOCK(&rxq->fl);
2055 
2056 #if defined(INET) || defined(INET6)
2057 	rc = tcp_lro_init(&rxq->lro);
2058 	if (rc != 0)
2059 		return (rc);
2060 	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
2061 
2062 	if (pi->ifp->if_capenable & IFCAP_LRO)
2063 		rxq->iq.flags |= IQ_LRO_ENABLED;
2064 #endif
2065 	rxq->ifp = pi->ifp;
2066 
2067 	children = SYSCTL_CHILDREN(oid);
2068 
2069 	snprintf(name, sizeof(name), "%d", idx);
2070 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
2071 	    NULL, "rx queue");
2072 	children = SYSCTL_CHILDREN(oid);
2073 
2074 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
2075 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
2076 	    "absolute id of the queue");
2077 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
2078 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
2079 	    "SGE context id of the queue");
2080 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
2081 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
2082 	    "consumer index");
2083 #if defined(INET) || defined(INET6)
2084 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
2085 	    &rxq->lro.lro_queued, 0, NULL);
2086 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
2087 	    &rxq->lro.lro_flushed, 0, NULL);
2088 #endif
2089 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
2090 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
2091 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
2092 	    CTLFLAG_RD, &rxq->vlan_extraction,
2093 	    "# of times hardware extracted 802.1Q tag");
2094 
2095 	children = SYSCTL_CHILDREN(oid);
2096 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
2097 	    NULL, "freelist");
2098 	children = SYSCTL_CHILDREN(oid);
2099 
2100 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
2101 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
2102 	    "SGE context id of the queue");
2103 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
2104 	    &rxq->fl.cidx, 0, "consumer index");
2105 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
2106 	    &rxq->fl.pidx, 0, "producer index");
2107 
2108 	return (rc);
2109 }
2110 
2111 static int
2112 free_rxq(struct port_info *pi, struct sge_rxq *rxq)
2113 {
2114 	int rc;
2115 
2116 #if defined(INET) || defined(INET6)
2117 	if (rxq->lro.ifp) {
2118 		tcp_lro_free(&rxq->lro);
2119 		rxq->lro.ifp = NULL;
2120 	}
2121 #endif
2122 
2123 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
2124 	if (rc == 0)
2125 		bzero(rxq, sizeof(*rxq));
2126 
2127 	return (rc);
2128 }
2129 
2130 #ifdef TCP_OFFLOAD
2131 static int
2132 alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
2133     int intr_idx, int idx, struct sysctl_oid *oid)
2134 {
2135 	int rc;
2136 	struct sysctl_oid_list *children;
2137 	char name[16];
2138 
2139 	rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
2140 	    1 << pi->tx_chan);
2141 	if (rc != 0)
2142 		return (rc);
2143 
2144 	children = SYSCTL_CHILDREN(oid);
2145 
2146 	snprintf(name, sizeof(name), "%d", idx);
2147 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
2148 	    NULL, "rx queue");
2149 	children = SYSCTL_CHILDREN(oid);
2150 
2151 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
2152 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
2153 	    "I", "absolute id of the queue");
2154 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
2155 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
2156 	    "I", "SGE context id of the queue");
2157 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
2158 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
2159 	    "consumer index");
2160 
2161 	children = SYSCTL_CHILDREN(oid);
2162 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
2163 	    NULL, "freelist");
2164 	children = SYSCTL_CHILDREN(oid);
2165 
2166 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
2167 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->fl.cntxt_id, 0, sysctl_uint16,
2168 	    "I", "SGE context id of the queue");
2169 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
2170 	    &ofld_rxq->fl.cidx, 0, "consumer index");
2171 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
2172 	    &ofld_rxq->fl.pidx, 0, "producer index");
2173 
2174 	return (rc);
2175 }
2176 
2177 static int
2178 free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
2179 {
2180 	int rc;
2181 
2182 	rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
2183 	if (rc == 0)
2184 		bzero(ofld_rxq, sizeof(*ofld_rxq));
2185 
2186 	return (rc);
2187 }
2188 #endif
2189 
2190 static int
2191 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
2192 {
2193 	int rc, cntxt_id;
2194 	struct fw_eq_ctrl_cmd c;
2195 
2196 	bzero(&c, sizeof(c));
2197 
2198 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
2199 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
2200 	    V_FW_EQ_CTRL_CMD_VFN(0));
2201 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
2202 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2203 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
2204 	c.physeqid_pkd = htobe32(0);
2205 	c.fetchszm_to_iqid =
2206 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2207 		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
2208 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
2209 	c.dcaen_to_eqsize =
2210 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2211 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2212 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2213 		V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
2214 	c.eqaddr = htobe64(eq->ba);
2215 
2216 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2217 	if (rc != 0) {
2218 		device_printf(sc->dev,
2219 		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
2220 		return (rc);
2221 	}
2222 	eq->flags |= EQ_ALLOCATED;
2223 
2224 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
2225 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2226 	if (cntxt_id >= sc->sge.neq)
2227 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2228 		cntxt_id, sc->sge.neq - 1);
2229 	sc->sge.eqmap[cntxt_id] = eq;
2230 
2231 	return (rc);
2232 }
2233 
2234 static int
2235 eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2236 {
2237 	int rc, cntxt_id;
2238 	struct fw_eq_eth_cmd c;
2239 
2240 	bzero(&c, sizeof(c));
2241 
2242 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
2243 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
2244 	    V_FW_EQ_ETH_CMD_VFN(0));
2245 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
2246 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2247 	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
2248 	c.fetchszm_to_iqid =
2249 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2250 		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
2251 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
2252 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2253 		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2254 		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2255 		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
2256 	c.eqaddr = htobe64(eq->ba);
2257 
2258 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2259 	if (rc != 0) {
2260 		device_printf(pi->dev,
2261 		    "failed to create Ethernet egress queue: %d\n", rc);
2262 		return (rc);
2263 	}
2264 	eq->flags |= EQ_ALLOCATED;
2265 
2266 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
2267 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2268 	if (cntxt_id >= sc->sge.neq)
2269 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2270 		cntxt_id, sc->sge.neq - 1);
2271 	sc->sge.eqmap[cntxt_id] = eq;
2272 
2273 	return (rc);
2274 }
2275 
2276 #ifdef TCP_OFFLOAD
2277 static int
2278 ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2279 {
2280 	int rc, cntxt_id;
2281 	struct fw_eq_ofld_cmd c;
2282 
2283 	bzero(&c, sizeof(c));
2284 
2285 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
2286 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
2287 	    V_FW_EQ_OFLD_CMD_VFN(0));
2288 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
2289 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2290 	c.fetchszm_to_iqid =
2291 		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2292 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
2293 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
2294 	c.dcaen_to_eqsize =
2295 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2296 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2297 		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2298 		V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
2299 	c.eqaddr = htobe64(eq->ba);
2300 
2301 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2302 	if (rc != 0) {
2303 		device_printf(pi->dev,
2304 		    "failed to create egress queue for TCP offload: %d\n", rc);
2305 		return (rc);
2306 	}
2307 	eq->flags |= EQ_ALLOCATED;
2308 
2309 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
2310 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2311 	if (cntxt_id >= sc->sge.neq)
2312 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2313 		cntxt_id, sc->sge.neq - 1);
2314 	sc->sge.eqmap[cntxt_id] = eq;
2315 
2316 	return (rc);
2317 }
2318 #endif
2319 
2320 static int
2321 alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2322 {
2323 	int rc;
2324 	size_t len;
2325 
2326 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
2327 
2328 	len = eq->qsize * EQ_ESIZE;
2329 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
2330 	    &eq->ba, (void **)&eq->desc);
2331 	if (rc)
2332 		return (rc);
2333 
2334 	eq->cap = eq->qsize - spg_len / EQ_ESIZE;
2335 	eq->spg = (void *)&eq->desc[eq->cap];
2336 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
2337 	eq->pidx = eq->cidx = 0;
2338 	eq->doorbells = sc->doorbells;
2339 
2340 	switch (eq->flags & EQ_TYPEMASK) {
2341 	case EQ_CTRL:
2342 		rc = ctrl_eq_alloc(sc, eq);
2343 		break;
2344 
2345 	case EQ_ETH:
2346 		rc = eth_eq_alloc(sc, pi, eq);
2347 		break;
2348 
2349 #ifdef TCP_OFFLOAD
2350 	case EQ_OFLD:
2351 		rc = ofld_eq_alloc(sc, pi, eq);
2352 		break;
2353 #endif
2354 
2355 	default:
2356 		panic("%s: invalid eq type %d.", __func__,
2357 		    eq->flags & EQ_TYPEMASK);
2358 	}
2359 	if (rc != 0) {
2360 		device_printf(sc->dev,
2361 		    "failed to allocate egress queue (%d): %d\n",
2362 		    eq->flags & EQ_TYPEMASK, rc);
2363 	}
2364 
2365 	eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus;
2366 
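	/*
	 * Locate this queue's slot in the user doorbell region: s_qpp is the
	 * log2 of queues per page, so cntxt_id >> s_qpp selects the page and
	 * cntxt_id & mask is the queue's position within it.  A queue whose
	 * 128B doorbell segment doesn't fit in the page keeps its relative
	 * qid and loses WCWR.
	 */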
2367 	if (isset(&eq->doorbells, DOORBELL_UDB) ||
2368 	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
2369 	    isset(&eq->doorbells, DOORBELL_WCWR)) {
2370 		uint32_t s_qpp = sc->sge.s_qpp;
2371 		uint32_t mask = (1 << s_qpp) - 1;
2372 		volatile uint8_t *udb;
2373 
2374 		udb = sc->udbs_base + UDBS_DB_OFFSET;
2375 		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
2376 		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
2377 		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
2378 			clrbit(&eq->doorbells, DOORBELL_WCWR);
2379 		else {
2380 			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
2381 			eq->udb_qid = 0;
2382 		}
2383 		eq->udb = (volatile void *)udb;
2384 	}
2385 
2386 	return (rc);
2387 }
2388 
2389 static int
2390 free_eq(struct adapter *sc, struct sge_eq *eq)
2391 {
2392 	int rc;
2393 
2394 	if (eq->flags & EQ_ALLOCATED) {
2395 		switch (eq->flags & EQ_TYPEMASK) {
2396 		case EQ_CTRL:
2397 			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
2398 			    eq->cntxt_id);
2399 			break;
2400 
2401 		case EQ_ETH:
2402 			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
2403 			    eq->cntxt_id);
2404 			break;
2405 
2406 #ifdef TCP_OFFLOAD
2407 		case EQ_OFLD:
2408 			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
2409 			    eq->cntxt_id);
2410 			break;
2411 #endif
2412 
2413 		default:
2414 			panic("%s: invalid eq type %d.", __func__,
2415 			    eq->flags & EQ_TYPEMASK);
2416 		}
2417 		if (rc != 0) {
2418 			device_printf(sc->dev,
2419 			    "failed to free egress queue (%d): %d\n",
2420 			    eq->flags & EQ_TYPEMASK, rc);
2421 			return (rc);
2422 		}
2423 		eq->flags &= ~EQ_ALLOCATED;
2424 	}
2425 
2426 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
2427 
2428 	if (mtx_initialized(&eq->eq_lock))
2429 		mtx_destroy(&eq->eq_lock);
2430 
2431 	bzero(eq, sizeof(*eq));
2432 	return (0);
2433 }
2434 
2435 static int
2436 alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
2437     struct sysctl_oid *oid)
2438 {
2439 	int rc;
2440 	struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx;
2441 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2442 
2443 	rc = alloc_eq(sc, pi, &wrq->eq);
2444 	if (rc)
2445 		return (rc);
2446 
2447 	wrq->adapter = sc;
2448 	STAILQ_INIT(&wrq->wr_list);
2449 
2450 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
2451 	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
2452 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
2453 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I",
2454 	    "consumer index");
2455 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
2456 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
2457 	    "producer index");
2458 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD,
2459 	    &wrq->tx_wrs, "# of work requests");
2460 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
2461 	    &wrq->no_desc, 0,
2462 	    "# of times queue ran out of hardware descriptors");
2463 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
2464 	    &wrq->eq.unstalled, 0, "# of times queue recovered after stall");
2465 
2467 	return (rc);
2468 }
2469 
2470 static int
2471 free_wrq(struct adapter *sc, struct sge_wrq *wrq)
2472 {
2473 	int rc;
2474 
2475 	rc = free_eq(sc, &wrq->eq);
2476 	if (rc)
2477 		return (rc);
2478 
2479 	bzero(wrq, sizeof(*wrq));
2480 	return (0);
2481 }
2482 
2483 static int
2484 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
2485     struct sysctl_oid *oid)
2486 {
2487 	int rc;
2488 	struct adapter *sc = pi->adapter;
2489 	struct sge_eq *eq = &txq->eq;
2490 	char name[16];
2491 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2492 
2493 	rc = alloc_eq(sc, pi, eq);
2494 	if (rc)
2495 		return (rc);
2496 
2497 	txq->ifp = pi->ifp;
2498 
2499 	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
2500 	    M_ZERO | M_WAITOK);
2501 	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
2502 
2503 	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
2504 	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
2505 	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
2506 	if (rc != 0) {
2507 		device_printf(sc->dev,
2508 		    "failed to create tx DMA tag: %d\n", rc);
2509 		return (rc);
2510 	}
2511 
2512 	/*
2513 	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
2514 	 * limit for any WR).  txq->no_dmamap events shouldn't occur if the map
2515 	 * pool is sized for the worst case.
2516 	 */
2517 	rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8,
2518 	    M_WAITOK);
2519 	if (rc != 0) {
2520 		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
2521 		return (rc);
2522 	}
2523 
2524 	snprintf(name, sizeof(name), "%d", idx);
2525 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
2526 	    NULL, "tx queue");
2527 	children = SYSCTL_CHILDREN(oid);
2528 
2529 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
2530 	    &eq->cntxt_id, 0, "SGE context id of the queue");
2531 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
2532 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
2533 	    "consumer index");
2534 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
2535 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
2536 	    "producer index");
2537 
2538 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
2539 	    &txq->txcsum, "# of times hardware assisted with checksum");
2540 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
2541 	    CTLFLAG_RD, &txq->vlan_insertion,
2542 	    "# of times hardware inserted 802.1Q tag");
2543 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
2544 	    &txq->tso_wrs, "# of TSO work requests");
2545 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
2546 	    &txq->imm_wrs, "# of work requests with immediate data");
2547 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
2548 	    &txq->sgl_wrs, "# of work requests with direct SGL");
2549 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
2550 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
2551 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
2552 	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
2553 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
2554 	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
2555 
2556 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD,
2557 	    &txq->br->br_drops, "# of drops in the buf_ring for this queue");
2558 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
2559 	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
2560 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
2561 	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
2562 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
2563 	    &eq->egr_update, 0, "egress update notifications from the SGE");
2564 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
2565 	    &eq->unstalled, 0, "# of times txq recovered after stall");
2566 
2567 	return (rc);
2568 }
2569 
2570 static int
2571 free_txq(struct port_info *pi, struct sge_txq *txq)
2572 {
2573 	int rc;
2574 	struct adapter *sc = pi->adapter;
2575 	struct sge_eq *eq = &txq->eq;
2576 
2577 	rc = free_eq(sc, eq);
2578 	if (rc)
2579 		return (rc);
2580 
2581 	free(txq->sdesc, M_CXGBE);
2582 
2583 	if (txq->txmaps.maps)
2584 		t4_free_tx_maps(&txq->txmaps, txq->tx_tag);
2585 
2586 	buf_ring_free(txq->br, M_CXGBE);
2587 
2588 	if (txq->tx_tag)
2589 		bus_dma_tag_destroy(txq->tx_tag);
2590 
2591 	bzero(txq, sizeof(*txq));
2592 	return (0);
2593 }
2594 
2595 static void
2596 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2597 {
2598 	bus_addr_t *ba = arg;
2599 
2600 	KASSERT(nseg == 1,
2601 	    ("%s meant for single segment mappings only.", __func__));
2602 
2603 	*ba = error ? 0 : segs->ds_addr;
2604 }
2605 
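/*
 * A response is new if the generation bit in its trailing rsp_ctrl matches
 * the queue's current gen value.  iq_next flips iq->gen on every wrap, so
 * descriptors left over from the previous pass around the ring fail this
 * test.
 */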
2606 static inline bool
2607 is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
2608 {
2609 	*ctrl = (void *)((uintptr_t)iq->cdesc +
2610 	    (iq->esize - sizeof(struct rsp_ctrl)));
2611 
2612 	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
2613 }
2614 
2615 static inline void
2616 iq_next(struct sge_iq *iq)
2617 {
2618 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
2619 	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
2620 		iq->cidx = 0;
2621 		iq->gen ^= 1;
2622 		iq->cdesc = iq->desc;
2623 	}
2624 }
2625 
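/*
 * The SGE tracks freelists in units of 8 descriptors, hence the index >> 3
 * here and the 8-descriptor granularity in ring_fl_db below.
 */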
2626 #define FL_HW_IDX(x) ((x) >> 3)
2627 static inline void
2628 ring_fl_db(struct adapter *sc, struct sge_fl *fl)
2629 {
2630 	int ndesc = fl->pending / 8;
2631 	uint32_t v;
2632 
2633 	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
2634 		ndesc--;	/* hold back one credit */
2635 
2636 	if (ndesc <= 0)
2637 		return;		/* nothing to do */
2638 
2639 	v = F_DBPRIO | V_QID(fl->cntxt_id) | V_PIDX(ndesc);
2640 	if (is_t5(sc))
2641 		v |= F_DBTYPE;
2642 
2643 	wmb();
2644 
2645 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v);
2646 	fl->pending -= ndesc * 8;
2647 }
2648 
2649 /*
2650  * Fill up the freelist by up to nbufs buffers and maybe ring its doorbell.
2651  *
2652  * Returns non-zero to indicate that the freelist should be added to the list
2653  * of starving freelists.
2654  */
2655 static int
2656 refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
2657 {
2658 	__be64 *d = &fl->desc[fl->pidx];
2659 	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
2660 	bus_dma_tag_t tag;
2661 	bus_addr_t pa;
2662 	caddr_t cl;
2663 	int rc;
2664 
2665 	FL_LOCK_ASSERT_OWNED(fl);
2666 
2667 	if (nbufs > fl->needed)
2668 		nbufs = fl->needed;
2669 
2670 	while (nbufs--) {
2671 
2672 		if (sd->cl != NULL) {
2673 
2674 			/*
2675 			 * This happens when a frame small enough to fit
2676 			 * entirely in an mbuf was received in cl last time.
2677 			 * We held on to cl and can reuse it now.  Note that
2678 			 * we reuse a cluster of the old size if fl->tag_idx is
2679 			 * no longer the same as sd->tag_idx.
2680 			 */
2681 
2682 			KASSERT(*d == sd->ba_tag,
2683 			    ("%s: recycling problem at pidx %d",
2684 			    __func__, fl->pidx));
2685 
2686 			d++;
2687 			goto recycled;
2688 		}
2689 
2691 		if (fl->tag_idx != sd->tag_idx) {
2692 			bus_dmamap_t map;
2693 			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
2694 			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
2695 
2696 			/*
2697 			 * An MTU change can get us here.  Discard the old map
2698 			 * which was created with the old tag, but only if
2699 			 * we're able to get a new one.
2700 			 */
2701 			rc = bus_dmamap_create(newtag, 0, &map);
2702 			if (rc == 0) {
2703 				bus_dmamap_destroy(oldtag, sd->map);
2704 				sd->map = map;
2705 				sd->tag_idx = fl->tag_idx;
2706 			}
2707 		}
2708 
2709 		tag = fl->tag[sd->tag_idx];
2710 
2711 		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
2712 		if (cl == NULL)
2713 			break;
2714 
2715 		rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
2716 		    oneseg_dma_callback, &pa, 0);
2717 		if (rc != 0 || pa == 0) {
2718 			fl->dmamap_failed++;
2719 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
2720 			break;
2721 		}
2722 
2723 		sd->cl = cl;
2724 		*d++ = htobe64(pa | sd->tag_idx);
2725 
2726 #ifdef INVARIANTS
2727 		sd->ba_tag = htobe64(pa | sd->tag_idx);
2728 #endif
2729 
2730 recycled:
2731 		/* sd->m is never recycled, should always be NULL */
2732 		KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));
2733 
2734 		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
2735 		if (sd->m == NULL)
2736 			break;
2737 
2738 		fl->pending++;
2739 		fl->needed--;
2740 		sd++;
2741 		if (++fl->pidx == fl->cap) {
2742 			fl->pidx = 0;
2743 			sd = fl->sdesc;
2744 			d = fl->desc;
2745 		}
2746 	}
2747 
2748 	if (fl->pending >= 8)
2749 		ring_fl_db(sc, fl);
2750 
2751 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
2752 }
2753 
2754 /*
2755  * Attempt to refill all starving freelists.
2756  */
2757 static void
2758 refill_sfl(void *arg)
2759 {
2760 	struct adapter *sc = arg;
2761 	struct sge_fl *fl, *fl_temp;
2762 
2763 	mtx_lock(&sc->sfl_lock);
2764 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
2765 		FL_LOCK(fl);
2766 		refill_fl(sc, fl, 64);
2767 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
2768 			TAILQ_REMOVE(&sc->sfl, fl, link);
2769 			fl->flags &= ~FL_STARVING;
2770 		}
2771 		FL_UNLOCK(fl);
2772 	}
2773 
2774 	if (!TAILQ_EMPTY(&sc->sfl))
2775 		callout_schedule(&sc->sfl_callout, hz / 5);
2776 	mtx_unlock(&sc->sfl_lock);
2777 }
2778 
2779 static int
2780 alloc_fl_sdesc(struct sge_fl *fl)
2781 {
2782 	struct fl_sdesc *sd;
2783 	bus_dma_tag_t tag;
2784 	int i, rc;
2785 
2786 	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
2787 	    M_ZERO | M_WAITOK);
2788 
2789 	tag = fl->tag[fl->tag_idx];
2790 	sd = fl->sdesc;
2791 	for (i = 0; i < fl->cap; i++, sd++) {
2792 
2793 		sd->tag_idx = fl->tag_idx;
2794 		rc = bus_dmamap_create(tag, 0, &sd->map);
2795 		if (rc != 0)
2796 			goto failed;
2797 	}
2798 
2799 	return (0);
2800 failed:
2801 	while (--i >= 0) {
2802 		sd--;
2803 		bus_dmamap_destroy(tag, sd->map);
2804 		if (sd->m) {
2805 			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
2806 			m_free(sd->m);
2807 			sd->m = NULL;
2808 		}
2809 	}
2810 	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
2811 
2812 	free(fl->sdesc, M_CXGBE);
2813 	fl->sdesc = NULL;
2814 
2815 	return (rc);
2816 }
2817 
2818 static void
2819 free_fl_sdesc(struct sge_fl *fl)
2820 {
2821 	struct fl_sdesc *sd;
2822 	int i;
2823 
2824 	sd = fl->sdesc;
2825 	for (i = 0; i < fl->cap; i++, sd++) {
2826 
2827 		if (sd->m) {
2828 			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
2829 			m_free(sd->m);
2830 			sd->m = NULL;
2831 		}
2832 
2833 		if (sd->cl) {
2834 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
2835 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
2836 			sd->cl = NULL;
2837 		}
2838 
2839 		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
2840 	}
2841 
2842 	free(fl->sdesc, M_CXGBE);
2843 	fl->sdesc = NULL;
2844 }
2845 
2846 int
2847 t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count,
2848     int flags)
2849 {
2850 	struct tx_map *txm;
2851 	int i, rc;
2852 
2853 	txmaps->map_total = txmaps->map_avail = count;
2854 	txmaps->map_cidx = txmaps->map_pidx = 0;
2855 
2856 	txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
2857 	    M_ZERO | flags);
2858 
2859 	txm = txmaps->maps;
2860 	for (i = 0; i < count; i++, txm++) {
2861 		rc = bus_dmamap_create(tx_tag, 0, &txm->map);
2862 		if (rc != 0)
2863 			goto failed;
2864 	}
2865 
2866 	return (0);
2867 failed:
2868 	while (--i >= 0) {
2869 		txm--;
2870 		bus_dmamap_destroy(tx_tag, txm->map);
2871 	}
2872 	KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__));
2873 
2874 	free(txmaps->maps, M_CXGBE);
2875 	txmaps->maps = NULL;
2876 
2877 	return (rc);
2878 }
2879 
2880 void
2881 t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag)
2882 {
2883 	struct tx_map *txm;
2884 	int i;
2885 
2886 	txm = txmaps->maps;
2887 	for (i = 0; i < txmaps->map_total; i++, txm++) {
2888 
2889 		if (txm->m) {
2890 			bus_dmamap_unload(tx_tag, txm->map);
2891 			m_freem(txm->m);
2892 			txm->m = NULL;
2893 		}
2894 
2895 		bus_dmamap_destroy(tx_tag, txm->map);
2896 	}
2897 
2898 	free(txmaps->maps, M_CXGBE);
2899 	txmaps->maps = NULL;
2900 }
2901 
2902 /*
2903  * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
2904  * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes
2905  * of immediate data.
2906  */
2907 #define IMM_LEN ( \
2908       2 * EQ_ESIZE \
2909     - sizeof(struct fw_eth_tx_pkt_wr) \
2910     - sizeof(struct cpl_tx_pkt_core))
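/*
 * A quick check on the arithmetic: assuming EQ_ESIZE is 64 and both headers
 * are 16 bytes each (which is what the 96-byte figure above implies),
 * IMM_LEN = 2 * 64 - 16 - 16 = 96.
 */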
2911 
2912 /*
2913  * Returns non-zero on failure, no need to cleanup anything in that case.
2914  *
2915  * Note 1: We always try to defrag the mbuf if required and return EFBIG only
2916  * if the resulting chain still won't fit in a tx descriptor.
2917  *
2918  * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
2919  * does not have the TCP header in it.
2920  */
2921 static int
2922 get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
2923     int sgl_only)
2924 {
2925 	struct mbuf *m = *fp;
2926 	struct tx_maps *txmaps;
2927 	struct tx_map *txm;
2928 	int rc, defragged = 0, n;
2929 
2930 	TXQ_LOCK_ASSERT_OWNED(txq);
2931 
2932 	if (m->m_pkthdr.tso_segsz)
2933 		sgl_only = 1;	/* Do not allow immediate data with LSO */
2934 
2935 start:	sgl->nsegs = 0;
2936 
2937 	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
2938 		return (0);	/* nsegs = 0 tells caller to use imm. tx */
2939 
2940 	txmaps = &txq->txmaps;
2941 	if (txmaps->map_avail == 0) {
2942 		txq->no_dmamap++;
2943 		return (ENOMEM);
2944 	}
2945 	txm = &txmaps->maps[txmaps->map_pidx];
2946 
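	/*
	 * Pull up enough of the frame for the LSO setup in write_txpkt_wr to
	 * parse the headers out of the first mbuf (see Note 2 above).  50
	 * bytes presumably covers the common Ethernet + IPv4 case up to the
	 * TCP data offset field (14 + 20 + 13 = 47).
	 */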
2947 	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
2948 		*fp = m_pullup(m, 50);
2949 		m = *fp;
2950 		if (m == NULL)
2951 			return (ENOBUFS);
2952 	}
2953 
2954 	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
2955 	    &sgl->nsegs, BUS_DMA_NOWAIT);
2956 	if (rc == EFBIG && defragged == 0) {
2957 		m = m_defrag(m, M_NOWAIT);
2958 		if (m == NULL)
2959 			return (EFBIG);
2960 
2961 		defragged = 1;
2962 		*fp = m;
2963 		goto start;
2964 	}
2965 	if (rc != 0)
2966 		return (rc);
2967 
2968 	txm->m = m;
2969 	txmaps->map_avail--;
2970 	if (++txmaps->map_pidx == txmaps->map_total)
2971 		txmaps->map_pidx = 0;
2972 
2973 	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
2974 	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
2975 
2976 	/*
2977 	 * Store the # of flits required to hold this frame's SGL in nflits.  An
2978 	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
2979 	 * multiple (len[i] + len[i + 1], addr[i], addr[i + 1]) tuples.  If the
2980 	 * last tuple has no second address, its second length must be 0.
2981 	 */
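	/*
	 * Worked examples: 1 segment needs just the 2-flit (header + len0,
	 * addr0) tuple; 2 segments need 4 flits (a (len1 + 0) flit plus
	 * addr1); 3 segments need 5 (a (len1 + len2) flit plus addr1 and
	 * addr2).
	 */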
2982 	n = sgl->nsegs - 1;
2983 	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
2984 
2985 	return (0);
2986 }
2987 
2989 /*
2990  * Releases all the txq resources used up in the specified sgl.
2991  */
2992 static int
2993 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
2994 {
2995 	struct tx_maps *txmaps;
2996 	struct tx_map *txm;
2997 
2998 	TXQ_LOCK_ASSERT_OWNED(txq);
2999 
3000 	if (sgl->nsegs == 0)
3001 		return (0);	/* didn't use any map */
3002 
3003 	txmaps = &txq->txmaps;
3004 
3005 	/* 1 pkt uses exactly 1 map, back it out */
3006 
3007 	txmaps->map_avail++;
3008 	if (txmaps->map_pidx > 0)
3009 		txmaps->map_pidx--;
3010 	else
3011 		txmaps->map_pidx = txmaps->map_total - 1;
3012 
3013 	txm = &txmaps->maps[txmaps->map_pidx];
3014 	bus_dmamap_unload(txq->tx_tag, txm->map);
3015 	txm->m = NULL;
3016 
3017 	return (0);
3018 }
3019 
3020 static int
3021 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
3022     struct sgl *sgl)
3023 {
3024 	struct sge_eq *eq = &txq->eq;
3025 	struct fw_eth_tx_pkt_wr *wr;
3026 	struct cpl_tx_pkt_core *cpl;
3027 	uint32_t ctrl;	/* used in many unrelated places */
3028 	uint64_t ctrl1;
3029 	int nflits, ndesc, pktlen;
3030 	struct tx_sdesc *txsd;
3031 	caddr_t dst;
3032 
3033 	TXQ_LOCK_ASSERT_OWNED(txq);
3034 
3035 	pktlen = m->m_pkthdr.len;
3036 
3037 	/*
3038 	 * Do we have enough flits to send this frame out?
3039 	 */
3040 	ctrl = sizeof(struct cpl_tx_pkt_core);
3041 	if (m->m_pkthdr.tso_segsz) {
3042 		nflits = TXPKT_LSO_WR_HDR;
3043 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
3044 	} else
3045 		nflits = TXPKT_WR_HDR;
3046 	if (sgl->nsegs > 0)
3047 		nflits += sgl->nflits;
3048 	else {
3049 		nflits += howmany(pktlen, 8);
3050 		ctrl += pktlen;
3051 	}
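	/* A hardware descriptor holds 8 flits of 8 bytes each, so round up. */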
3052 	ndesc = howmany(nflits, 8);
3053 	if (ndesc > eq->avail)
3054 		return (ENOMEM);
3055 
3056 	/* Firmware work request header */
3057 	wr = (void *)&eq->desc[eq->pidx];
3058 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
3059 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
3060 	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
3061 	if (eq->avail == ndesc) {
3062 		if (!(eq->flags & EQ_CRFLUSHED)) {
3063 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
3064 			eq->flags |= EQ_CRFLUSHED;
3065 		}
3066 		eq->flags |= EQ_STALLED;
3067 	}
3068 
3069 	wr->equiq_to_len16 = htobe32(ctrl);
3070 	wr->r3 = 0;
3071 
3072 	if (m->m_pkthdr.tso_segsz) {
3073 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
3074 		struct ether_header *eh;
3075 		void *l3hdr;
3076 #if defined(INET) || defined(INET6)
3077 		struct tcphdr *tcp;
3078 #endif
3079 		uint16_t eh_type;
3080 
3081 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
3082 		    F_LSO_LAST_SLICE;
3083 
3084 		eh = mtod(m, struct ether_header *);
3085 		eh_type = ntohs(eh->ether_type);
3086 		if (eh_type == ETHERTYPE_VLAN) {
3087 			struct ether_vlan_header *evh = (void *)eh;
3088 
3089 			ctrl |= V_LSO_ETHHDR_LEN(1);
3090 			l3hdr = evh + 1;
3091 			eh_type = ntohs(evh->evl_proto);
3092 		} else
3093 			l3hdr = eh + 1;
3094 
3095 		switch (eh_type) {
3096 #ifdef INET6
3097 		case ETHERTYPE_IPV6:
3098 		{
3099 			struct ip6_hdr *ip6 = l3hdr;
3100 
3101 			/*
3102 			 * XXX-BZ For now we do not pretend to support
3103 			 * IPv6 extension headers.
3104 			 */
3105 			KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO "
3106 			    "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt));
3107 			tcp = (struct tcphdr *)(ip6 + 1);
3108 			ctrl |= F_LSO_IPV6;
3109 			ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) |
3110 			    V_LSO_TCPHDR_LEN(tcp->th_off);
3111 			break;
3112 		}
3113 #endif
3114 #ifdef INET
3115 		case ETHERTYPE_IP:
3116 		{
3117 			struct ip *ip = l3hdr;
3118 
3119 			tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
3120 			ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
3121 			    V_LSO_TCPHDR_LEN(tcp->th_off);
3122 			break;
3123 		}
3124 #endif
3125 		default:
3126 			panic("%s: CSUM_TSO but no supported IP version "
3127 			    "(0x%04x)", __func__, eh_type);
3128 		}
3129 
3130 		lso->lso_ctrl = htobe32(ctrl);
3131 		lso->ipid_ofst = htobe16(0);
3132 		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
3133 		lso->seqno_offset = htobe32(0);
3134 		lso->len = htobe32(pktlen);
3135 
3136 		cpl = (void *)(lso + 1);
3137 
3138 		txq->tso_wrs++;
3139 	} else
3140 		cpl = (void *)(wr + 1);
3141 
3142 	/* Checksum offload */
3143 	ctrl1 = 0;
3144 	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
3145 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
3146 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
3147 	    CSUM_TCP_IPV6 | CSUM_TSO)))
3148 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
3149 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
3150 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
3151 		txq->txcsum++;	/* some hardware assistance provided */
3152 
3153 	/* VLAN tag insertion */
3154 	if (m->m_flags & M_VLANTAG) {
3155 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
3156 		txq->vlan_insertion++;
3157 	}
3158 
3159 	/* CPL header */
3160 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
3161 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
3162 	cpl->pack = 0;
3163 	cpl->len = htobe16(pktlen);
3164 	cpl->ctrl1 = htobe64(ctrl1);
3165 
3166 	/* Software descriptor */
3167 	txsd = &txq->sdesc[eq->pidx];
3168 	txsd->desc_used = ndesc;
3169 
3170 	eq->pending += ndesc;
3171 	eq->avail -= ndesc;
3172 	eq->pidx += ndesc;
3173 	if (eq->pidx >= eq->cap)
3174 		eq->pidx -= eq->cap;
3175 
3176 	/* SGL */
3177 	dst = (void *)(cpl + 1);
3178 	if (sgl->nsegs > 0) {
3179 		txsd->credits = 1;
3180 		txq->sgl_wrs++;
3181 		write_sgl_to_txd(eq, sgl, &dst);
3182 	} else {
3183 		txsd->credits = 0;
3184 		txq->imm_wrs++;
3185 		for (; m; m = m->m_next) {
3186 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
3187 #ifdef INVARIANTS
3188 			pktlen -= m->m_len;
3189 #endif
3190 		}
3191 #ifdef INVARIANTS
3192 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
3193 #endif
3195 	}
3196 
3197 	txq->txpkt_wrs++;
3198 	return (0);
3199 }
3200 
3201 /*
3202  * Returns 0 to indicate that m has been accepted into a coalesced tx work
3203  * request.  It has either been folded into txpkts or txpkts was flushed and m
3204  * has started a new coalesced work request (as the first frame in a fresh
3205  * txpkts).
3206  *
3207  * Returns non-zero to indicate a failure - caller is responsible for
3208  * transmitting m, if there was anything in txpkts it has been flushed.
3209  */
3210 static int
3211 add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
3212     struct mbuf *m, struct sgl *sgl)
3213 {
3214 	struct sge_eq *eq = &txq->eq;
3215 	int can_coalesce;
3216 	struct tx_sdesc *txsd;
3217 	int flits;
3218 
3219 	TXQ_LOCK_ASSERT_OWNED(txq);
3220 
3221 	KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__));
3222 
3223 	if (txpkts->npkt > 0) {
3224 		flits = TXPKTS_PKT_HDR + sgl->nflits;
3225 		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
3226 		    txpkts->nflits + flits <= TX_WR_FLITS &&
3227 		    txpkts->nflits + flits <= eq->avail * 8 &&
3228 		    txpkts->plen + m->m_pkthdr.len < 65536;
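		/*
		 * The 65536 cap keeps the coalesced payload length
		 * representable in the WR's 16-bit plen field; the flit
		 * checks keep the WR within the per-WR limit and within the
		 * descriptors still available.
		 */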
3229 
3230 		if (can_coalesce) {
3231 			txpkts->npkt++;
3232 			txpkts->nflits += flits;
3233 			txpkts->plen += m->m_pkthdr.len;
3234 
3235 			txsd = &txq->sdesc[eq->pidx];
3236 			txsd->credits++;
3237 
3238 			return (0);
3239 		}
3240 
3241 		/*
3242 		 * Couldn't coalesce m into txpkts.  The first order of business
3243 		 * is to send txpkts on its way.  Then we'll revisit m.
3244 		 */
3245 		write_txpkts_wr(txq, txpkts);
3246 	}
3247 
3248 	/*
3249 	 * Check if we can start a new coalesced tx work request with m as
3250 	 * the first packet in it.
3251 	 */
3252 
3253 	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
3254 
3255 	flits = TXPKTS_WR_HDR + sgl->nflits;
3256 	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
3257 	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
3258 
3259 	if (can_coalesce == 0)
3260 		return (EINVAL);
3261 
3262 	/*
3263 	 * Start a fresh coalesced tx WR with m as the first frame in it.
3264 	 */
3265 	txpkts->npkt = 1;
3266 	txpkts->nflits = flits;
3267 	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
3268 	txpkts->plen = m->m_pkthdr.len;
3269 
3270 	txsd = &txq->sdesc[eq->pidx];
3271 	txsd->credits = 1;
3272 
3273 	return (0);
3274 }
3275 
3276 /*
3277  * Note that write_txpkts_wr can never run out of hardware descriptors (but
3278  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
3279  * coalescing only if sufficient hardware descriptors are available.
3280  */
3281 static void
3282 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
3283 {
3284 	struct sge_eq *eq = &txq->eq;
3285 	struct fw_eth_tx_pkts_wr *wr;
3286 	struct tx_sdesc *txsd;
3287 	uint32_t ctrl;
3288 	int ndesc;
3289 
3290 	TXQ_LOCK_ASSERT_OWNED(txq);
3291 
3292 	ndesc = howmany(txpkts->nflits, 8);
3293 
3294 	wr = (void *)&eq->desc[eq->pidx];
3295 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
3296 	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
3297 	if (eq->avail == ndesc) {
3298 		if (!(eq->flags & EQ_CRFLUSHED)) {
3299 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
3300 			eq->flags |= EQ_CRFLUSHED;
3301 		}
3302 		eq->flags |= EQ_STALLED;
3303 	}
3304 	wr->equiq_to_len16 = htobe32(ctrl);
3305 	wr->plen = htobe16(txpkts->plen);
3306 	wr->npkt = txpkts->npkt;
3307 	wr->r3 = wr->type = 0;
3308 
3309 	/* Everything else already written */
3310 
3311 	txsd = &txq->sdesc[eq->pidx];
3312 	txsd->desc_used = ndesc;
3313 
3314 	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
3315 
3316 	eq->pending += ndesc;
3317 	eq->avail -= ndesc;
3318 	eq->pidx += ndesc;
3319 	if (eq->pidx >= eq->cap)
3320 		eq->pidx -= eq->cap;
3321 
3322 	txq->txpkts_pkts += txpkts->npkt;
3323 	txq->txpkts_wrs++;
3324 	txpkts->npkt = 0;	/* emptied */
3325 }
3326 
3327 static inline void
3328 write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
3329     struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
3330 {
3331 	struct ulp_txpkt *ulpmc;
3332 	struct ulptx_idata *ulpsc;
3333 	struct cpl_tx_pkt_core *cpl;
3334 	struct sge_eq *eq = &txq->eq;
3335 	uintptr_t flitp, start, end;
3336 	uint64_t ctrl;
3337 	caddr_t dst;
3338 
3339 	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
3340 
3341 	start = (uintptr_t)eq->desc;
3342 	end = (uintptr_t)eq->spg;
3343 
3344 	/* Checksum offload */
3345 	ctrl = 0;
3346 	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
3347 		ctrl |= F_TXPKT_IPCSUM_DIS;
3348 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
3349 	    CSUM_TCP_IPV6 | CSUM_TSO)))
3350 		ctrl |= F_TXPKT_L4CSUM_DIS;
3351 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
3352 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
3353 		txq->txcsum++;	/* some hardware assistance provided */
3354 
3355 	/* VLAN tag insertion */
3356 	if (m->m_flags & M_VLANTAG) {
3357 		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
3358 		txq->vlan_insertion++;
3359 	}
3360 
3361 	/*
3362 	 * The previous packet's SGL must have ended at a 16 byte boundary (this
3363 	 * is required by the firmware/hardware).  It follows that flitp cannot
3364 	 * wrap around between the ULPTX master command and ULPTX subcommand (8
3365  * bytes each), and that it cannot wrap around in the middle of the
3366 	 * cpl_tx_pkt_core either.
3367 	 */
3368 	flitp = (uintptr_t)txpkts->flitp;
3369 	KASSERT((flitp & 0xf) == 0,
3370 	    ("%s: last SGL did not end at 16 byte boundary: %p",
3371 	    __func__, txpkts->flitp));
3372 
3373 	/* ULP master command */
3374 	ulpmc = (void *)flitp;
3375 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
3376 	    V_ULP_TXPKT_FID(eq->iqid));
3377 	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
3378 	    sizeof(*cpl) + 8 * sgl->nflits, 16));
3379 
3380 	/* ULP subcommand */
3381 	ulpsc = (void *)(ulpmc + 1);
3382 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
3383 	    F_ULP_TX_SC_MORE);
3384 	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
3385 
3386 	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
3387 	if (flitp == end)
3388 		flitp = start;
3389 
3390 	/* CPL_TX_PKT */
3391 	cpl = (void *)flitp;
3392 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
3393 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
3394 	cpl->pack = 0;
3395 	cpl->len = htobe16(m->m_pkthdr.len);
3396 	cpl->ctrl1 = htobe64(ctrl);
3397 
3398 	flitp += sizeof(*cpl);
3399 	if (flitp == end)
3400 		flitp = start;
3401 
3402 	/* SGL for this frame */
3403 	dst = (caddr_t)flitp;
3404 	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
3405 	txpkts->flitp = (void *)dst;
3406 
3407 	KASSERT(((uintptr_t)dst & 0xf) == 0,
3408 	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
3409 }
3410 
3411 /*
3412  * If the SGL ends on an address that is not 16 byte aligned, this function will
3413  * add a zero-filled flit at the end.  It returns 1 in that case.
3414  */
3415 static int
3416 write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
3417 {
3418 	__be64 *flitp, *end;
3419 	struct ulptx_sgl *usgl;
3420 	bus_dma_segment_t *seg;
3421 	int i, padded;
3422 
3423 	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
3424 	    ("%s: bad SGL - nsegs=%d, nflits=%d",
3425 	    __func__, sgl->nsegs, sgl->nflits));
3426 
3427 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
3428 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
3429 
3430 	flitp = (__be64 *)(*to);
3431 	end = flitp + sgl->nflits;
3432 	seg = &sgl->seg[0];
3433 	usgl = (void *)flitp;
3434 
3435 	/*
3436 	 * We start at a 16 byte boundary somewhere inside the tx descriptor
3437 	 * ring, so we're at least 16 bytes away from the status page.  There is
3438 	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
3439 	 */
3440 
3441 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
3442 	    V_ULPTX_NSGE(sgl->nsegs));
3443 	usgl->len0 = htobe32(seg->ds_len);
3444 	usgl->addr0 = htobe64(seg->ds_addr);
3445 	seg++;
3446 
3447 	if ((uintptr_t)end <= (uintptr_t)eq->spg) {
3448 
3449 		/* Won't wrap around at all */
3450 
3451 		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
3452 			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
3453 			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
3454 		}
3455 		if (i & 1)
3456 			usgl->sge[i / 2].len[1] = htobe32(0);
3457 	} else {
3458 
3459 		/* Will wrap somewhere in the rest of the SGL */
3460 
3461 		/* 2 flits already written, write the rest flit by flit */
3462 		flitp = (void *)(usgl + 1);
3463 		for (i = 0; i < sgl->nflits - 2; i++) {
3464 			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
3465 				flitp = (void *)eq->desc;
3466 			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
3467 		}
3468 		end = flitp;
3469 	}
3470 
3471 	if ((uintptr_t)end & 0xf) {
3472 		*(uint64_t *)end = 0;
3473 		end++;
3474 		padded = 1;
3475 	} else
3476 		padded = 0;
3477 
3478 	if ((uintptr_t)end == (uintptr_t)eq->spg)
3479 		*to = (void *)eq->desc;
3480 	else
3481 		*to = (void *)end;
3482 
3483 	return (padded);
3484 }
3485 
3486 static inline void
3487 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
3488 {
3489 	if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) {
3490 		bcopy(from, *to, len);
3491 		(*to) += len;
3492 	} else {
3493 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
3494 
3495 		bcopy(from, *to, portion);
3496 		from += portion;
3497 		portion = len - portion;	/* remaining */
3498 		bcopy(from, (void *)eq->desc, portion);
3499 		(*to) = (caddr_t)eq->desc + portion;
3500 	}
3501 }
3502 
3503 static inline void
3504 ring_eq_db(struct adapter *sc, struct sge_eq *eq)
3505 {
3506 	u_int db, pending;
3507 
3508 	db = eq->doorbells;
3509 	pending = eq->pending;
3510 	if (pending > 1)
3511 		clrbit(&db, DOORBELL_WCWR);
3512 	eq->pending = 0;
3513 	wmb();
3514 
3515 	switch (ffs(db) - 1) {
3516 	case DOORBELL_UDB:
3517 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending));
3518 		return;
3519 
3520 	case DOORBELL_WCWR: {
3521 		volatile uint64_t *dst, *src;
3522 		int i;
3523 
3524 		/*
3525 		 * Queues whose 128B doorbell segment fits in the page do not
3526 		 * use relative qid (udb_qid is always 0).  Only queues with
3527 		 * doorbell segments can do WCWR.
3528 		 */
3529 		KASSERT(eq->udb_qid == 0 && pending == 1,
3530 		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
3531 		    __func__, eq->doorbells, pending, eq->pidx, eq));
3532 
3533 		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
3534 		    UDBS_DB_OFFSET);
3535 		i = eq->pidx ? eq->pidx - 1 : eq->cap - 1;
3536 		src = (void *)&eq->desc[i];
3537 		while (src != (void *)&eq->desc[i + 1])
3538 			*dst++ = *src++;
3539 		wmb();
3540 		return;
3541 	}
3542 
3543 	case DOORBELL_UDBWC:
3544 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending));
3545 		wmb();
3546 		return;
3547 
3548 	case DOORBELL_KDB:
3549 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
3550 		    V_QID(eq->cntxt_id) | V_PIDX(pending));
3551 		return;
3552 	}
3553 }
3554 
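/*
 * Number of tx descriptors the hardware is done with, based on the cidx
 * that the SGE periodically writes to the queue's status page, with the
 * wrapped-cidx case handled explicitly.
 */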
3555 static inline int
3556 reclaimable(struct sge_eq *eq)
3557 {
3558 	unsigned int cidx;
3559 
3560 	cidx = eq->spg->cidx;	/* stable snapshot */
3561 	cidx = be16toh(cidx);
3562 
3563 	if (cidx >= eq->cidx)
3564 		return (cidx - eq->cidx);
3565 	else
3566 		return (cidx + eq->cap - eq->cidx);
3567 }
3568 
3569 /*
3570  * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
3571  * many as possible but stop when there are around "n" mbufs to free.
3572  *
3573  * The actual number reclaimed is provided as the return value.
3574  */
3575 static int
3576 reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
3577 {
3578 	struct tx_sdesc *txsd;
3579 	struct tx_maps *txmaps;
3580 	struct tx_map *txm;
3581 	unsigned int reclaimed, maps;
3582 	struct sge_eq *eq = &txq->eq;
3583 
3584 	TXQ_LOCK_ASSERT_OWNED(txq);
3585 
3586 	if (can_reclaim == 0)
3587 		can_reclaim = reclaimable(eq);
3588 
3589 	maps = reclaimed = 0;
3590 	while (can_reclaim && maps < n) {
3591 		int ndesc;
3592 
3593 		txsd = &txq->sdesc[eq->cidx];
3594 		ndesc = txsd->desc_used;
3595 
3596 		/* Firmware doesn't return "partial" credits. */
3597 		KASSERT(can_reclaim >= ndesc,
3598 		    ("%s: unexpected number of credits: %d, %d",
3599 		    __func__, can_reclaim, ndesc));
3600 
3601 		maps += txsd->credits;
3602 
3603 		reclaimed += ndesc;
3604 		can_reclaim -= ndesc;
3605 
3606 		eq->cidx += ndesc;
3607 		if (__predict_false(eq->cidx >= eq->cap))
3608 			eq->cidx -= eq->cap;
3609 	}
3610 
3611 	txmaps = &txq->txmaps;
3612 	txm = &txmaps->maps[txmaps->map_cidx];
3613 	if (maps)
3614 		prefetch(txm->m);
3615 
3616 	eq->avail += reclaimed;
3617 	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
3618 	    ("%s: too many descriptors available", __func__));
3619 
3620 	txmaps->map_avail += maps;
3621 	KASSERT(txmaps->map_avail <= txmaps->map_total,
3622 	    ("%s: too many maps available", __func__));
3623 
3624 	while (maps--) {
3625 		struct tx_map *next;
3626 
3627 		next = txm + 1;
3628 		if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total))
3629 			next = txmaps->maps;
3630 		prefetch(next->m);
3631 
3632 		bus_dmamap_unload(txq->tx_tag, txm->map);
3633 		m_freem(txm->m);
3634 		txm->m = NULL;
3635 
3636 		txm = next;
3637 		if (__predict_false(++txmaps->map_cidx == txmaps->map_total))
3638 			txmaps->map_cidx = 0;
3639 	}
3640 
3641 	return (reclaimed);
3642 }
3643 
3644 static void
3645 write_eqflush_wr(struct sge_eq *eq)
3646 {
3647 	struct fw_eq_flush_wr *wr;
3648 
3649 	EQ_LOCK_ASSERT_OWNED(eq);
3650 	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
3651 	KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__));
3652 
3653 	wr = (void *)&eq->desc[eq->pidx];
3654 	bzero(wr, sizeof(*wr));
3655 	wr->opcode = FW_EQ_FLUSH_WR;
3656 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
3657 	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
3658 
3659 	eq->flags |= (EQ_CRFLUSHED | EQ_STALLED);
3660 	eq->pending++;
3661 	eq->avail--;
3662 	if (++eq->pidx == eq->cap)
3663 		eq->pidx = 0;
3664 }
3665 
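/*
 * Returns the idx'th flit of the SGL tail (everything after the initial
 * len0/addr0 pair): segments are packed as (len[i] | len[i + 1] << 32),
 * addr[i], addr[i + 1], so flits 0-2 describe segment pair (0, 1), flits
 * 3-5 describe pair (2, 3), and so on.
 */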
3666 static __be64
3667 get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
3668 {
3669 	int i = (idx / 3) * 2;
3670 
3671 	switch (idx % 3) {
3672 	case 0: {
3673 		__be64 rc;
3674 
3675 		rc = htobe32(sgl[i].ds_len);
3676 		if (i + 1 < nsegs)
3677 			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
3678 
3679 		return (rc);
3680 	}
3681 	case 1:
3682 		return htobe64(sgl[i].ds_addr);
3683 	case 2:
3684 		return htobe64(sgl[i + 1].ds_addr);
3685 	}
3686 
3687 	return (0);
3688 }
3689 
3690 static void
3691 set_fl_tag_idx(struct sge_fl *fl, int bufsize)
3692 {
3693 	int i;
3694 
3695 	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
3696 		if (FL_BUF_SIZE(i) >= bufsize)
3697 			break;
3698 	}
3699 
3700 	fl->tag_idx = i;
3701 }
3702 
3703 static void
3704 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
3705 {
3706 	mtx_lock(&sc->sfl_lock);
3707 	FL_LOCK(fl);
3708 	if ((fl->flags & FL_DOOMED) == 0) {
3709 		fl->flags |= FL_STARVING;
3710 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
3711 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
3712 	}
3713 	FL_UNLOCK(fl);
3714 	mtx_unlock(&sc->sfl_lock);
3715 }
3716 
3717 static int
3718 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
3719     struct mbuf *m)
3720 {
3721 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
3722 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
3723 	struct adapter *sc = iq->adapter;
3724 	struct sge *s = &sc->sge;
3725 	struct sge_eq *eq;
3726 
3727 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
3728 	    rss->opcode));
3729 
3730 	eq = s->eqmap[qid - s->eq_start];
3731 	EQ_LOCK(eq);
3732 	KASSERT(eq->flags & EQ_CRFLUSHED,
3733 	    ("%s: unsolicited egress update", __func__));
3734 	eq->flags &= ~EQ_CRFLUSHED;
3735 	eq->egr_update++;
3736 
3737 	if (__predict_false(eq->flags & EQ_DOOMED))
3738 		wakeup_one(eq);
3739 	else if (eq->flags & EQ_STALLED && can_resume_tx(eq))
3740 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
3741 	EQ_UNLOCK(eq);
3742 
3743 	return (0);
3744 }
3745 
3746 /* handle_fw_msg works for both fw4_msg and fw6_msg (data is at the same offset) */
3747 CTASSERT(offsetof(struct cpl_fw4_msg, data) ==
3748     offsetof(struct cpl_fw6_msg, data));
3749 
3750 static int
3751 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
3752 {
3753 	struct adapter *sc = iq->adapter;
3754 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
3755 
3756 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
3757 	    rss->opcode));
3758 
3759 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
3760 		const struct rss_header *rss2;
3761 
3762 		rss2 = (const struct rss_header *)&cpl->data[0];
3763 		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
3764 	}
3765 
3766 	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
3767 }
3768 
3769 static int
3770 sysctl_uint16(SYSCTL_HANDLER_ARGS)
3771 {
3772 	uint16_t *id = arg1;
3773 	int i = *id;
3774 
3775 	return sysctl_handle_int(oidp, &i, 0, req);
3776 }
3777