xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 
33 #include <sys/types.h>
34 #include <sys/mbuf.h>
35 #include <sys/socket.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/queue.h>
39 #include <sys/taskqueue.h>
40 #include <sys/sysctl.h>
41 #include <sys/smp.h>
42 #include <net/bpf.h>
43 #include <net/ethernet.h>
44 #include <net/if.h>
45 #include <net/if_vlan_var.h>
46 #include <netinet/in.h>
47 #include <netinet/ip.h>
48 #include <netinet/tcp.h>
49 
50 #include "common/common.h"
51 #include "common/t4_regs.h"
52 #include "common/t4_regs_values.h"
53 #include "common/t4_msg.h"
54 #include "t4_l2t.h"
55 
56 struct fl_buf_info {
57 	int size;
58 	int type;
59 	uma_zone_t zone;
60 };
61 
62 /* Filled up by t4_sge_modload */
63 static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];
64 
65 #define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
66 #define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
67 #define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)
68 
69 enum {
70 	FL_PKTSHIFT = 2
71 };
72 
73 #define FL_ALIGN	min(CACHE_LINE_SIZE, 32)
74 #if CACHE_LINE_SIZE > 64
75 #define SPG_LEN		128
76 #else
77 #define SPG_LEN		64
78 #endif
79 
80 /* Used to track a coalesced tx work request */
81 struct txpkts {
82 	uint64_t *flitp;	/* ptr to flit where next pkt should start */
83 	uint8_t npkt;		/* # of packets in this work request */
84 	uint8_t nflits;		/* # of flits used by this work request */
85 	uint16_t plen;		/* total payload (sum of all packets) */
86 };
87 
88 /* A packet's SGL.  This + m_pkthdr has all info needed for tx */
89 struct sgl {
90 	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
91 	int nflits;		/* # of flits needed for the SGL */
92 	bus_dma_segment_t seg[TX_SGL_SEGS];
93 };
94 
95 static int service_iq(struct sge_iq *, int);
96 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t,
97     int *);
98 static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
99 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
100     int, char *);
101 static inline void init_fl(struct sge_fl *, int, int, char *);
102 static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
103     char *);
104 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
105     bus_addr_t *, void **);
106 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
107     void *);
108 static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
109     int, int);
110 static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
111 static int alloc_fwq(struct adapter *);
112 static int free_fwq(struct adapter *);
113 static int alloc_mgmtq(struct adapter *);
114 static int free_mgmtq(struct adapter *);
115 static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
116     struct sysctl_oid *);
117 static int free_rxq(struct port_info *, struct sge_rxq *);
118 #ifndef TCP_OFFLOAD_DISABLE
119 static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
120     struct sysctl_oid *);
121 static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
122 #endif
123 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
124 static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
125 #ifndef TCP_OFFLOAD_DISABLE
126 static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
127 #endif
128 static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
129 static int free_eq(struct adapter *, struct sge_eq *);
130 static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
131     struct sysctl_oid *);
132 static int free_wrq(struct adapter *, struct sge_wrq *);
133 static int alloc_txq(struct port_info *, struct sge_txq *, int,
134     struct sysctl_oid *);
135 static int free_txq(struct port_info *, struct sge_txq *);
136 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
137 static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
138 static inline void iq_next(struct sge_iq *);
139 static inline void ring_fl_db(struct adapter *, struct sge_fl *);
140 static int refill_fl(struct adapter *, struct sge_fl *, int);
141 static void refill_sfl(void *);
142 static int alloc_fl_sdesc(struct sge_fl *);
143 static void free_fl_sdesc(struct sge_fl *);
144 static void set_fl_tag_idx(struct sge_fl *, int);
145 static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
146 
147 static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
148 static int free_pkt_sgl(struct sge_txq *, struct sgl *);
149 static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
150     struct sgl *);
151 static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
152     struct mbuf *, struct sgl *);
153 static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
154 static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
155     struct txpkts *, struct mbuf *, struct sgl *);
156 static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
157 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
158 static inline void ring_eq_db(struct adapter *, struct sge_eq *);
159 static inline int reclaimable(struct sge_eq *);
160 static int reclaim_tx_descs(struct sge_txq *, int, int);
161 static void write_eqflush_wr(struct sge_eq *);
162 static __be64 get_flit(bus_dma_segment_t *, int, int);
163 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
164     struct mbuf *);
165 static int handle_fw_rpl(struct sge_iq *, const struct rss_header *,
166     struct mbuf *);
167 
168 static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
169 
170 /*
171  * Called on MOD_LOAD and fills up fl_buf_info[].
172  */
173 void
174 t4_sge_modload(void)
175 {
176 	int i;
177 	int bufsize[FL_BUF_SIZES] = {
178 		MCLBYTES,
179 #if MJUMPAGESIZE != MCLBYTES
180 		MJUMPAGESIZE,
181 #endif
182 		MJUM9BYTES,
183 		MJUM16BYTES
184 	};
185 
186 	for (i = 0; i < FL_BUF_SIZES; i++) {
187 		FL_BUF_SIZE(i) = bufsize[i];
188 		FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
189 		FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
190 	}
191 }
192 
193 /**
194  *	t4_sge_init - initialize SGE
195  *	@sc: the adapter
196  *
197  *	Performs SGE initialization needed every time after a chip reset.
198  *	We do not initialize any of the queues here; the top-level driver
199  *	must request them individually.
200  */
201 int
202 t4_sge_init(struct adapter *sc)
203 {
204 	struct sge *s = &sc->sge;
205 	int i, rc = 0;
206 	uint32_t ctrl_mask, ctrl_val, hpsize, v;
207 
208 	ctrl_mask = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE |
209 	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
210 	    F_EGRSTATUSPAGESIZE;
211 	ctrl_val = V_PKTSHIFT(FL_PKTSHIFT) | F_RXPKTCPLMODE |
212 	    V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
213 	    V_EGRSTATUSPAGESIZE(SPG_LEN == 128);
214 
215 	hpsize = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
216 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
217 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
218 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
219 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
220 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
221 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
222 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
223 
224 	if (sc->flags & MASTER_PF) {
225 		int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
226 		int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
227 
228 		t4_set_reg_field(sc, A_SGE_CONTROL, ctrl_mask, ctrl_val);
229 		t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, hpsize);
230 		for (i = 0; i < FL_BUF_SIZES; i++) {
231 			t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
232 			    FL_BUF_SIZE(i));
233 		}
234 
235 		t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
236 		    V_THRESHOLD_0(intr_pktcount[0]) |
237 		    V_THRESHOLD_1(intr_pktcount[1]) |
238 		    V_THRESHOLD_2(intr_pktcount[2]) |
239 		    V_THRESHOLD_3(intr_pktcount[3]));
240 
241 		t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
242 		    V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
243 		    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])));
244 		t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
245 		    V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
246 		    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])));
247 		t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
248 		    V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
249 		    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])));
250 	}
251 
252 	v = t4_read_reg(sc, A_SGE_CONTROL);
253 	if ((v & ctrl_mask) != ctrl_val) {
254 		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", v);
255 		rc = EINVAL;
256 	}
257 
258 	v = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
259 	if (v != hpsize) {
260 		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", v);
261 		rc = EINVAL;
262 	}
263 
264 	for (i = 0; i < FL_BUF_SIZES; i++) {
265 		v = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
266 		if (v != FL_BUF_SIZE(i)) {
267 			device_printf(sc->dev,
268 			    "invalid SGE_FL_BUFFER_SIZE[%d](0x%x)\n", i, v);
269 			rc = EINVAL;
270 		}
271 	}
272 
273 	v = t4_read_reg(sc, A_SGE_CONM_CTRL);
274 	s->fl_starve_threshold = G_EGRTHRESHOLD(v) * 2 + 1;
275 
276 	v = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
277 	sc->sge.counter_val[0] = G_THRESHOLD_0(v);
278 	sc->sge.counter_val[1] = G_THRESHOLD_1(v);
279 	sc->sge.counter_val[2] = G_THRESHOLD_2(v);
280 	sc->sge.counter_val[3] = G_THRESHOLD_3(v);
281 
282 	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
283 	sc->sge.timer_val[0] = G_TIMERVALUE0(v) / core_ticks_per_usec(sc);
284 	sc->sge.timer_val[1] = G_TIMERVALUE1(v) / core_ticks_per_usec(sc);
285 	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
286 	sc->sge.timer_val[2] = G_TIMERVALUE2(v) / core_ticks_per_usec(sc);
287 	sc->sge.timer_val[3] = G_TIMERVALUE3(v) / core_ticks_per_usec(sc);
288 	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
289 	sc->sge.timer_val[4] = G_TIMERVALUE4(v) / core_ticks_per_usec(sc);
290 	sc->sge.timer_val[5] = G_TIMERVALUE5(v) / core_ticks_per_usec(sc);
291 
292 	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_rpl);
293 	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_rpl);
294 	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
295 	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
296 
297 	return (rc);
298 }
299 
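/*
 * Creates the adapter's parent DMA tag.  The ring and freelist buffer tags in
 * this file are created with this one as their parent.
 */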
300 int
301 t4_create_dma_tag(struct adapter *sc)
302 {
303 	int rc;
304 
305 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
306 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
307 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
308 	    NULL, &sc->dmat);
309 	if (rc != 0) {
310 		device_printf(sc->dev,
311 		    "failed to create main DMA tag: %d\n", rc);
312 	}
313 
314 	return (rc);
315 }
316 
317 int
318 t4_destroy_dma_tag(struct adapter *sc)
319 {
320 	if (sc->dmat)
321 		bus_dma_tag_destroy(sc->dmat);
322 
323 	return (0);
324 }
325 
326 /*
327  * Allocate and initialize the firmware event queue and the management queue.
328  *
329  * Returns errno on failure.  Resources allocated up to that point may still be
330  * allocated.  Caller is responsible for cleanup in case this function fails.
331  */
332 int
333 t4_setup_adapter_queues(struct adapter *sc)
334 {
335 	int rc;
336 
337 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
338 
339 	sysctl_ctx_init(&sc->ctx);
340 	sc->flags |= ADAP_SYSCTL_CTX;
341 
342 	/*
343 	 * Firmware event queue
344 	 */
345 	rc = alloc_fwq(sc);
346 	if (rc != 0) {
347 		device_printf(sc->dev,
348 		    "failed to create firmware event queue: %d\n", rc);
349 		return (rc);
350 	}
351 
352 	/*
353 	 * Management queue.  This is just a control queue that uses the fwq as
354 	 * its associated iq.
355 	 */
356 	rc = alloc_mgmtq(sc);
357 	if (rc != 0) {
358 		device_printf(sc->dev,
359 		    "failed to create management queue: %d\n", rc);
360 		return (rc);
361 	}
362 
363 	return (rc);
364 }
365 
366 /*
367  * Idempotent
368  */
369 int
370 t4_teardown_adapter_queues(struct adapter *sc)
371 {
372 
373 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
374 
375 	/* Do this before freeing the queue */
376 	if (sc->flags & ADAP_SYSCTL_CTX) {
377 		sysctl_ctx_free(&sc->ctx);
378 		sc->flags &= ~ADAP_SYSCTL_CTX;
379 	}
380 
381 	free_mgmtq(sc);
382 	free_fwq(sc);
383 
384 	return (0);
385 }
386 
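/*
 * Index of the first interrupt vector that belongs to this port.  The vectors
 * before it are the adapter-wide ones followed by those of the earlier ports.
 */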
387 static inline int
388 first_vector(struct port_info *pi)
389 {
390 	struct adapter *sc = pi->adapter;
391 	int rc = T4_EXTRA_INTR, i;
392 
393 	if (sc->intr_count == 1)
394 		return (0);
395 
396 	for_each_port(sc, i) {
397 		if (i == pi->port_id)
398 			break;
399 
400 #ifndef TCP_OFFLOAD_DISABLE
401 		if (sc->flags & INTR_DIRECT)
402 			rc += pi->nrxq + pi->nofldrxq;
403 		else
404 			rc += max(pi->nrxq, pi->nofldrxq);
405 #else
406 		/*
407 		 * Not compiled with offload support and intr_count > 1.  Only
408 		 * NIC queues exist and they'd better be taking direct
409 		 * interrupts.
410 		 */
411 		KASSERT(sc->flags & INTR_DIRECT,
412 		    ("%s: intr_count %d, !INTR_DIRECT", __func__,
413 		    sc->intr_count));
414 
415 		rc += pi->nrxq;
416 #endif
417 	}
418 
419 	return (rc);
420 }
421 
422 /*
423  * Given an arbitrary "index," come up with an iq that can be used by other
424  * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
425  * The iq returned is guaranteed to be something that takes direct interrupts.
426  */
427 static struct sge_iq *
428 port_intr_iq(struct port_info *pi, int idx)
429 {
430 	struct adapter *sc = pi->adapter;
431 	struct sge *s = &sc->sge;
432 	struct sge_iq *iq = NULL;
433 
434 	if (sc->intr_count == 1)
435 		return (&sc->sge.fwq);
436 
437 #ifndef TCP_OFFLOAD_DISABLE
438 	if (sc->flags & INTR_DIRECT) {
439 		idx %= pi->nrxq + pi->nofldrxq;
440 
441 		if (idx >= pi->nrxq) {
442 			idx -= pi->nrxq;
443 			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
444 		} else
445 			iq = &s->rxq[pi->first_rxq + idx].iq;
446 
447 	} else {
448 		idx %= max(pi->nrxq, pi->nofldrxq);
449 
450 		if (pi->nrxq >= pi->nofldrxq)
451 			iq = &s->rxq[pi->first_rxq + idx].iq;
452 		else
453 			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
454 	}
455 #else
456 	/*
457 	 * Not compiled with offload support and intr_count > 1.  Only NIC
458 	 * queues exist and they'd better be taking direct interrupts.
459 	 */
460 	KASSERT(sc->flags & INTR_DIRECT,
461 	    ("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count));
462 
463 	idx %= pi->nrxq;
464 	iq = &s->rxq[pi->first_rxq + idx].iq;
465 #endif
466 
467 	KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__));
468 	return (iq);
469 }
470 
471 int
472 t4_setup_port_queues(struct port_info *pi)
473 {
474 	int rc = 0, i, j, intr_idx, iqid;
475 	struct sge_rxq *rxq;
476 	struct sge_txq *txq;
477 	struct sge_wrq *ctrlq;
478 #ifndef TCP_OFFLOAD_DISABLE
479 	struct sge_ofld_rxq *ofld_rxq;
480 	struct sge_wrq *ofld_txq;
481 #endif
482 	char name[16];
483 	struct adapter *sc = pi->adapter;
484 	struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev), *oid2 = NULL;
485 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
486 
487 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD,
488 	    NULL, "rx queues");
489 
490 #ifndef TCP_OFFLOAD_DISABLE
491 	if (is_offload(sc)) {
492 		oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
493 		    CTLFLAG_RD, NULL,
494 		    "rx queues for offloaded TCP connections");
495 	}
496 #endif
497 
498 	/* Interrupt vector to start from (when using multiple vectors) */
499 	intr_idx = first_vector(pi);
500 
501 	/*
502 	 * First pass over all rx queues (NIC and TOE):
503 	 * a) initialize iq and fl
504 	 * b) allocate queue iff it will take direct interrupts.
505 	 */
506 	for_each_rxq(pi, i, rxq) {
507 
508 		snprintf(name, sizeof(name), "%s rxq%d-iq",
509 		    device_get_nameunit(pi->dev), i);
510 		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq,
511 		    RX_IQ_ESIZE, name);
512 
513 		snprintf(name, sizeof(name), "%s rxq%d-fl",
514 		    device_get_nameunit(pi->dev), i);
515 		init_fl(&rxq->fl, pi->qsize_rxq / 8, pi->ifp->if_mtu, name);
516 
517 		if (sc->flags & INTR_DIRECT
518 #ifndef TCP_OFFLOAD_DISABLE
519 		    || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
520 #endif
521 		   ) {
522 			rxq->iq.flags |= IQ_INTR;
523 			rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
524 			if (rc != 0)
525 				goto done;
526 			intr_idx++;
527 		}
528 	}
529 
530 #ifndef TCP_OFFLOAD_DISABLE
531 	for_each_ofld_rxq(pi, i, ofld_rxq) {
532 
533 		snprintf(name, sizeof(name), "%s ofld_rxq%d-iq",
534 		    device_get_nameunit(pi->dev), i);
535 		init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
536 		    pi->qsize_rxq, RX_IQ_ESIZE, name);
537 
538 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
539 		    device_get_nameunit(pi->dev), i);
540 		init_fl(&ofld_rxq->fl, pi->qsize_rxq / 8, MJUM16BYTES, name);
541 
542 		if (sc->flags & INTR_DIRECT ||
543 		    (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
544 			ofld_rxq->iq.flags |= IQ_INTR;
545 			rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
546 			if (rc != 0)
547 				goto done;
548 			intr_idx++;
549 		}
550 	}
551 #endif
552 
553 	/*
554 	 * Second pass over all rx queues (NIC and TOE).  The queues forwarding
555 	 * their interrupts are allocated now.
556 	 */
557 	j = 0;
558 	for_each_rxq(pi, i, rxq) {
559 		if (rxq->iq.flags & IQ_INTR)
560 			continue;
561 
562 		intr_idx = port_intr_iq(pi, j)->abs_id;
563 
564 		rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
565 		if (rc != 0)
566 			goto done;
567 		j++;
568 	}
569 
570 #ifndef TCP_OFFLOAD_DISABLE
571 	for_each_ofld_rxq(pi, i, ofld_rxq) {
572 		if (ofld_rxq->iq.flags & IQ_INTR)
573 			continue;
574 
575 		intr_idx = port_intr_iq(pi, j)->abs_id;
576 
577 		rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
578 		if (rc != 0)
579 			goto done;
580 		j++;
581 	}
582 #endif
583 
584 	/*
585 	 * Now the tx queues.  Only one pass needed.
586 	 */
587 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
588 	    NULL, "tx queues");
589 	j = 0;
590 	for_each_txq(pi, i, txq) {
591 		uint16_t iqid;
592 
593 		iqid = port_intr_iq(pi, j)->cntxt_id;
594 
595 		snprintf(name, sizeof(name), "%s txq%d",
596 		    device_get_nameunit(pi->dev), i);
597 		init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
598 		    name);
599 
600 		rc = alloc_txq(pi, txq, i, oid);
601 		if (rc != 0)
602 			goto done;
603 		j++;
604 	}
605 
606 #ifndef TCP_OFFLOAD_DISABLE
607 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
608 	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
609 	for_each_ofld_txq(pi, i, ofld_txq) {
610 		uint16_t iqid;
611 
612 		iqid = port_intr_iq(pi, j)->cntxt_id;
613 
614 		snprintf(name, sizeof(name), "%s ofld_txq%d",
615 		    device_get_nameunit(pi->dev), i);
616 		init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
617 		    iqid, name);
618 
619 		snprintf(name, sizeof(name), "%d", i);
620 		oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
621 		    name, CTLFLAG_RD, NULL, "offload tx queue");
622 
623 		rc = alloc_wrq(sc, pi, ofld_txq, oid2);
624 		if (rc != 0)
625 			goto done;
626 		j++;
627 	}
628 #endif
629 
630 	/*
631 	 * Finally, the control queue.
632 	 */
633 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
634 	    NULL, "ctrl queue");
635 	ctrlq = &sc->sge.ctrlq[pi->port_id];
636 	iqid = port_intr_iq(pi, 0)->cntxt_id;
637 	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev));
638 	init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
639 	rc = alloc_wrq(sc, pi, ctrlq, oid);
640 
641 done:
642 	if (rc)
643 		t4_teardown_port_queues(pi);
644 
645 	return (rc);
646 }
647 
648 /*
649  * Idempotent
650  */
651 int
652 t4_teardown_port_queues(struct port_info *pi)
653 {
654 	int i;
655 	struct adapter *sc = pi->adapter;
656 	struct sge_rxq *rxq;
657 	struct sge_txq *txq;
658 #ifndef TCP_OFFLOAD_DISABLE
659 	struct sge_ofld_rxq *ofld_rxq;
660 	struct sge_wrq *ofld_txq;
661 #endif
662 
663 	/* Do this before freeing the queues */
664 	if (pi->flags & PORT_SYSCTL_CTX) {
665 		sysctl_ctx_free(&pi->ctx);
666 		pi->flags &= ~PORT_SYSCTL_CTX;
667 	}
668 
669 	/*
670 	 * Take down all the tx queues first, as they reference the rx queues
671 	 * (for egress updates, etc.).
672 	 */
673 
674 	free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
675 
676 	for_each_txq(pi, i, txq) {
677 		free_txq(pi, txq);
678 	}
679 
680 #ifndef TCP_OFFLOAD_DISABLE
681 	for_each_ofld_txq(pi, i, ofld_txq) {
682 		free_wrq(sc, ofld_txq);
683 	}
684 #endif
685 
686 	/*
687 	 * Then take down the rx queues that forward their interrupts, as they
688 	 * reference other rx queues.
689 	 */
690 
691 	for_each_rxq(pi, i, rxq) {
692 		if ((rxq->iq.flags & IQ_INTR) == 0)
693 			free_rxq(pi, rxq);
694 	}
695 
696 #ifndef TCP_OFFLOAD_DISABLE
697 	for_each_ofld_rxq(pi, i, ofld_rxq) {
698 		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
699 			free_ofld_rxq(pi, ofld_rxq);
700 	}
701 #endif
702 
703 	/*
704 	 * Then take down the rx queues that take direct interrupts.
705 	 */
706 
707 	for_each_rxq(pi, i, rxq) {
708 		if (rxq->iq.flags & IQ_INTR)
709 			free_rxq(pi, rxq);
710 	}
711 
712 #ifndef TCP_OFFLOAD_DISABLE
713 	for_each_ofld_rxq(pi, i, ofld_rxq) {
714 		if (ofld_rxq->iq.flags & IQ_INTR)
715 			free_ofld_rxq(pi, ofld_rxq);
716 	}
717 #endif
718 
719 	return (0);
720 }
721 
722 /*
723  * Deals with errors and the firmware event queue.  All data rx queues forward
724  * their interrupt to the firmware event queue.
725  */
726 void
727 t4_intr_all(void *arg)
728 {
729 	struct adapter *sc = arg;
730 	struct sge_iq *fwq = &sc->sge.fwq;
731 
732 	t4_intr_err(arg);
733 	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
734 		service_iq(fwq, 0);
735 		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
736 	}
737 }
738 
739 /* Deals with error interrupts */
740 void
741 t4_intr_err(void *arg)
742 {
743 	struct adapter *sc = arg;
744 
745 	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
746 	t4_slow_intr_handler(sc);
747 }
748 
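/*
 * Handler for an interrupt vector meant for event queues; operationally the
 * same as t4_intr below.
 */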
749 void
750 t4_intr_evt(void *arg)
751 {
752 	struct sge_iq *iq = arg;
753 
754 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
755 		service_iq(iq, 0);
756 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
757 	}
758 }
759 
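/* Handler for an interrupt vector assigned to an ingress queue that takes direct interrupts. */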
760 void
761 t4_intr(void *arg)
762 {
763 	struct sge_iq *iq = arg;
764 
765 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
766 		service_iq(iq, 0);
767 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
768 	}
769 }
770 
771 /*
772  * Deals with anything and everything on the given ingress queue.
773  */
774 static int
775 service_iq(struct sge_iq *iq, int budget)
776 {
777 	struct sge_iq *q;
778 	struct sge_rxq *rxq = (void *)iq;	/* Use iff iq is part of rxq */
779 	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
780 	struct adapter *sc = iq->adapter;
781 	struct rsp_ctrl *ctrl;
782 	const struct rss_header *rss;
783 	int ndescs = 0, limit, fl_bufs_used = 0;
784 	int rsp_type;
785 	uint32_t lq;
786 	struct mbuf *m0;
787 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
788 
789 	limit = budget ? budget : iq->qsize / 8;
790 
791 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
792 
793 	/*
794 	 * We always come back and check the descriptor ring for new indirect
795 	 * interrupts and other responses after running a single handler.
796 	 */
797 	for (;;) {
798 		while (is_new_response(iq, &ctrl)) {
799 
800 			rmb();
801 
802 			m0 = NULL;
803 			rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
804 			lq = be32toh(ctrl->pldbuflen_qid);
805 			rss = (const void *)iq->cdesc;
806 
807 			switch (rsp_type) {
808 			case X_RSPD_TYPE_FLBUF:
809 
810 				KASSERT(iq->flags & IQ_HAS_FL,
811 				    ("%s: data for an iq (%p) with no freelist",
812 				    __func__, iq));
813 
814 				m0 = get_fl_payload(sc, fl, lq, &fl_bufs_used);
815 #ifdef T4_PKT_TIMESTAMP
816 				/*
817 				 * 60 bit timestamp for the payload is
818 				 * *(uint64_t *)m0->m_pktdat.  Note that it is
819 				 * in the leading free-space in the mbuf.  The
820 				 * kernel can clobber it during a pullup,
821 				 * m_copymdata, etc.  You need to make sure that
822 				 * the mbuf reaches you unmolested if you care
823 				 * about the timestamp.
824 				 */
825 				*(uint64_t *)m0->m_pktdat =
826 				    be64toh(ctrl->u.last_flit) &
827 				    0xfffffffffffffff;
828 #endif
829 
830 				/* fall through */
831 
832 			case X_RSPD_TYPE_CPL:
833 				KASSERT(rss->opcode < NUM_CPL_CMDS,
834 				    ("%s: bad opcode %02x.", __func__,
835 				    rss->opcode));
836 				sc->cpl_handler[rss->opcode](iq, rss, m0);
837 				break;
838 
839 			case X_RSPD_TYPE_INTR:
840 
841 				/*
842 				 * Interrupts should be forwarded only to queues
843 				 * that are not forwarding their interrupts.
844 				 * This means service_iq can recurse but only 1
845 				 * level deep.
846 				 */
847 				KASSERT(budget == 0,
848 				    ("%s: budget %u, rsp_type %u", __func__,
849 				    budget, rsp_type));
850 
851 				q = sc->sge.iqmap[lq - sc->sge.iq_start];
852 				if (atomic_cmpset_int(&q->state, IQS_IDLE,
853 				    IQS_BUSY)) {
854 					if (service_iq(q, q->qsize / 8) == 0) {
855 						atomic_cmpset_int(&q->state,
856 						    IQS_BUSY, IQS_IDLE);
857 					} else {
858 						STAILQ_INSERT_TAIL(&iql, q,
859 						    link);
860 					}
861 				}
862 				break;
863 
864 			default:
865 				panic("%s: rsp_type %u", __func__, rsp_type);
866 			}
867 
868 			iq_next(iq);
869 			if (++ndescs == limit) {
870 				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
871 				    V_CIDXINC(ndescs) |
872 				    V_INGRESSQID(iq->cntxt_id) |
873 				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
874 				ndescs = 0;
875 
876 				if (fl_bufs_used > 0) {
877 					FL_LOCK(fl);
878 					fl->needed += fl_bufs_used;
879 					refill_fl(sc, fl, fl->cap / 8);
880 					FL_UNLOCK(fl);
881 					fl_bufs_used = 0;
882 				}
883 
884 				if (budget)
885 					return (EINPROGRESS);
886 			}
887 		}
888 
889 		if (STAILQ_EMPTY(&iql))
890 			break;
891 
892 		/*
893 		 * Process the head only, and send it to the back of the list if
894 		 * it's still not done.
895 		 */
896 		q = STAILQ_FIRST(&iql);
897 		STAILQ_REMOVE_HEAD(&iql, link);
898 		if (service_iq(q, q->qsize / 8) == 0)
899 			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
900 		else
901 			STAILQ_INSERT_TAIL(&iql, q, link);
902 	}
903 
904 #ifdef INET
905 	if (iq->flags & IQ_LRO_ENABLED) {
906 		struct lro_ctrl *lro = &rxq->lro;
907 		struct lro_entry *l;
908 
909 		while (!SLIST_EMPTY(&lro->lro_active)) {
910 			l = SLIST_FIRST(&lro->lro_active);
911 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
912 			tcp_lro_flush(lro, l);
913 		}
914 	}
915 #endif
916 
917 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
918 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
919 
920 	if (iq->flags & IQ_HAS_FL) {
921 		int starved;
922 
923 		FL_LOCK(fl);
924 		fl->needed += fl_bufs_used;
925 		starved = refill_fl(sc, fl, fl->cap / 4);
926 		FL_UNLOCK(fl);
927 		if (__predict_false(starved != 0))
928 			add_fl_to_sfl(sc, fl);
929 	}
930 
931 	return (0);
932 }
933 
934 
935 #ifdef T4_PKT_TIMESTAMP
936 #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
937 #else
938 #define RX_COPY_THRESHOLD MINCLSIZE
939 #endif
940 
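/*
 * Builds the mbuf chain for a received frame from one or more freelist
 * buffers.  Small amounts of data are copied into the mbuf; larger buffers are
 * attached to the chain as external clusters.
 */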
941 static struct mbuf *
942 get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf,
943     int *fl_bufs_used)
944 {
945 	struct mbuf *m0, *m;
946 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
947 	unsigned int nbuf, len;
948 
949 	/*
950 	 * No assertion for the fl lock because we don't need it.  This routine
951 	 * is called only from the rx interrupt handler and it only updates
952 	 * fl->cidx.  (Contrast that with fl->pidx/fl->needed which could be
953 	 * updated in the rx interrupt handler or the starvation helper routine.
954 	 * That's why code that manipulates fl->pidx/fl->needed needs the fl
955 	 * lock but this routine does not).
956 	 */
957 
958 	if (__predict_false((len_newbuf & F_RSPD_NEWBUF) == 0))
959 		panic("%s: cannot handle packed frames", __func__);
960 	len = G_RSPD_LEN(len_newbuf);
961 
962 	m0 = sd->m;
963 	sd->m = NULL;	/* consumed */
964 
965 	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
966 	m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
967 #ifdef T4_PKT_TIMESTAMP
968 	/* Leave room for a timestamp */
969 	m0->m_data += 8;
970 #endif
971 
972 	if (len < RX_COPY_THRESHOLD) {
973 		/* copy data to mbuf, buffer will be recycled */
974 		bcopy(sd->cl, mtod(m0, caddr_t), len);
975 		m0->m_len = len;
976 	} else {
977 		bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
978 		m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
979 		sd->cl = NULL;	/* consumed */
980 		m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
981 	}
982 	m0->m_pkthdr.len = len;
983 
984 	sd++;
985 	if (__predict_false(++fl->cidx == fl->cap)) {
986 		sd = fl->sdesc;
987 		fl->cidx = 0;
988 	}
989 
990 	m = m0;
991 	len -= m->m_len;
992 	nbuf = 1;	/* # of fl buffers used */
993 
994 	while (len > 0) {
995 		m->m_next = sd->m;
996 		sd->m = NULL;	/* consumed */
997 		m = m->m_next;
998 
999 		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
1000 		    BUS_DMASYNC_POSTREAD);
1001 
1002 		m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
1003 		if (len <= MLEN) {
1004 			bcopy(sd->cl, mtod(m, caddr_t), len);
1005 			m->m_len = len;
1006 		} else {
1007 			bus_dmamap_unload(fl->tag[sd->tag_idx],
1008 			    sd->map);
1009 			m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
1010 			sd->cl = NULL;	/* consumed */
1011 			m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
1012 		}
1013 
1014 		sd++;
1015 		if (__predict_false(++fl->cidx == fl->cap)) {
1016 			sd = fl->sdesc;
1017 			fl->cidx = 0;
1018 		}
1019 
1020 		len -= m->m_len;
1021 		nbuf++;
1022 	}
1023 
1024 	(*fl_bufs_used) += nbuf;
1025 
1026 	return (m0);
1027 }
1028 
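/*
 * CPL_RX_PKT handler: undoes the SGE packet shift, fills in checksum, flowid,
 * and VLAN information, and hands the frame to LRO or directly to the ifnet.
 */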
1029 static int
1030 t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
1031 {
1032 	struct sge_rxq *rxq = (void *)iq;
1033 	struct ifnet *ifp = rxq->ifp;
1034 	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
1035 #ifdef INET
1036 	struct lro_ctrl *lro = &rxq->lro;
1037 #endif
1038 
1039 	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
1040 	    rss->opcode));
1041 
1042 	m0->m_pkthdr.len -= FL_PKTSHIFT;
1043 	m0->m_len -= FL_PKTSHIFT;
1044 	m0->m_data += FL_PKTSHIFT;
1045 
1046 	m0->m_pkthdr.rcvif = ifp;
1047 	m0->m_flags |= M_FLOWID;
1048 	m0->m_pkthdr.flowid = rss->hash_val;
1049 
1050 	if (cpl->csum_calc && !cpl->err_vec &&
1051 	    ifp->if_capenable & IFCAP_RXCSUM) {
1052 		m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
1053 		    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1054 		if (cpl->ip_frag)
1055 			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
1056 		else
1057 			m0->m_pkthdr.csum_data = 0xffff;
1058 		rxq->rxcsum++;
1059 	}
1060 
1061 	if (cpl->vlan_ex) {
1062 		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
1063 		m0->m_flags |= M_VLANTAG;
1064 		rxq->vlan_extraction++;
1065 	}
1066 
1067 #ifdef INET
1068 	if (cpl->l2info & htobe32(F_RXF_LRO) &&
1069 	    iq->flags & IQ_LRO_ENABLED &&
1070 	    tcp_lro_rx(lro, m0, 0) == 0) {
1071 		/* queued for LRO */
1072 	} else
1073 #endif
1074 	ifp->if_input(ifp, m0);
1075 
1076 	return (0);
1077 }
1078 
1079 int
1080 t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
1081 {
1082 	return t4_wrq_tx(sc, &sc->sge.mgmtq, m);
1083 }
1084 
1085 /*
1086  * Doesn't fail.  Holds on to work requests it can't send right away.
1087  */
1088 int
1089 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct mbuf *m0)
1090 {
1091 	struct sge_eq *eq = &wrq->eq;
1092 	int can_reclaim;
1093 	caddr_t dst;
1094 	struct mbuf *wr, *next;
1095 
1096 	TXQ_LOCK_ASSERT_OWNED(wrq);
1097 	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
1098 	    (eq->flags & EQ_TYPEMASK) == EQ_CTRL,
1099 	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
1100 
1101 	if (__predict_true(m0 != NULL)) {
1102 		if (wrq->head)
1103 			wrq->tail->m_nextpkt = m0;
1104 		else
1105 			wrq->head = m0;
1106 		while (m0->m_nextpkt)
1107 			m0 = m0->m_nextpkt;
1108 		wrq->tail = m0;
1109 	}
1110 
1111 	can_reclaim = reclaimable(eq);
1112 	if (__predict_false(eq->flags & EQ_STALLED)) {
1113 		if (can_reclaim < tx_resume_threshold(eq))
1114 			return (0);
1115 		eq->flags &= ~EQ_STALLED;
1116 		eq->unstalled++;
1117 	}
1118 	eq->cidx += can_reclaim;
1119 	eq->avail += can_reclaim;
1120 	if (__predict_false(eq->cidx >= eq->cap))
1121 		eq->cidx -= eq->cap;
1122 
1123 	for (wr = wrq->head; wr; wr = next) {
1124 		int ndesc;
1125 		struct mbuf *m;
1126 
1127 		next = wr->m_nextpkt;
1128 		wr->m_nextpkt = NULL;
1129 
1130 		M_ASSERTPKTHDR(wr);
1131 		KASSERT(wr->m_pkthdr.len > 0 && (wr->m_pkthdr.len & 0x7) == 0,
1132 		    ("%s: work request len %d.", __func__, wr->m_pkthdr.len));
1133 
1134 		if (wr->m_pkthdr.len > SGE_MAX_WR_LEN) {
1135 #ifdef INVARIANTS
1136 			panic("%s: oversized work request", __func__);
1137 #else
1138 			log(LOG_ERR, "%s: %s work request too long (%d)",
1139 			    device_get_nameunit(sc->dev), __func__,
1140 			    wr->m_pkthdr.len);
1141 			m_freem(wr);
1142 			continue;
1143 #endif
1144 		}
1145 
1146 		ndesc = howmany(wr->m_pkthdr.len, EQ_ESIZE);
1147 		if (eq->avail < ndesc) {
1148 			wr->m_nextpkt = next;
1149 			wrq->no_desc++;
1150 			break;
1151 		}
1152 
1153 		dst = (void *)&eq->desc[eq->pidx];
1154 		for (m = wr; m; m = m->m_next)
1155 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
1156 
1157 		eq->pidx += ndesc;
1158 		eq->avail -= ndesc;
1159 		if (__predict_false(eq->pidx >= eq->cap))
1160 			eq->pidx -= eq->cap;
1161 
1162 		eq->pending += ndesc;
1163 		if (eq->pending > 16)
1164 			ring_eq_db(sc, eq);
1165 
1166 		wrq->tx_wrs++;
1167 		m_freem(wr);
1168 
1169 		if (eq->avail < 8) {
1170 			can_reclaim = reclaimable(eq);
1171 			eq->cidx += can_reclaim;
1172 			eq->avail += can_reclaim;
1173 			if (__predict_false(eq->cidx >= eq->cap))
1174 				eq->cidx -= eq->cap;
1175 		}
1176 	}
1177 
1178 	if (eq->pending)
1179 		ring_eq_db(sc, eq);
1180 
1181 	if (wr == NULL)
1182 		wrq->head = wrq->tail = NULL;
1183 	else {
1184 		wrq->head = wr;
1185 
1186 		KASSERT(wrq->tail->m_nextpkt == NULL,
1187 		    ("%s: wrq->tail grew a tail of its own", __func__));
1188 
1189 		eq->flags |= EQ_STALLED;
1190 		if (callout_pending(&eq->tx_callout) == 0)
1191 			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1192 	}
1193 
1194 	return (0);
1195 }
1196 
1197 /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
1198 #define TXPKTS_PKT_HDR ((\
1199     sizeof(struct ulp_txpkt) + \
1200     sizeof(struct ulptx_idata) + \
1201     sizeof(struct cpl_tx_pkt_core) \
1202     ) / 8)
1203 
1204 /* Header of a coalesced tx WR, before SGL of first packet (in flits) */
1205 #define TXPKTS_WR_HDR (\
1206     sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
1207     TXPKTS_PKT_HDR)
1208 
1209 /* Header of a tx WR, before SGL of first packet (in flits) */
1210 #define TXPKT_WR_HDR ((\
1211     sizeof(struct fw_eth_tx_pkt_wr) + \
1212     sizeof(struct cpl_tx_pkt_core) \
1213     ) / 8 )
1214 
1215 /* Header of a tx LSO WR, before SGL of first packet (in flits) */
1216 #define TXPKT_LSO_WR_HDR ((\
1217     sizeof(struct fw_eth_tx_pkt_wr) + \
1218     sizeof(struct cpl_tx_pkt_lso) + \
1219     sizeof(struct cpl_tx_pkt_core) \
1220     ) / 8 )
1221 
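/*
 * Transmits the given frame and then as much of the buf_ring's backlog as the
 * available hardware descriptors allow, coalescing packets into TXPKTS work
 * requests when more than one frame is waiting to be sent.
 */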
1222 int
1223 t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
1224 {
1225 	struct port_info *pi = (void *)ifp->if_softc;
1226 	struct adapter *sc = pi->adapter;
1227 	struct sge_eq *eq = &txq->eq;
1228 	struct buf_ring *br = txq->br;
1229 	struct mbuf *next;
1230 	int rc, coalescing, can_reclaim;
1231 	struct txpkts txpkts;
1232 	struct sgl sgl;
1233 
1234 	TXQ_LOCK_ASSERT_OWNED(txq);
1235 	KASSERT(m, ("%s: called with nothing to do.", __func__));
1236 	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH,
1237 	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
1238 
1239 	prefetch(&eq->desc[eq->pidx]);
1240 	prefetch(&txq->sdesc[eq->pidx]);
1241 
1242 	txpkts.npkt = 0;	/* indicates there's nothing in txpkts */
1243 	coalescing = 0;
1244 
1245 	can_reclaim = reclaimable(eq);
1246 	if (__predict_false(eq->flags & EQ_STALLED)) {
1247 		if (can_reclaim < tx_resume_threshold(eq)) {
1248 			txq->m = m;
1249 			return (0);
1250 		}
1251 		eq->flags &= ~EQ_STALLED;
1252 		eq->unstalled++;
1253 	}
1254 
1255 	if (__predict_false(eq->flags & EQ_DOOMED)) {
1256 		m_freem(m);
1257 		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1258 			m_freem(m);
1259 		return (ENETDOWN);
1260 	}
1261 
1262 	if (eq->avail < 8 && can_reclaim)
1263 		reclaim_tx_descs(txq, can_reclaim, 32);
1264 
1265 	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
1266 
1267 		if (eq->avail < 8)
1268 			break;
1269 
1270 		next = m->m_nextpkt;
1271 		m->m_nextpkt = NULL;
1272 
1273 		if (next || buf_ring_peek(br))
1274 			coalescing = 1;
1275 
1276 		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
1277 		if (rc != 0) {
1278 			if (rc == ENOMEM) {
1279 
1280 				/* Short of resources, suspend tx */
1281 
1282 				m->m_nextpkt = next;
1283 				break;
1284 			}
1285 
1286 			/*
1287 			 * Unrecoverable error for this packet, throw it away
1288 			 * and move on to the next.  get_pkt_sgl may already
1289 			 * have freed m (it will be NULL in that case and the
1290 			 * m_freem here is still safe).
1291 			 */
1292 
1293 			m_freem(m);
1294 			continue;
1295 		}
1296 
1297 		if (coalescing &&
1298 		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
1299 
1300 			/* Successfully absorbed into txpkts */
1301 
1302 			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
1303 			goto doorbell;
1304 		}
1305 
1306 		/*
1307 		 * We weren't coalescing to begin with, or the current frame could
1308 		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
1309 		 * given to it can't be coalesced).  Either way there should be
1310 		 * nothing in txpkts.
1311 		 */
1312 		KASSERT(txpkts.npkt == 0,
1313 		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
1314 
1315 		/* We're sending out individual packets now */
1316 		coalescing = 0;
1317 
1318 		if (eq->avail < 8)
1319 			reclaim_tx_descs(txq, 0, 8);
1320 		rc = write_txpkt_wr(pi, txq, m, &sgl);
1321 		if (rc != 0) {
1322 
1323 			/* Short of hardware descriptors, suspend tx */
1324 
1325 			/*
1326 			 * This is an unlikely but expensive failure.  We've
1327 			 * done all the hard work (DMA mappings etc.) and now we
1328 			 * can't send out the packet.  What's worse, we have to
1329 			 * spend even more time freeing up everything in sgl.
1330 			 */
1331 			txq->no_desc++;
1332 			free_pkt_sgl(txq, &sgl);
1333 
1334 			m->m_nextpkt = next;
1335 			break;
1336 		}
1337 
1338 		ETHER_BPF_MTAP(ifp, m);
1339 		if (sgl.nsegs == 0)
1340 			m_freem(m);
1341 doorbell:
1342 		if (eq->pending >= 64)
1343 		    ring_eq_db(sc, eq);
1344 
1345 		can_reclaim = reclaimable(eq);
1346 		if (can_reclaim >= 32)
1347 			reclaim_tx_descs(txq, can_reclaim, 64);
1348 	}
1349 
1350 	if (txpkts.npkt > 0)
1351 		write_txpkts_wr(txq, &txpkts);
1352 
1353 	/*
1354 	 * m not NULL means there was an error but we haven't thrown it away.
1355 	 * This can happen when we're short of tx descriptors (no_desc) or maybe
1356 	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
1357 	 * will get things going again.
1358 	 */
1359 	if (m && !(eq->flags & EQ_CRFLUSHED)) {
1360 		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
1361 
1362 		/*
1363 		 * If EQ_CRFLUSHED is not set then we know we have at least one
1364 		 * available descriptor because any WR that reduces eq->avail to
1365 		 * 0 also sets EQ_CRFLUSHED.
1366 		 */
1367 		KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__));
1368 
1369 		txsd->desc_used = 1;
1370 		txsd->credits = 0;
1371 		write_eqflush_wr(eq);
1372 	}
1373 	txq->m = m;
1374 
1375 	if (eq->pending)
1376 		ring_eq_db(sc, eq);
1377 
1378 	reclaim_tx_descs(txq, 0, 128);
1379 
1380 	if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0)
1381 		callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1382 
1383 	return (0);
1384 }
1385 
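/*
 * Recomputes the freelist buffer size selection for each of the interface's rx
 * queues based on its current MTU.
 */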
1386 void
1387 t4_update_fl_bufsize(struct ifnet *ifp)
1388 {
1389 	struct port_info *pi = ifp->if_softc;
1390 	struct sge_rxq *rxq;
1391 	struct sge_fl *fl;
1392 	int i, bufsize;
1393 
1394 	/* large enough for a frame even when VLAN extraction is disabled */
1395 	bufsize = FL_PKTSHIFT + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
1396 	    ifp->if_mtu;
1397 	for_each_rxq(pi, i, rxq) {
1398 		fl = &rxq->fl;
1399 
1400 		FL_LOCK(fl);
1401 		set_fl_tag_idx(fl, bufsize);
1402 		FL_UNLOCK(fl);
1403 	}
1404 }
1405 
1406 int
1407 can_resume_tx(struct sge_eq *eq)
1408 {
1409 	return (reclaimable(eq) >= tx_resume_threshold(eq));
1410 }
1411 
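/*
 * Fills in the software state of an ingress queue; the hardware context is
 * created later in alloc_iq_fl.
 */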
1412 static inline void
1413 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
1414     int qsize, int esize, char *name)
1415 {
1416 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
1417 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
1418 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* negative is ok, means don't use */
1419 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
1420 
1421 	iq->flags = 0;
1422 	iq->adapter = sc;
1423 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
1424 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
1425 	if (pktc_idx >= 0) {
1426 		iq->intr_params |= F_QINTR_CNT_EN;
1427 		iq->intr_pktc_idx = pktc_idx;
1428 	}
1429 	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
1430 	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
1431 	strlcpy(iq->lockname, name, sizeof(iq->lockname));
1432 }
1433 
1434 static inline void
1435 init_fl(struct sge_fl *fl, int qsize, int bufsize, char *name)
1436 {
1437 	fl->qsize = qsize;
1438 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
1439 	set_fl_tag_idx(fl, bufsize);
1440 }
1441 
1442 static inline void
1443 init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan,
1444     uint16_t iqid, char *name)
1445 {
1446 	KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan));
1447 	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
1448 
1449 	eq->flags = eqtype & EQ_TYPEMASK;
1450 	eq->tx_chan = tx_chan;
1451 	eq->iqid = iqid;
1452 	eq->qsize = qsize;
1453 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
1454 
1455 	TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq);
1456 	callout_init(&eq->tx_callout, CALLOUT_MPSAFE);
1457 }
1458 
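/*
 * Allocates a DMA-able, zeroed descriptor ring of the given length and returns
 * the tag, map, bus address, and virtual address for it.
 */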
1459 static int
1460 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
1461     bus_dmamap_t *map, bus_addr_t *pa, void **va)
1462 {
1463 	int rc;
1464 
1465 	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
1466 	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
1467 	if (rc != 0) {
1468 		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
1469 		goto done;
1470 	}
1471 
1472 	rc = bus_dmamem_alloc(*tag, va,
1473 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
1474 	if (rc != 0) {
1475 		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
1476 		goto done;
1477 	}
1478 
1479 	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
1480 	if (rc != 0) {
1481 		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
1482 		goto done;
1483 	}
1484 done:
1485 	if (rc)
1486 		free_ring(sc, *tag, *map, *pa, *va);
1487 
1488 	return (rc);
1489 }
1490 
1491 static int
1492 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
1493     bus_addr_t pa, void *va)
1494 {
1495 	if (pa)
1496 		bus_dmamap_unload(tag, map);
1497 	if (va)
1498 		bus_dmamem_free(tag, va, map);
1499 	if (tag)
1500 		bus_dma_tag_destroy(tag);
1501 
1502 	return (0);
1503 }
1504 
1505 /*
1506  * Allocates the ring for an ingress queue and an optional freelist.  If the
1507  * freelist is specified it will be allocated and then associated with the
1508  * ingress queue.
1509  *
1510  * Returns errno on failure.  Resources allocated up to that point may still be
1511  * allocated.  Caller is responsible for cleanup in case this function fails.
1512  *
1513  * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
1514  * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
1515  * the abs_id of the ingress queue to which its interrupts should be forwarded.
1516  */
1517 static int
1518 alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
1519     int intr_idx, int cong)
1520 {
1521 	int rc, i, cntxt_id;
1522 	size_t len;
1523 	struct fw_iq_cmd c;
1524 	struct adapter *sc = iq->adapter;
1525 	__be32 v = 0;
1526 
1527 	len = iq->qsize * iq->esize;
1528 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
1529 	    (void **)&iq->desc);
1530 	if (rc != 0)
1531 		return (rc);
1532 
1533 	bzero(&c, sizeof(c));
1534 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1535 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
1536 	    V_FW_IQ_CMD_VFN(0));
1537 
1538 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1539 	    FW_LEN16(c));
1540 
1541 	/* Special handling for firmware event queue */
1542 	if (iq == &sc->sge.fwq)
1543 		v |= F_FW_IQ_CMD_IQASYNCH;
1544 
1545 	if (iq->flags & IQ_INTR) {
1546 		KASSERT(intr_idx < sc->intr_count,
1547 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
1548 	} else
1549 		v |= F_FW_IQ_CMD_IQANDST;
1550 	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
1551 
1552 	c.type_to_iqandstindex = htobe32(v |
1553 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1554 	    V_FW_IQ_CMD_VIID(pi->viid) |
1555 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
1556 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
1557 	    F_FW_IQ_CMD_IQGTSMODE |
1558 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
1559 	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
1560 	c.iqsize = htobe16(iq->qsize);
1561 	c.iqaddr = htobe64(iq->ba);
1562 	if (cong >= 0)
1563 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
1564 
1565 	if (fl) {
1566 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
1567 
1568 		for (i = 0; i < FL_BUF_SIZES; i++) {
1569 
1570 			/*
1571 			 * A freelist buffer must be 16-byte aligned as the SGE
1572 			 * uses the low 4 bits of the bus addr to figure out the
1573 			 * buffer size.
1574 			 */
1575 			rc = bus_dma_tag_create(sc->dmat, 16, 0,
1576 			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1577 			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
1578 			    NULL, NULL, &fl->tag[i]);
1579 			if (rc != 0) {
1580 				device_printf(sc->dev,
1581 				    "failed to create fl DMA tag[%d]: %d\n",
1582 				    i, rc);
1583 				return (rc);
1584 			}
1585 		}
1586 		len = fl->qsize * RX_FL_ESIZE;
1587 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
1588 		    &fl->ba, (void **)&fl->desc);
1589 		if (rc)
1590 			return (rc);
1591 
1592 		/* Allocate space for one software descriptor per buffer. */
1593 		fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
1594 		FL_LOCK(fl);
1595 		rc = alloc_fl_sdesc(fl);
1596 		FL_UNLOCK(fl);
1597 		if (rc != 0) {
1598 			device_printf(sc->dev,
1599 			    "failed to setup fl software descriptors: %d\n",
1600 			    rc);
1601 			return (rc);
1602 		}
1603 		fl->needed = fl->cap;
1604 		fl->lowat = roundup(sc->sge.fl_starve_threshold, 8);
1605 
1606 		c.iqns_to_fl0congen |=
1607 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1608 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1609 			F_FW_IQ_CMD_FL0PADEN);
1610 		if (cong >= 0) {
1611 			c.iqns_to_fl0congen |=
1612 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1613 				    F_FW_IQ_CMD_FL0CONGCIF |
1614 				    F_FW_IQ_CMD_FL0CONGEN);
1615 		}
1616 		c.fl0dcaen_to_fl0cidxfthresh =
1617 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
1618 			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
1619 		c.fl0size = htobe16(fl->qsize);
1620 		c.fl0addr = htobe64(fl->ba);
1621 	}
1622 
1623 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1624 	if (rc != 0) {
1625 		device_printf(sc->dev,
1626 		    "failed to create ingress queue: %d\n", rc);
1627 		return (rc);
1628 	}
1629 
1630 	iq->cdesc = iq->desc;
1631 	iq->cidx = 0;
1632 	iq->gen = 1;
1633 	iq->intr_next = iq->intr_params;
1634 	iq->cntxt_id = be16toh(c.iqid);
1635 	iq->abs_id = be16toh(c.physiqid);
1636 	iq->flags |= IQ_ALLOCATED;
1637 
1638 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1639 	if (cntxt_id >= sc->sge.niq) {
1640 		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1641 		    cntxt_id, sc->sge.niq - 1);
1642 	}
1643 	sc->sge.iqmap[cntxt_id] = iq;
1644 
1645 	if (fl) {
1646 		fl->cntxt_id = be16toh(c.fl0id);
1647 		fl->pidx = fl->cidx = 0;
1648 
1649 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1650 		if (cntxt_id >= sc->sge.neq) {
1651 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
1652 			    __func__, cntxt_id, sc->sge.neq - 1);
1653 		}
1654 		sc->sge.eqmap[cntxt_id] = (void *)fl;
1655 
1656 		FL_LOCK(fl);
1657 		/* Enough to make sure the SGE doesn't think it's starved */
1658 		refill_fl(sc, fl, fl->lowat);
1659 		FL_UNLOCK(fl);
1660 
1661 		iq->flags |= IQ_HAS_FL;
1662 	}
1663 
1664 	/* Enable IQ interrupts */
1665 	atomic_store_rel_int(&iq->state, IQS_IDLE);
1666 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
1667 	    V_INGRESSQID(iq->cntxt_id));
1668 
1669 	return (0);
1670 }
1671 
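/*
 * Releases an ingress queue and its optional freelist: the firmware context is
 * freed first, then the rings, software descriptors, lock, and DMA tags.
 */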
1672 static int
1673 free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
1674 {
1675 	int i, rc;
1676 	struct adapter *sc = iq->adapter;
1677 	device_t dev;
1678 
1679 	if (sc == NULL)
1680 		return (0);	/* nothing to do */
1681 
1682 	dev = pi ? pi->dev : sc->dev;
1683 
1684 	if (iq->flags & IQ_ALLOCATED) {
1685 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
1686 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
1687 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
1688 		if (rc != 0) {
1689 			device_printf(dev,
1690 			    "failed to free queue %p: %d\n", iq, rc);
1691 			return (rc);
1692 		}
1693 		iq->flags &= ~IQ_ALLOCATED;
1694 	}
1695 
1696 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
1697 
1698 	bzero(iq, sizeof(*iq));
1699 
1700 	if (fl) {
1701 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
1702 		    fl->desc);
1703 
1704 		if (fl->sdesc) {
1705 			FL_LOCK(fl);
1706 			free_fl_sdesc(fl);
1707 			FL_UNLOCK(fl);
1708 		}
1709 
1710 		if (mtx_initialized(&fl->fl_lock))
1711 			mtx_destroy(&fl->fl_lock);
1712 
1713 		for (i = 0; i < FL_BUF_SIZES; i++) {
1714 			if (fl->tag[i])
1715 				bus_dma_tag_destroy(fl->tag[i]);
1716 		}
1717 
1718 		bzero(fl, sizeof(*fl));
1719 	}
1720 
1721 	return (0);
1722 }
1723 
1724 static int
1725 alloc_fwq(struct adapter *sc)
1726 {
1727 	int rc, intr_idx;
1728 	struct sge_iq *fwq = &sc->sge.fwq;
1729 	char name[16];
1730 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
1731 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
1732 
1733 	snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
1734 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, name);
1735 	fwq->flags |= IQ_INTR;	/* always */
1736 	intr_idx = sc->intr_count > 1 ? 1 : 0;
1737 	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
1738 	if (rc != 0) {
1739 		device_printf(sc->dev,
1740 		    "failed to create firmware event queue: %d\n", rc);
1741 		return (rc);
1742 	}
1743 
1744 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD,
1745 	    NULL, "firmware event queue");
1746 	children = SYSCTL_CHILDREN(oid);
1747 
1748 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
1749 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
1750 	    "absolute id of the queue");
1751 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
1752 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
1753 	    "SGE context id of the queue");
1754 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
1755 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
1756 	    "consumer index");
1757 
1758 	return (0);
1759 }
1760 
1761 static int
1762 free_fwq(struct adapter *sc)
1763 {
1764 	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
1765 }
1766 
1767 static int
1768 alloc_mgmtq(struct adapter *sc)
1769 {
1770 	int rc;
1771 	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
1772 	char name[16];
1773 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
1774 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
1775 
1776 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
1777 	    NULL, "management queue");
1778 
1779 	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
1780 	init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
1781 	    sc->sge.fwq.cntxt_id, name);
1782 	rc = alloc_wrq(sc, NULL, mgmtq, oid);
1783 	if (rc != 0) {
1784 		device_printf(sc->dev,
1785 		    "failed to create management queue: %d\n", rc);
1786 		return (rc);
1787 	}
1788 
1789 	return (0);
1790 }
1791 
1792 static int
1793 free_mgmtq(struct adapter *sc)
1794 {
1795 	return free_wrq(sc, &sc->sge.mgmtq);
1796 }
1797 
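/*
 * Allocates a NIC rx queue (ingress queue + freelist), fills part of the
 * freelist, sets up LRO if enabled, and attaches the queue's sysctl nodes.
 */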
1798 static int
1799 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
1800     struct sysctl_oid *oid)
1801 {
1802 	int rc;
1803 	struct sysctl_oid_list *children;
1804 	char name[16];
1805 
1806 	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, 1 << pi->tx_chan);
1807 	if (rc != 0)
1808 		return (rc);
1809 
1810 	FL_LOCK(&rxq->fl);
1811 	refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8);
1812 	FL_UNLOCK(&rxq->fl);
1813 
1814 #ifdef INET
1815 	rc = tcp_lro_init(&rxq->lro);
1816 	if (rc != 0)
1817 		return (rc);
1818 	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
1819 
1820 	if (pi->ifp->if_capenable & IFCAP_LRO)
1821 		rxq->iq.flags |= IQ_LRO_ENABLED;
1822 #endif
1823 	rxq->ifp = pi->ifp;
1824 
1825 	children = SYSCTL_CHILDREN(oid);
1826 
1827 	snprintf(name, sizeof(name), "%d", idx);
1828 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1829 	    NULL, "rx queue");
1830 	children = SYSCTL_CHILDREN(oid);
1831 
1832 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
1833 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
1834 	    "absolute id of the queue");
1835 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1836 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
1837 	    "SGE context id of the queue");
1838 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
1839 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
1840 	    "consumer index");
1841 #ifdef INET
1842 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
1843 	    &rxq->lro.lro_queued, 0, NULL);
1844 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
1845 	    &rxq->lro.lro_flushed, 0, NULL);
1846 #endif
1847 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
1848 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
1849 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
1850 	    CTLFLAG_RD, &rxq->vlan_extraction,
1851 	    "# of times hardware extracted 802.1Q tag");
1852 
1853 	children = SYSCTL_CHILDREN(oid);
1854 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
1855 	    NULL, "freelist");
1856 	children = SYSCTL_CHILDREN(oid);
1857 
1858 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1859 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
1860 	    "SGE context id of the queue");
1861 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
1862 	    &rxq->fl.cidx, 0, "consumer index");
1863 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
1864 	    &rxq->fl.pidx, 0, "producer index");
1865 
1866 	return (rc);
1867 }
1868 
1869 static int
1870 free_rxq(struct port_info *pi, struct sge_rxq *rxq)
1871 {
1872 	int rc;
1873 
1874 #ifdef INET
1875 	if (rxq->lro.ifp) {
1876 		tcp_lro_free(&rxq->lro);
1877 		rxq->lro.ifp = NULL;
1878 	}
1879 #endif
1880 
1881 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
1882 	if (rc == 0)
1883 		bzero(rxq, sizeof(*rxq));
1884 
1885 	return (rc);
1886 }
1887 
1888 #ifndef TCP_OFFLOAD_DISABLE
1889 static int
1890 alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
1891     int intr_idx, int idx, struct sysctl_oid *oid)
1892 {
1893 	int rc;
1894 	struct sysctl_oid_list *children;
1895 	char name[16];
1896 
1897 	rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
1898 	    1 << pi->tx_chan);
1899 	if (rc != 0)
1900 		return (rc);
1901 
1902 	children = SYSCTL_CHILDREN(oid);
1903 
1904 	snprintf(name, sizeof(name), "%d", idx);
1905 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1906 	    NULL, "rx queue");
1907 	children = SYSCTL_CHILDREN(oid);
1908 
1909 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
1910 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
1911 	    "I", "absolute id of the queue");
1912 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1913 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
1914 	    "I", "SGE context id of the queue");
1915 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
1916 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
1917 	    "consumer index");
1918 
1919 	children = SYSCTL_CHILDREN(oid);
1920 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
1921 	    NULL, "freelist");
1922 	children = SYSCTL_CHILDREN(oid);
1923 
1924 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1925 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->fl.cntxt_id, 0, sysctl_uint16,
1926 	    "I", "SGE context id of the queue");
1927 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
1928 	    &ofld_rxq->fl.cidx, 0, "consumer index");
1929 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
1930 	    &ofld_rxq->fl.pidx, 0, "producer index");
1931 
1932 	return (rc);
1933 }
1934 
1935 static int
1936 free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
1937 {
1938 	int rc;
1939 
1940 	rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
1941 	if (rc == 0)
1942 		bzero(ofld_rxq, sizeof(*ofld_rxq));
1943 
1944 	return (rc);
1945 }
1946 #endif
1947 
1948 static int
1949 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
1950 {
1951 	int rc, cntxt_id;
1952 	struct fw_eq_ctrl_cmd c;
1953 
1954 	bzero(&c, sizeof(c));
1955 
1956 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
1957 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
1958 	    V_FW_EQ_CTRL_CMD_VFN(0));
1959 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
1960 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
1961 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
1962 	c.physeqid_pkd = htobe32(0);
1963 	c.fetchszm_to_iqid =
1964 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1965 		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
1966 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
1967 	c.dcaen_to_eqsize =
1968 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1969 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1970 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1971 		V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
1972 	c.eqaddr = htobe64(eq->ba);
1973 
1974 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1975 	if (rc != 0) {
1976 		device_printf(sc->dev,
1977 		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
1978 		return (rc);
1979 	}
1980 	eq->flags |= EQ_ALLOCATED;
1981 
1982 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
1983 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1984 	if (cntxt_id >= sc->sge.neq)
1985 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1986 		cntxt_id, sc->sge.neq - 1);
1987 	sc->sge.eqmap[cntxt_id] = eq;
1988 
1989 	return (rc);
1990 }
1991 
1992 static int
1993 eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
1994 {
1995 	int rc, cntxt_id;
1996 	struct fw_eq_eth_cmd c;
1997 
1998 	bzero(&c, sizeof(c));
1999 
2000 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
2001 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
2002 	    V_FW_EQ_ETH_CMD_VFN(0));
2003 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
2004 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2005 	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
2006 	c.fetchszm_to_iqid =
2007 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2008 		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
2009 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
2010 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2011 		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2012 		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2013 		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
2014 	c.eqaddr = htobe64(eq->ba);
2015 
2016 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2017 	if (rc != 0) {
2018 		device_printf(pi->dev,
2019 		    "failed to create Ethernet egress queue: %d\n", rc);
2020 		return (rc);
2021 	}
2022 	eq->flags |= EQ_ALLOCATED;
2023 
2024 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
2025 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2026 	if (cntxt_id >= sc->sge.neq)
2027 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2028 		cntxt_id, sc->sge.neq - 1);
2029 	sc->sge.eqmap[cntxt_id] = eq;
2030 
2031 	return (rc);
2032 }
2033 
2034 #ifndef TCP_OFFLOAD_DISABLE
2035 static int
2036 ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2037 {
2038 	int rc, cntxt_id;
2039 	struct fw_eq_ofld_cmd c;
2040 
2041 	bzero(&c, sizeof(c));
2042 
2043 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
2044 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
2045 	    V_FW_EQ_OFLD_CMD_VFN(0));
2046 	c.alloc_to_len16 = htobe32(F_FW_EQ_OFLD_CMD_ALLOC |
2047 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2048 	c.fetchszm_to_iqid =
2049 		htobe32(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2050 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
2051 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
2052 	c.dcaen_to_eqsize =
2053 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2054 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2055 		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2056 		V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
2057 	c.eqaddr = htobe64(eq->ba);
2058 
2059 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2060 	if (rc != 0) {
2061 		device_printf(pi->dev,
2062 		    "failed to create egress queue for TCP offload: %d\n", rc);
2063 		return (rc);
2064 	}
2065 	eq->flags |= EQ_ALLOCATED;
2066 
2067 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
2068 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2069 	if (cntxt_id >= sc->sge.neq)
2070 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2071 		cntxt_id, sc->sge.neq - 1);
2072 	sc->sge.eqmap[cntxt_id] = eq;
2073 
2074 	return (rc);
2075 }
2076 #endif
2077 
2078 static int
2079 alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2080 {
2081 	int rc;
2082 	size_t len;
2083 
2084 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
2085 
2086 	len = eq->qsize * EQ_ESIZE;
2087 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
2088 	    &eq->ba, (void **)&eq->desc);
2089 	if (rc)
2090 		return (rc);
2091 
2092 	eq->cap = eq->qsize - SPG_LEN / EQ_ESIZE;
2093 	eq->spg = (void *)&eq->desc[eq->cap];
2094 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
2095 	eq->pidx = eq->cidx = 0;
2096 
2097 	switch (eq->flags & EQ_TYPEMASK) {
2098 	case EQ_CTRL:
2099 		rc = ctrl_eq_alloc(sc, eq);
2100 		break;
2101 
2102 	case EQ_ETH:
2103 		rc = eth_eq_alloc(sc, pi, eq);
2104 		break;
2105 
2106 #ifndef TCP_OFFLOAD_DISABLE
2107 	case EQ_OFLD:
2108 		rc = ofld_eq_alloc(sc, pi, eq);
2109 		break;
2110 #endif
2111 
2112 	default:
2113 		panic("%s: invalid eq type %d.", __func__,
2114 		    eq->flags & EQ_TYPEMASK);
2115 	}
2116 	if (rc != 0) {
2117 		device_printf(sc->dev,
2118 		    "failed to allocate egress queue (%d): %d\n",
2119 		    eq->flags & EQ_TYPEMASK, rc);
2120 	}
2121 
2122 	eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus;
2123 
2124 	return (rc);
2125 }
2126 
2127 static int
2128 free_eq(struct adapter *sc, struct sge_eq *eq)
2129 {
2130 	int rc;
2131 
2132 	if (eq->flags & EQ_ALLOCATED) {
2133 		switch (eq->flags & EQ_TYPEMASK) {
2134 		case EQ_CTRL:
2135 			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
2136 			    eq->cntxt_id);
2137 			break;
2138 
2139 		case EQ_ETH:
2140 			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
2141 			    eq->cntxt_id);
2142 			break;
2143 
2144 #ifndef TCP_OFFLOAD_DISABLE
2145 		case EQ_OFLD:
2146 			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
2147 			    eq->cntxt_id);
2148 			break;
2149 #endif
2150 
2151 		default:
2152 			panic("%s: invalid eq type %d.", __func__,
2153 			    eq->flags & EQ_TYPEMASK);
2154 		}
2155 		if (rc != 0) {
2156 			device_printf(sc->dev,
2157 			    "failed to free egress queue (%d): %d\n",
2158 			    eq->flags & EQ_TYPEMASK, rc);
2159 			return (rc);
2160 		}
2161 		eq->flags &= ~EQ_ALLOCATED;
2162 	}
2163 
2164 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
2165 
2166 	if (mtx_initialized(&eq->eq_lock))
2167 		mtx_destroy(&eq->eq_lock);
2168 
2169 	bzero(eq, sizeof(*eq));
2170 	return (0);
2171 }
2172 
2173 static int
2174 alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
2175     struct sysctl_oid *oid)
2176 {
2177 	int rc;
2178 	struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx;
2179 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2180 
2181 	rc = alloc_eq(sc, pi, &wrq->eq);
2182 	if (rc)
2183 		return (rc);
2184 
2185 	wrq->adapter = sc;
2186 
2187 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
2188 	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
2189 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
2190 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I",
2191 	    "consumer index");
2192 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
2193 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
2194 	    "producer index");
2195 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD,
2196 	    &wrq->tx_wrs, "# of work requests");
2197 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
2198 	    &wrq->no_desc, 0,
2199 	    "# of times queue ran out of hardware descriptors");
2200 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
2201 	    &wrq->eq.unstalled, 0, "# of times queue recovered after stall");
2202 
2203 
2204 	return (rc);
2205 }
2206 
2207 static int
2208 free_wrq(struct adapter *sc, struct sge_wrq *wrq)
2209 {
2210 	int rc;
2211 
2212 	rc = free_eq(sc, &wrq->eq);
2213 	if (rc)
2214 		return (rc);
2215 
2216 	bzero(wrq, sizeof(*wrq));
2217 	return (0);
2218 }
2219 
2220 static int
2221 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
2222     struct sysctl_oid *oid)
2223 {
2224 	int rc;
2225 	struct adapter *sc = pi->adapter;
2226 	struct sge_eq *eq = &txq->eq;
2227 	char name[16];
2228 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2229 
2230 	rc = alloc_eq(sc, pi, eq);
2231 	if (rc)
2232 		return (rc);
2233 
2234 	txq->ifp = pi->ifp;
2235 
2236 	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
2237 	    M_ZERO | M_WAITOK);
2238 	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
2239 
2240 	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
2241 	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
2242 	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
2243 	if (rc != 0) {
2244 		device_printf(sc->dev,
2245 		    "failed to create tx DMA tag: %d\n", rc);
2246 		return (rc);
2247 	}
2248 
2249 	/*
2250 	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
2251 	 * limit for any WR).  txq->no_dmamap events shouldn't occur if the tx
2252 	 * maps are sized for the worst case.
2253 	 */
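	/*
	 * Sizing sketch (illustrative numbers): a 1024-entry eq gets
	 * 1024 * 10 / 8 = 1280 maps, enough for ~10 frames per 8 descriptors
	 * across the entire ring.
	 */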
2254 	rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8,
2255 	    M_WAITOK);
2256 	if (rc != 0) {
2257 		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
2258 		return (rc);
2259 	}
2260 
2261 	snprintf(name, sizeof(name), "%d", idx);
2262 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
2263 	    NULL, "tx queue");
2264 	children = SYSCTL_CHILDREN(oid);
2265 
2266 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
2267 	    &eq->cntxt_id, 0, "SGE context id of the queue");
2268 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
2269 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
2270 	    "consumer index");
2271 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
2272 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
2273 	    "producer index");
2274 
2275 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
2276 	    &txq->txcsum, "# of times hardware assisted with checksum");
2277 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
2278 	    CTLFLAG_RD, &txq->vlan_insertion,
2279 	    "# of times hardware inserted 802.1Q tag");
2280 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
2281 	    &txq->tso_wrs, "# of IPv4 TSO work requests");
2282 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
2283 	    &txq->imm_wrs, "# of work requests with immediate data");
2284 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
2285 	    &txq->sgl_wrs, "# of work requests with direct SGL");
2286 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
2287 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
2288 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
2289 	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
2290 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
2291 	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
2292 
2293 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
2294 	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
2295 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
2296 	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
2297 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
2298 	    &eq->egr_update, 0, "egress update notifications from the SGE");
2299 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
2300 	    &eq->unstalled, 0, "# of times txq recovered after stall");
2301 
2302 	return (rc);
2303 }
2304 
2305 static int
2306 free_txq(struct port_info *pi, struct sge_txq *txq)
2307 {
2308 	int rc;
2309 	struct adapter *sc = pi->adapter;
2310 	struct sge_eq *eq = &txq->eq;
2311 
2312 	rc = free_eq(sc, eq);
2313 	if (rc)
2314 		return (rc);
2315 
2316 	free(txq->sdesc, M_CXGBE);
2317 
2318 	if (txq->txmaps.maps)
2319 		t4_free_tx_maps(&txq->txmaps, txq->tx_tag);
2320 
2321 	buf_ring_free(txq->br, M_CXGBE);
2322 
2323 	if (txq->tx_tag)
2324 		bus_dma_tag_destroy(txq->tx_tag);
2325 
2326 	bzero(txq, sizeof(*txq));
2327 	return (0);
2328 }
2329 
2330 static void
2331 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2332 {
2333 	bus_addr_t *ba = arg;
2334 
2335 	KASSERT(nseg == 1,
2336 	    ("%s meant for single segment mappings only.", __func__));
2337 
2338 	*ba = error ? 0 : segs->ds_addr;
2339 }
2340 
2341 static inline bool
2342 is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
2343 {
2344 	*ctrl = (void *)((uintptr_t)iq->cdesc +
2345 	    (iq->esize - sizeof(struct rsp_ctrl)));
2346 
2347 	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
2348 }
2349 
2350 static inline void
2351 iq_next(struct sge_iq *iq)
2352 {
2353 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
2354 	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
2355 		iq->cidx = 0;
2356 		iq->gen ^= 1;
2357 		iq->cdesc = iq->desc;
2358 	}
2359 }
2360 
2361 #define FL_HW_IDX(x) ((x) >> 3)
2362 static inline void
2363 ring_fl_db(struct adapter *sc, struct sge_fl *fl)
2364 {
2365 	int ndesc = fl->pending / 8;
2366 
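	/*
	 * The SGE consumes freelist credits in units of 8 descriptors, so only
	 * whole groups of 8 are posted via the doorbell; any remainder stays
	 * accounted for in fl->pending until the next ring.
	 */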
2367 	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
2368 		ndesc--;	/* hold back one credit */
2369 
2370 	if (ndesc <= 0)
2371 		return;		/* nothing to do */
2372 
2373 	wmb();
2374 
2375 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
2376 	    V_QID(fl->cntxt_id) | V_PIDX(ndesc));
2377 	fl->pending -= ndesc * 8;
2378 }
2379 
2380 /*
2381  * Fill up the freelist with up to nbufs buffers and maybe ring its doorbell.
2382  *
2383  * Returns non-zero to indicate that the freelist should be added to the list
2384  * of starving freelists.
2385  */
2386 static int
2387 refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
2388 {
2389 	__be64 *d = &fl->desc[fl->pidx];
2390 	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
2391 	bus_dma_tag_t tag;
2392 	bus_addr_t pa;
2393 	caddr_t cl;
2394 	int rc;
2395 
2396 	FL_LOCK_ASSERT_OWNED(fl);
2397 
2398 	if (nbufs > fl->needed)
2399 		nbufs = fl->needed;
2400 
2401 	while (nbufs--) {
2402 
2403 		if (sd->cl != NULL) {
2404 
2405 			/*
2406 			 * This happens when a frame small enough to fit
2407 			 * entirely in an mbuf was received in cl last time.
2408 			 * We held on to cl and can reuse it now.  Note that
2409 			 * we reuse a cluster of the old size if fl->tag_idx is
2410 			 * no longer the same as sd->tag_idx.
2411 			 */
2412 
2413 			KASSERT(*d == sd->ba_tag,
2414 			    ("%s: recycling problem at pidx %d",
2415 			    __func__, fl->pidx));
2416 
2417 			d++;
2418 			goto recycled;
2419 		}
2420 
2421 
2422 		if (fl->tag_idx != sd->tag_idx) {
2423 			bus_dmamap_t map;
2424 			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
2425 			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
2426 
2427 			/*
2428 			 * An MTU change can get us here.  Discard the old map
2429 			 * which was created with the old tag, but only if
2430 			 * we're able to get a new one.
2431 			 */
2432 			rc = bus_dmamap_create(newtag, 0, &map);
2433 			if (rc == 0) {
2434 				bus_dmamap_destroy(oldtag, sd->map);
2435 				sd->map = map;
2436 				sd->tag_idx = fl->tag_idx;
2437 			}
2438 		}
2439 
2440 		tag = fl->tag[sd->tag_idx];
2441 
2442 		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
2443 		if (cl == NULL)
2444 			break;
2445 
2446 		rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
2447 		    oneseg_dma_callback, &pa, 0);
2448 		if (rc != 0 || pa == 0) {
2449 			fl->dmamap_failed++;
2450 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
2451 			break;
2452 		}
2453 
2454 		sd->cl = cl;
2455 		*d++ = htobe64(pa | sd->tag_idx);
2456 
2457 #ifdef INVARIANTS
2458 		sd->ba_tag = htobe64(pa | sd->tag_idx);
2459 #endif
2460 
2461 recycled:
2462 		/* sd->m is never recycled, should always be NULL */
2463 		KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));
2464 
2465 		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
2466 		if (sd->m == NULL)
2467 			break;
2468 
2469 		fl->pending++;
2470 		fl->needed--;
2471 		sd++;
2472 		if (++fl->pidx == fl->cap) {
2473 			fl->pidx = 0;
2474 			sd = fl->sdesc;
2475 			d = fl->desc;
2476 		}
2477 	}
2478 
2479 	if (fl->pending >= 8)
2480 		ring_fl_db(sc, fl);
2481 
2482 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
2483 }
2484 
2485 /*
2486  * Attempt to refill all starving freelists.
2487  */
2488 static void
2489 refill_sfl(void *arg)
2490 {
2491 	struct adapter *sc = arg;
2492 	struct sge_fl *fl, *fl_temp;
2493 
2494 	mtx_lock(&sc->sfl_lock);
2495 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
2496 		FL_LOCK(fl);
2497 		refill_fl(sc, fl, 64);
2498 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
2499 			TAILQ_REMOVE(&sc->sfl, fl, link);
2500 			fl->flags &= ~FL_STARVING;
2501 		}
2502 		FL_UNLOCK(fl);
2503 	}
2504 
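	/* Anything still starving gets another refill attempt in hz / 5 ticks. */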
2505 	if (!TAILQ_EMPTY(&sc->sfl))
2506 		callout_schedule(&sc->sfl_callout, hz / 5);
2507 	mtx_unlock(&sc->sfl_lock);
2508 }
2509 
2510 static int
2511 alloc_fl_sdesc(struct sge_fl *fl)
2512 {
2513 	struct fl_sdesc *sd;
2514 	bus_dma_tag_t tag;
2515 	int i, rc;
2516 
2517 	FL_LOCK_ASSERT_OWNED(fl);
2518 
2519 	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
2520 	    M_ZERO | M_WAITOK);
2521 
2522 	tag = fl->tag[fl->tag_idx];
2523 	sd = fl->sdesc;
2524 	for (i = 0; i < fl->cap; i++, sd++) {
2525 
2526 		sd->tag_idx = fl->tag_idx;
2527 		rc = bus_dmamap_create(tag, 0, &sd->map);
2528 		if (rc != 0)
2529 			goto failed;
2530 	}
2531 
2532 	return (0);
2533 failed:
2534 	while (--i >= 0) {
2535 		sd--;
2536 		bus_dmamap_destroy(tag, sd->map);
2537 		if (sd->m) {
2538 			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
2539 			m_free(sd->m);
2540 			sd->m = NULL;
2541 		}
2542 	}
2543 	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
2544 
2545 	free(fl->sdesc, M_CXGBE);
2546 	fl->sdesc = NULL;
2547 
2548 	return (rc);
2549 }
2550 
2551 static void
2552 free_fl_sdesc(struct sge_fl *fl)
2553 {
2554 	struct fl_sdesc *sd;
2555 	int i;
2556 
2557 	FL_LOCK_ASSERT_OWNED(fl);
2558 
2559 	sd = fl->sdesc;
2560 	for (i = 0; i < fl->cap; i++, sd++) {
2561 
2562 		if (sd->m) {
2563 			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
2564 			m_free(sd->m);
2565 			sd->m = NULL;
2566 		}
2567 
2568 		if (sd->cl) {
2569 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
2570 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
2571 			sd->cl = NULL;
2572 		}
2573 
2574 		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
2575 	}
2576 
2577 	free(fl->sdesc, M_CXGBE);
2578 	fl->sdesc = NULL;
2579 }
2580 
2581 int
2582 t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count,
2583     int flags)
2584 {
2585 	struct tx_map *txm;
2586 	int i, rc;
2587 
2588 	txmaps->map_total = txmaps->map_avail = count;
2589 	txmaps->map_cidx = txmaps->map_pidx = 0;
2590 
2591 	txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
2592 	    M_ZERO | flags);
2593 
2594 	txm = txmaps->maps;
2595 	for (i = 0; i < count; i++, txm++) {
2596 		rc = bus_dmamap_create(tx_tag, 0, &txm->map);
2597 		if (rc != 0)
2598 			goto failed;
2599 	}
2600 
2601 	return (0);
2602 failed:
2603 	while (--i >= 0) {
2604 		txm--;
2605 		bus_dmamap_destroy(tx_tag, txm->map);
2606 	}
2607 	KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__));
2608 
2609 	free(txmaps->maps, M_CXGBE);
2610 	txmaps->maps = NULL;
2611 
2612 	return (rc);
2613 }
2614 
2615 void
2616 t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag)
2617 {
2618 	struct tx_map *txm;
2619 	int i;
2620 
2621 	txm = txmaps->maps;
2622 	for (i = 0; i < txmaps->map_total; i++, txm++) {
2623 
2624 		if (txm->m) {
2625 			bus_dmamap_unload(tx_tag, txm->map);
2626 			m_freem(txm->m);
2627 			txm->m = NULL;
2628 		}
2629 
2630 		bus_dmamap_destroy(tx_tag, txm->map);
2631 	}
2632 
2633 	free(txmaps->maps, M_CXGBE);
2634 	txmaps->maps = NULL;
2635 }
2636 
2637 /*
2638  * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
2639  * willing to use up to 2 hardware descriptors, which means a maximum of 96 bytes
2640  * of immediate data.
2641  */
2642 #define IMM_LEN ( \
2643       2 * EQ_ESIZE \
2644     - sizeof(struct fw_eth_tx_pkt_wr) \
2645     - sizeof(struct cpl_tx_pkt_core))
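/*
 * Arithmetic check (struct sizes assumed, not asserted here): with 64-byte
 * hardware descriptors and 16-byte fw_eth_tx_pkt_wr and cpl_tx_pkt_core
 * headers, IMM_LEN comes to 2 * 64 - 16 - 16 = 96 bytes, the limit quoted in
 * the comment above.
 */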
2646 
2647 /*
2648  * Returns non-zero on failure, no need to cleanup anything in that case.
2649  *
2650  * Note 1: We always try to defrag the mbuf if required and return EFBIG only
2651  * if the resulting chain still won't fit in a tx descriptor.
2652  *
2653  * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
2654  * does not have the TCP header in it.
2655  */
2656 static int
2657 get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
2658     int sgl_only)
2659 {
2660 	struct mbuf *m = *fp;
2661 	struct tx_maps *txmaps;
2662 	struct tx_map *txm;
2663 	int rc, defragged = 0, n;
2664 
2665 	TXQ_LOCK_ASSERT_OWNED(txq);
2666 
2667 	if (m->m_pkthdr.tso_segsz)
2668 		sgl_only = 1;	/* Do not allow immediate data with LSO */
2669 
2670 start:	sgl->nsegs = 0;
2671 
2672 	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
2673 		return (0);	/* nsegs = 0 tells caller to use imm. tx */
2674 
2675 	txmaps = &txq->txmaps;
2676 	if (txmaps->map_avail == 0) {
2677 		txq->no_dmamap++;
2678 		return (ENOMEM);
2679 	}
2680 	txm = &txmaps->maps[txmaps->map_pidx];
2681 
2682 	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
2683 		*fp = m_pullup(m, 50);
2684 		m = *fp;
2685 		if (m == NULL)
2686 			return (ENOBUFS);
2687 	}
2688 
2689 	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
2690 	    &sgl->nsegs, BUS_DMA_NOWAIT);
2691 	if (rc == EFBIG && defragged == 0) {
2692 		m = m_defrag(m, M_DONTWAIT);
2693 		if (m == NULL)
2694 			return (EFBIG);
2695 
2696 		defragged = 1;
2697 		*fp = m;
2698 		goto start;
2699 	}
2700 	if (rc != 0)
2701 		return (rc);
2702 
2703 	txm->m = m;
2704 	txmaps->map_avail--;
2705 	if (++txmaps->map_pidx == txmaps->map_total)
2706 		txmaps->map_pidx = 0;
2707 
2708 	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
2709 	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
2710 
2711 	/*
2712 	 * Store the # of flits required to hold this frame's SGL in nflits.  An
2713 	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
2714 	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
2715 	 * then len1 must be set to 0.
2716 	 */
2717 	n = sgl->nsegs - 1;
2718 	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
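	/*
	 * Worked example: nsegs = 5 gives n = 4 and nflits = 12 / 2 + 0 + 2 = 8,
	 * i.e. 2 flits for the (header + len0, addr0) tuple plus two 3-flit
	 * (len + len, addr, addr) tuples.  nsegs = 4 gives n = 3 and nflits = 7,
	 * the odd segment using 2 flits with its unused len set to 0.
	 */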
2719 
2720 	return (0);
2721 }
2722 
2723 
2724 /*
2725  * Releases all the txq resources used up in the specified sgl.
2726  */
2727 static int
2728 free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
2729 {
2730 	struct tx_maps *txmaps;
2731 	struct tx_map *txm;
2732 
2733 	TXQ_LOCK_ASSERT_OWNED(txq);
2734 
2735 	if (sgl->nsegs == 0)
2736 		return (0);	/* didn't use any map */
2737 
2738 	txmaps = &txq->txmaps;
2739 
2740 	/* 1 pkt uses exactly 1 map, back it out */
2741 
2742 	txmaps->map_avail++;
2743 	if (txmaps->map_pidx > 0)
2744 		txmaps->map_pidx--;
2745 	else
2746 		txmaps->map_pidx = txmaps->map_total - 1;
2747 
2748 	txm = &txmaps->maps[txmaps->map_pidx];
2749 	bus_dmamap_unload(txq->tx_tag, txm->map);
2750 	txm->m = NULL;
2751 
2752 	return (0);
2753 }
2754 
2755 static int
2756 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
2757     struct sgl *sgl)
2758 {
2759 	struct sge_eq *eq = &txq->eq;
2760 	struct fw_eth_tx_pkt_wr *wr;
2761 	struct cpl_tx_pkt_core *cpl;
2762 	uint32_t ctrl;	/* used in many unrelated places */
2763 	uint64_t ctrl1;
2764 	int nflits, ndesc, pktlen;
2765 	struct tx_sdesc *txsd;
2766 	caddr_t dst;
2767 
2768 	TXQ_LOCK_ASSERT_OWNED(txq);
2769 
2770 	pktlen = m->m_pkthdr.len;
2771 
2772 	/*
2773 	 * Do we have enough flits to send this frame out?
2774 	 */
2775 	ctrl = sizeof(struct cpl_tx_pkt_core);
2776 	if (m->m_pkthdr.tso_segsz) {
2777 		nflits = TXPKT_LSO_WR_HDR;
2778 		ctrl += sizeof(struct cpl_tx_pkt_lso);
2779 	} else
2780 		nflits = TXPKT_WR_HDR;
2781 	if (sgl->nsegs > 0)
2782 		nflits += sgl->nflits;
2783 	else {
2784 		nflits += howmany(pktlen, 8);
2785 		ctrl += pktlen;
2786 	}
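	/* Each hardware descriptor holds 8 flits (64 bytes). */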
2787 	ndesc = howmany(nflits, 8);
2788 	if (ndesc > eq->avail)
2789 		return (ENOMEM);
2790 
2791 	/* Firmware work request header */
2792 	wr = (void *)&eq->desc[eq->pidx];
2793 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
2794 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
2795 	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
2796 	if (eq->avail == ndesc) {
2797 		if (!(eq->flags & EQ_CRFLUSHED)) {
2798 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2799 			eq->flags |= EQ_CRFLUSHED;
2800 		}
2801 		eq->flags |= EQ_STALLED;
2802 	}
2803 
2804 	wr->equiq_to_len16 = htobe32(ctrl);
2805 	wr->r3 = 0;
2806 
2807 	if (m->m_pkthdr.tso_segsz) {
2808 		struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
2809 		struct ether_header *eh;
2810 		struct ip *ip;
2811 		struct tcphdr *tcp;
2812 
2813 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
2814 		    F_LSO_LAST_SLICE;
2815 
2816 		eh = mtod(m, struct ether_header *);
2817 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2818 			ctrl |= V_LSO_ETHHDR_LEN(1);
2819 			ip = (void *)((struct ether_vlan_header *)eh + 1);
2820 		} else
2821 			ip = (void *)(eh + 1);
2822 
2823 		tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
2824 		ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
2825 		    V_LSO_TCPHDR_LEN(tcp->th_off);
2826 
2827 		lso->lso_ctrl = htobe32(ctrl);
2828 		lso->ipid_ofst = htobe16(0);
2829 		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
2830 		lso->seqno_offset = htobe32(0);
2831 		lso->len = htobe32(pktlen);
2832 
2833 		cpl = (void *)(lso + 1);
2834 
2835 		txq->tso_wrs++;
2836 	} else
2837 		cpl = (void *)(wr + 1);
2838 
2839 	/* Checksum offload */
2840 	ctrl1 = 0;
2841 	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2842 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
2843 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2844 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
2845 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2846 		txq->txcsum++;	/* some hardware assistance provided */
2847 
2848 	/* VLAN tag insertion */
2849 	if (m->m_flags & M_VLANTAG) {
2850 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2851 		txq->vlan_insertion++;
2852 	}
2853 
2854 	/* CPL header */
2855 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2856 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2857 	cpl->pack = 0;
2858 	cpl->len = htobe16(pktlen);
2859 	cpl->ctrl1 = htobe64(ctrl1);
2860 
2861 	/* Software descriptor */
2862 	txsd = &txq->sdesc[eq->pidx];
2863 	txsd->desc_used = ndesc;
2864 
2865 	eq->pending += ndesc;
2866 	eq->avail -= ndesc;
2867 	eq->pidx += ndesc;
2868 	if (eq->pidx >= eq->cap)
2869 		eq->pidx -= eq->cap;
2870 
2871 	/* SGL */
2872 	dst = (void *)(cpl + 1);
2873 	if (sgl->nsegs > 0) {
2874 		txsd->credits = 1;
2875 		txq->sgl_wrs++;
2876 		write_sgl_to_txd(eq, sgl, &dst);
2877 	} else {
2878 		txsd->credits = 0;
2879 		txq->imm_wrs++;
2880 		for (; m; m = m->m_next) {
2881 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
2882 #ifdef INVARIANTS
2883 			pktlen -= m->m_len;
2884 #endif
2885 		}
2886 #ifdef INVARIANTS
2887 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
2888 #endif
2889 
2890 	}
2891 
2892 	txq->txpkt_wrs++;
2893 	return (0);
2894 }
2895 
2896 /*
2897  * Returns 0 to indicate that m has been accepted into a coalesced tx work
2898  * request.  It has either been folded into txpkts or txpkts was flushed and m
2899  * has started a new coalesced work request (as the first frame in a fresh
2900  * txpkts).
2901  *
2902  * Returns non-zero to indicate a failure - the caller is responsible for
2903  * transmitting m.  If there was anything in txpkts it has been flushed.
2904  */
2905 static int
2906 add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
2907     struct mbuf *m, struct sgl *sgl)
2908 {
2909 	struct sge_eq *eq = &txq->eq;
2910 	int can_coalesce;
2911 	struct tx_sdesc *txsd;
2912 	int flits;
2913 
2914 	TXQ_LOCK_ASSERT_OWNED(txq);
2915 
2916 	KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__));
2917 
2918 	if (txpkts->npkt > 0) {
2919 		flits = TXPKTS_PKT_HDR + sgl->nflits;
2920 		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2921 		    txpkts->nflits + flits <= TX_WR_FLITS &&
2922 		    txpkts->nflits + flits <= eq->avail * 8 &&
2923 		    txpkts->plen + m->m_pkthdr.len < 65536;
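		/*
		 * The 64KB cap on plen is there because the txpkts work
		 * request carries the total payload length in a 16-bit field
		 * (see wr->plen in write_txpkts_wr).
		 */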
2924 
2925 		if (can_coalesce) {
2926 			txpkts->npkt++;
2927 			txpkts->nflits += flits;
2928 			txpkts->plen += m->m_pkthdr.len;
2929 
2930 			txsd = &txq->sdesc[eq->pidx];
2931 			txsd->credits++;
2932 
2933 			return (0);
2934 		}
2935 
2936 		/*
2937 		 * Couldn't coalesce m into txpkts.  The first order of business
2938 		 * is to send txpkts on its way.  Then we'll revisit m.
2939 		 */
2940 		write_txpkts_wr(txq, txpkts);
2941 	}
2942 
2943 	/*
2944 	 * Check if we can start a new coalesced tx work request with m as
2945 	 * the first packet in it.
2946 	 */
2947 
2948 	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
2949 
2950 	flits = TXPKTS_WR_HDR + sgl->nflits;
2951 	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2952 	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
2953 
2954 	if (can_coalesce == 0)
2955 		return (EINVAL);
2956 
2957 	/*
2958 	 * Start a fresh coalesced tx WR with m as the first frame in it.
2959 	 */
2960 	txpkts->npkt = 1;
2961 	txpkts->nflits = flits;
2962 	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
2963 	txpkts->plen = m->m_pkthdr.len;
2964 
2965 	txsd = &txq->sdesc[eq->pidx];
2966 	txsd->credits = 1;
2967 
2968 	return (0);
2969 }
2970 
2971 /*
2972  * Note that write_txpkts_wr can never run out of hardware descriptors (but
2973  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
2974  * coalescing only if sufficient hardware descriptors are available.
2975  */
2976 static void
2977 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
2978 {
2979 	struct sge_eq *eq = &txq->eq;
2980 	struct fw_eth_tx_pkts_wr *wr;
2981 	struct tx_sdesc *txsd;
2982 	uint32_t ctrl;
2983 	int ndesc;
2984 
2985 	TXQ_LOCK_ASSERT_OWNED(txq);
2986 
2987 	ndesc = howmany(txpkts->nflits, 8);
2988 
2989 	wr = (void *)&eq->desc[eq->pidx];
2990 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
2991 	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
2992 	if (eq->avail == ndesc) {
2993 		if (!(eq->flags & EQ_CRFLUSHED)) {
2994 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2995 			eq->flags |= EQ_CRFLUSHED;
2996 		}
2997 		eq->flags |= EQ_STALLED;
2998 	}
2999 	wr->equiq_to_len16 = htobe32(ctrl);
3000 	wr->plen = htobe16(txpkts->plen);
3001 	wr->npkt = txpkts->npkt;
3002 	wr->r3 = wr->type = 0;
3003 
3004 	/* Everything else already written */
3005 
3006 	txsd = &txq->sdesc[eq->pidx];
3007 	txsd->desc_used = ndesc;
3008 
3009 	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
3010 
3011 	eq->pending += ndesc;
3012 	eq->avail -= ndesc;
3013 	eq->pidx += ndesc;
3014 	if (eq->pidx >= eq->cap)
3015 		eq->pidx -= eq->cap;
3016 
3017 	txq->txpkts_pkts += txpkts->npkt;
3018 	txq->txpkts_wrs++;
3019 	txpkts->npkt = 0;	/* emptied */
3020 }
3021 
3022 static inline void
3023 write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
3024     struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
3025 {
3026 	struct ulp_txpkt *ulpmc;
3027 	struct ulptx_idata *ulpsc;
3028 	struct cpl_tx_pkt_core *cpl;
3029 	struct sge_eq *eq = &txq->eq;
3030 	uintptr_t flitp, start, end;
3031 	uint64_t ctrl;
3032 	caddr_t dst;
3033 
3034 	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
3035 
3036 	start = (uintptr_t)eq->desc;
3037 	end = (uintptr_t)eq->spg;
3038 
3039 	/* Checksum offload */
3040 	ctrl = 0;
3041 	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
3042 		ctrl |= F_TXPKT_IPCSUM_DIS;
3043 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
3044 		ctrl |= F_TXPKT_L4CSUM_DIS;
3045 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
3046 		txq->txcsum++;	/* some hardware assistance provided */
3047 
3048 	/* VLAN tag insertion */
3049 	if (m->m_flags & M_VLANTAG) {
3050 		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
3051 		txq->vlan_insertion++;
3052 	}
3053 
3054 	/*
3055 	 * The previous packet's SGL must have ended at a 16 byte boundary (this
3056 	 * is required by the firmware/hardware).  It follows that flitp cannot
3057 	 * wrap around between the ULPTX master command and ULPTX subcommand (8
3058 	 * bytes each), and that it can not wrap around in the middle of the
3059 	 * cpl_tx_pkt_core either.
3060 	 */
3061 	flitp = (uintptr_t)txpkts->flitp;
3062 	KASSERT((flitp & 0xf) == 0,
3063 	    ("%s: last SGL did not end at 16 byte boundary: %p",
3064 	    __func__, txpkts->flitp));
3065 
3066 	/* ULP master command */
3067 	ulpmc = (void *)flitp;
3068 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
3069 	    V_ULP_TXPKT_FID(eq->iqid));
3070 	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
3071 	    sizeof(*cpl) + 8 * sgl->nflits, 16));
3072 
3073 	/* ULP subcommand */
3074 	ulpsc = (void *)(ulpmc + 1);
3075 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
3076 	    F_ULP_TX_SC_MORE);
3077 	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
3078 
3079 	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
3080 	if (flitp == end)
3081 		flitp = start;
3082 
3083 	/* CPL_TX_PKT */
3084 	cpl = (void *)flitp;
3085 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
3086 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
3087 	cpl->pack = 0;
3088 	cpl->len = htobe16(m->m_pkthdr.len);
3089 	cpl->ctrl1 = htobe64(ctrl);
3090 
3091 	flitp += sizeof(*cpl);
3092 	if (flitp == end)
3093 		flitp = start;
3094 
3095 	/* SGL for this frame */
3096 	dst = (caddr_t)flitp;
3097 	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
3098 	txpkts->flitp = (void *)dst;
3099 
3100 	KASSERT(((uintptr_t)dst & 0xf) == 0,
3101 	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
3102 }
3103 
3104 /*
3105  * If the SGL ends on an address that is not 16 byte aligned, this function will
3106  * add a 0 filled flit at the end.  It returns 1 in that case.
3107  */
3108 static int
3109 write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
3110 {
3111 	__be64 *flitp, *end;
3112 	struct ulptx_sgl *usgl;
3113 	bus_dma_segment_t *seg;
3114 	int i, padded;
3115 
3116 	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
3117 	    ("%s: bad SGL - nsegs=%d, nflits=%d",
3118 	    __func__, sgl->nsegs, sgl->nflits));
3119 
3120 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
3121 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
3122 
3123 	flitp = (__be64 *)(*to);
3124 	end = flitp + sgl->nflits;
3125 	seg = &sgl->seg[0];
3126 	usgl = (void *)flitp;
3127 
3128 	/*
3129 	 * We start at a 16 byte boundary somewhere inside the tx descriptor
3130 	 * ring, so we're at least 16 bytes away from the status page.  There is
3131 	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
3132 	 */
3133 
3134 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
3135 	    V_ULPTX_NSGE(sgl->nsegs));
3136 	usgl->len0 = htobe32(seg->ds_len);
3137 	usgl->addr0 = htobe64(seg->ds_addr);
3138 	seg++;
3139 
3140 	if ((uintptr_t)end <= (uintptr_t)eq->spg) {
3141 
3142 		/* Won't wrap around at all */
3143 
3144 		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
3145 			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
3146 			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
3147 		}
3148 		if (i & 1)
3149 			usgl->sge[i / 2].len[1] = htobe32(0);
3150 	} else {
3151 
3152 		/* Will wrap somewhere in the rest of the SGL */
3153 
3154 		/* 2 flits already written, write the rest flit by flit */
3155 		flitp = (void *)(usgl + 1);
3156 		for (i = 0; i < sgl->nflits - 2; i++) {
3157 			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
3158 				flitp = (void *)eq->desc;
3159 			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
3160 		}
3161 		end = flitp;
3162 	}
3163 
3164 	if ((uintptr_t)end & 0xf) {
3165 		*(uint64_t *)end = 0;
3166 		end++;
3167 		padded = 1;
3168 	} else
3169 		padded = 0;
3170 
3171 	if ((uintptr_t)end == (uintptr_t)eq->spg)
3172 		*to = (void *)eq->desc;
3173 	else
3174 		*to = (void *)end;
3175 
3176 	return (padded);
3177 }
3178 
3179 static inline void
3180 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
3181 {
3182 	if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
3183 		bcopy(from, *to, len);
3184 		(*to) += len;
3185 	} else {
3186 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
3187 
3188 		bcopy(from, *to, portion);
3189 		from += portion;
3190 		portion = len - portion;	/* remaining */
3191 		bcopy(from, (void *)eq->desc, portion);
3192 		(*to) = (caddr_t)eq->desc + portion;
3193 	}
3194 }
3195 
3196 static inline void
3197 ring_eq_db(struct adapter *sc, struct sge_eq *eq)
3198 {
3199 	wmb();
3200 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
3201 	    V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
3202 	eq->pending = 0;
3203 }
3204 
3205 static inline int
3206 reclaimable(struct sge_eq *eq)
3207 {
3208 	unsigned int cidx;
3209 
3210 	cidx = eq->spg->cidx;	/* stable snapshot */
3211 	cidx = be16toh(cidx);
3212 
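	/*
	 * Example: with cap = 1024, a hardware cidx of 8 and a driver cidx of
	 * 1000 means the hardware has wrapped around, so 8 + 1024 - 1000 = 32
	 * descriptors are reclaimable.
	 */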
3213 	if (cidx >= eq->cidx)
3214 		return (cidx - eq->cidx);
3215 	else
3216 		return (cidx + eq->cap - eq->cidx);
3217 }
3218 
3219 /*
3220  * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
3221  * many as possible but stop when there are around "n" mbufs to free.
3222  *
3223  * The actual number reclaimed is provided as the return value.
3224  */
3225 static int
3226 reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
3227 {
3228 	struct tx_sdesc *txsd;
3229 	struct tx_maps *txmaps;
3230 	struct tx_map *txm;
3231 	unsigned int reclaimed, maps;
3232 	struct sge_eq *eq = &txq->eq;
3233 
3234 	TXQ_LOCK_ASSERT_OWNED(txq);
3235 
3236 	if (can_reclaim == 0)
3237 		can_reclaim = reclaimable(eq);
3238 
3239 	maps = reclaimed = 0;
3240 	while (can_reclaim && maps < n) {
3241 		int ndesc;
3242 
3243 		txsd = &txq->sdesc[eq->cidx];
3244 		ndesc = txsd->desc_used;
3245 
3246 		/* Firmware doesn't return "partial" credits. */
3247 		KASSERT(can_reclaim >= ndesc,
3248 		    ("%s: unexpected number of credits: %d, %d",
3249 		    __func__, can_reclaim, ndesc));
3250 
3251 		maps += txsd->credits;
3252 
3253 		reclaimed += ndesc;
3254 		can_reclaim -= ndesc;
3255 
3256 		eq->cidx += ndesc;
3257 		if (__predict_false(eq->cidx >= eq->cap))
3258 			eq->cidx -= eq->cap;
3259 	}
3260 
3261 	txmaps = &txq->txmaps;
3262 	txm = &txmaps->maps[txmaps->map_cidx];
3263 	if (maps)
3264 		prefetch(txm->m);
3265 
3266 	eq->avail += reclaimed;
3267 	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
3268 	    ("%s: too many descriptors available", __func__));
3269 
3270 	txmaps->map_avail += maps;
3271 	KASSERT(txmaps->map_avail <= txmaps->map_total,
3272 	    ("%s: too many maps available", __func__));
3273 
3274 	while (maps--) {
3275 		struct tx_map *next;
3276 
3277 		next = txm + 1;
3278 		if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total))
3279 			next = txmaps->maps;
3280 		prefetch(next->m);
3281 
3282 		bus_dmamap_unload(txq->tx_tag, txm->map);
3283 		m_freem(txm->m);
3284 		txm->m = NULL;
3285 
3286 		txm = next;
3287 		if (__predict_false(++txmaps->map_cidx == txmaps->map_total))
3288 			txmaps->map_cidx = 0;
3289 	}
3290 
3291 	return (reclaimed);
3292 }
3293 
3294 static void
3295 write_eqflush_wr(struct sge_eq *eq)
3296 {
3297 	struct fw_eq_flush_wr *wr;
3298 
3299 	EQ_LOCK_ASSERT_OWNED(eq);
3300 	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
3301 	KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__));
3302 
3303 	wr = (void *)&eq->desc[eq->pidx];
3304 	bzero(wr, sizeof(*wr));
3305 	wr->opcode = FW_EQ_FLUSH_WR;
3306 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
3307 	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
3308 
3309 	eq->flags |= (EQ_CRFLUSHED | EQ_STALLED);
3310 	eq->pending++;
3311 	eq->avail--;
3312 	if (++eq->pidx == eq->cap)
3313 		eq->pidx = 0;
3314 }
3315 
3316 static __be64
3317 get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
3318 {
3319 	int i = (idx / 3) * 2;
3320 
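	/*
	 * Past the first two flits of the SGL, segments are packed in groups
	 * of three flits: the lengths of a pair of segments, then their two
	 * addresses.  idx % 3 selects which flit of the current group to
	 * build and i indexes the first segment of that pair.
	 */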
3321 	switch (idx % 3) {
3322 	case 0: {
3323 		__be64 rc;
3324 
3325 		rc = htobe32(sgl[i].ds_len);
3326 		if (i + 1 < nsegs)
3327 			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
3328 
3329 		return (rc);
3330 	}
3331 	case 1:
3332 		return htobe64(sgl[i].ds_addr);
3333 	case 2:
3334 		return htobe64(sgl[i + 1].ds_addr);
3335 	}
3336 
3337 	return (0);
3338 }
3339 
3340 static void
3341 set_fl_tag_idx(struct sge_fl *fl, int bufsize)
3342 {
3343 	int i;
3344 
3345 	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
3346 		if (FL_BUF_SIZE(i) >= bufsize)
3347 			break;
3348 	}
3349 
3350 	fl->tag_idx = i;
3351 }
3352 
3353 static void
3354 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
3355 {
3356 	mtx_lock(&sc->sfl_lock);
3357 	FL_LOCK(fl);
3358 	if ((fl->flags & FL_DOOMED) == 0) {
3359 		fl->flags |= FL_STARVING;
3360 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
3361 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
3362 	}
3363 	FL_UNLOCK(fl);
3364 	mtx_unlock(&sc->sfl_lock);
3365 }
3366 
3367 static int
3368 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
3369     struct mbuf *m)
3370 {
3371 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
3372 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
3373 	struct adapter *sc = iq->adapter;
3374 	struct sge *s = &sc->sge;
3375 	struct sge_eq *eq;
3376 
3377 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
3378 	    rss->opcode));
3379 
3380 	eq = s->eqmap[qid - s->eq_start];
3381 	EQ_LOCK(eq);
3382 	KASSERT(eq->flags & EQ_CRFLUSHED,
3383 	    ("%s: unsolicited egress update", __func__));
3384 	eq->flags &= ~EQ_CRFLUSHED;
3385 	eq->egr_update++;
3386 
3387 	if (__predict_false(eq->flags & EQ_DOOMED))
3388 		wakeup_one(eq);
3389 	else if (eq->flags & EQ_STALLED && can_resume_tx(eq))
3390 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
3391 	EQ_UNLOCK(eq);
3392 
3393 	return (0);
3394 }
3395 
3396 static int
3397 handle_fw_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
3398 {
3399 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
3400 
3401 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
3402 	    rss->opcode));
3403 
3404 	if (cpl->type == FW6_TYPE_CMD_RPL)
3405 		t4_handle_fw_rpl(iq->adapter, cpl->data);
3406 
3407 	return (0);
3408 }
3409 
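/*
 * Handler for the read-only uint16_t queue indices exported above.  The value
 * is copied into an int because sysctl_handle_int works on ints; nothing is
 * ever written back to the queue state.
 */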
3410 static int
3411 sysctl_uint16(SYSCTL_HANDLER_ARGS)
3412 {
3413 	uint16_t *id = arg1;
3414 	int i = *id;
3415 
3416 	return sysctl_handle_int(oidp, &i, 0, req);
3417 }
3418