xref: /freebsd/sys/dev/cxgb/cxgb_sge.c (revision 2b15cb3d0922bd70ea592f0da9b4a5b167f4d53f)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_inet6.h"
34 #include "opt_inet.h"
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/conf.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
45 #include <sys/rman.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 
50 #include <sys/proc.h>
51 #include <sys/sbuf.h>
52 #include <sys/sched.h>
53 #include <sys/smp.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
58 
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
64 
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
70 
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 
74 #include <vm/vm.h>
75 #include <vm/pmap.h>
76 
77 #include <cxgb_include.h>
78 #include <sys/mvec.h>
79 
80 int	txq_fills = 0;
81 int	multiq_tx_enable = 1;
82 
83 #ifdef TCP_OFFLOAD
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
85 #endif
86 
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90     "size of per-queue mbuf ring");
91 
92 static int cxgb_tx_coalesce_force = 0;
93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
94     &cxgb_tx_coalesce_force, 0,
95     "coalesce small packets into a single work request regardless of ring state");
96 
97 #define	COALESCE_START_DEFAULT		(TX_ETH_Q_SIZE>>1)
98 #define	COALESCE_START_MAX		(TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
99 #define	COALESCE_STOP_DEFAULT		(TX_ETH_Q_SIZE>>2)
100 #define	COALESCE_STOP_MIN		(TX_ETH_Q_SIZE>>5)
101 #define	TX_RECLAIM_DEFAULT		(TX_ETH_Q_SIZE>>5)
102 #define	TX_RECLAIM_MAX			(TX_ETH_Q_SIZE>>2)
103 #define	TX_RECLAIM_MIN			(TX_ETH_Q_SIZE>>6)
104 
105 
106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
108     &cxgb_tx_coalesce_enable_start, 0,
109     "coalesce enable threshold");
110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
112     &cxgb_tx_coalesce_enable_stop, 0,
113     "coalesce disable threshold");
114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
116     &cxgb_tx_reclaim_threshold, 0,
117     "tx cleaning minimum threshold");
118 
119 /*
120  * XXX don't re-enable this until TOE stops assuming
121  * we have an m_ext
122  */
123 static int recycle_enable = 0;
124 
125 extern int cxgb_use_16k_clusters;
126 extern int nmbjumbop;
127 extern int nmbjumbo9;
128 extern int nmbjumbo16;
129 
130 #define USE_GTS 0
131 
132 #define SGE_RX_SM_BUF_SIZE	1536
133 #define SGE_RX_DROP_THRES	16
134 #define SGE_RX_COPY_THRES	128
135 
136 /*
137  * Period of the Tx buffer reclaim timer.  This timer does not need to run
138  * frequently as Tx buffers are usually reclaimed by new Tx packets.
139  */
140 #define TX_RECLAIM_PERIOD       (hz >> 1)
141 
142 /*
143  * Values for sge_txq.flags
144  */
145 enum {
146 	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
147 	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
148 };
149 
150 struct tx_desc {
151 	uint64_t	flit[TX_DESC_FLITS];
152 } __packed;
153 
154 struct rx_desc {
155 	uint32_t	addr_lo;
156 	uint32_t	len_gen;
157 	uint32_t	gen2;
158 	uint32_t	addr_hi;
159 } __packed;
160 
161 struct rsp_desc {               /* response queue descriptor */
162 	struct rss_header	rss_hdr;
163 	uint32_t		flags;
164 	uint32_t		len_cq;
165 	uint8_t			imm_data[47];
166 	uint8_t			intr_gen;
167 } __packed;
168 
169 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
170 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
171 #define RX_SW_DESC_INUSE        (1 << 3)
172 #define TX_SW_DESC_MAPPED       (1 << 4)
173 
174 #define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
175 #define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
176 #define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
177 #define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
178 
179 struct tx_sw_desc {                /* SW state per Tx descriptor */
180 	struct mbuf	*m;
181 	bus_dmamap_t	map;
182 	int		flags;
183 };
184 
185 struct rx_sw_desc {                /* SW state per Rx descriptor */
186 	caddr_t		rxsd_cl;
187 	struct mbuf	*m;
188 	bus_dmamap_t	map;
189 	int		flags;
190 };
191 
192 struct txq_state {
193 	unsigned int	compl;
194 	unsigned int	gen;
195 	unsigned int	pidx;
196 };
197 
198 struct refill_fl_cb_arg {
199 	int               error;
200 	bus_dma_segment_t seg;
201 	int               nseg;
202 };
203 
204 
205 /*
206  * Maps a number of flits to the number of Tx descriptors that can hold them.
207  * The formula is
208  *
209  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
210  *
211  * HW allows up to 4 descriptors to be combined into a WR.
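 *
 * With integer division this means up to WR_FLITS flits fit in a single
 * descriptor, and each additional (WR_FLITS - 1) flits consumes one more
 * descriptor (each continuation descriptor spends one flit on its own WR
 * header, see write_wr_hdr_sgl()).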
212  */
213 static uint8_t flit_desc_map[] = {
214 	0,
215 #if SGE_NUM_GENBITS == 1
216 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220 #elif SGE_NUM_GENBITS == 2
221 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
225 #else
226 # error "SGE_NUM_GENBITS must be 1 or 2"
227 #endif
228 };
229 
230 #define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
231 #define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
232 #define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
233 #define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
234 #define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
236 	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238 #define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
239 	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240 #define	TXQ_RING_DEQUEUE(qs) \
241 	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
242 
243 int cxgb_debug = 0;
244 
245 static void sge_timer_cb(void *arg);
246 static void sge_timer_reclaim(void *arg, int ncount);
247 static void sge_txq_reclaim_handler(void *arg, int ncount);
248 static void cxgb_start_locked(struct sge_qset *qs);
249 
250 /*
251  * XXX need to cope with bursty scheduling by looking at a wider
252  * window than we do now when determining the need for coalescing
253  *
254  */
255 static __inline uint64_t
256 check_pkt_coalesce(struct sge_qset *qs)
257 {
258         struct adapter *sc;
259         struct sge_txq *txq;
260 	uint8_t *fill;
261 
262 	if (__predict_false(cxgb_tx_coalesce_force))
263 		return (1);
264 	txq = &qs->txq[TXQ_ETH];
265         sc = qs->port->adapter;
266 	fill = &sc->tunq_fill[qs->idx];
267 
268 	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
269 		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
270 	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
271 		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
272 	/*
273 	 * Once the hardware transmit queue fills past the coalesce start
274 	 * threshold we mark it as coalescing.  We drop back out of coalescing
275 	 * only when the queue drains below the coalesce stop threshold and
276 	 * there are no packets enqueued; this provides some hysteresis.
277 	 */
278         if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
279 	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
280                 *fill = 0;
281         else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
282                 *fill = 1;
283 
284 	return (sc->tunq_coalesce);
285 }
286 
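/*
 * Write the 8-byte work request header.  On 64-bit hosts both 32-bit words
 * are combined into a single 64-bit store so the SGE never sees a partially
 * written header; on 32-bit hosts the two words are stored separately with a
 * write barrier in between.
 */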
287 #ifdef __LP64__
288 static void
289 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
290 {
291 	uint64_t wr_hilo;
292 #if _BYTE_ORDER == _LITTLE_ENDIAN
293 	wr_hilo = wr_hi;
294 	wr_hilo |= (((uint64_t)wr_lo)<<32);
295 #else
296 	wr_hilo = wr_lo;
297 	wr_hilo |= (((uint64_t)wr_hi)<<32);
298 #endif
299 	wrp->wrh_hilo = wr_hilo;
300 }
301 #else
302 static void
303 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
304 {
305 
306 	wrp->wrh_hi = wr_hi;
307 	wmb();
308 	wrp->wrh_lo = wr_lo;
309 }
310 #endif
311 
312 struct coalesce_info {
313 	int count;
314 	int nbytes;
315 };
316 
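/*
 * Dequeue predicate used while coalescing: accept a packet into the current
 * batch as long as the batch stays within 7 packets and roughly 10500 bytes
 * of payload and the packet is a single contiguous mbuf; the first packet is
 * always accepted.
 */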
317 static int
318 coalesce_check(struct mbuf *m, void *arg)
319 {
320 	struct coalesce_info *ci = arg;
321 	int *count = &ci->count;
322 	int *nbytes = &ci->nbytes;
323 
324 	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
325 		(*count < 7) && (m->m_next == NULL))) {
326 		*count += 1;
327 		*nbytes += m->m_len;
328 		return (1);
329 	}
330 	return (0);
331 }
332 
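/*
 * Dequeue the next packet from the qset's buf_ring, or, when coalescing, a
 * chain of up to 7 small packets linked through m_nextpkt that will be sent
 * as a single work request.
 */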
333 static struct mbuf *
334 cxgb_dequeue(struct sge_qset *qs)
335 {
336 	struct mbuf *m, *m_head, *m_tail;
337 	struct coalesce_info ci;
338 
339 
340 	if (check_pkt_coalesce(qs) == 0)
341 		return TXQ_RING_DEQUEUE(qs);
342 
343 	m_head = m_tail = NULL;
344 	ci.count = ci.nbytes = 0;
345 	do {
346 		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
347 		if (m_head == NULL) {
348 			m_tail = m_head = m;
349 		} else if (m != NULL) {
350 			m_tail->m_nextpkt = m;
351 			m_tail = m;
352 		}
353 	} while (m != NULL);
354 	if (ci.count > 7)
355 		panic("trying to coalesce %d packets in to one WR", ci.count);
356 	return (m_head);
357 }
358 
359 /**
360  *	reclaim_completed_tx - reclaims completed Tx descriptors
361  *	@qs: the queue set that owns the Tx queue
362  *	@queue: the Tx queue to reclaim completed descriptors from
363  *
364  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
365  *	and frees the associated buffers if possible, provided at least
366  *	@reclaim_min of them are reclaimable.  Called with the Tx queue's lock held.
367  */
368 static __inline int
369 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
370 {
371 	struct sge_txq *q = &qs->txq[queue];
372 	int reclaim = desc_reclaimable(q);
373 
374 	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
375 	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
376 		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
377 
378 	if (reclaim < reclaim_min)
379 		return (0);
380 
381 	mtx_assert(&qs->lock, MA_OWNED);
382 	if (reclaim > 0) {
383 		t3_free_tx_desc(qs, reclaim, queue);
384 		q->cleaned += reclaim;
385 		q->in_use -= reclaim;
386 	}
387 	if (isset(&qs->txq_stopped, TXQ_ETH))
388                 clrbit(&qs->txq_stopped, TXQ_ETH);
389 
390 	return (reclaim);
391 }
392 
393 /**
394  *	should_restart_tx - are there enough resources to restart a Tx queue?
395  *	@q: the Tx queue
396  *
397  *	Checks if there are enough descriptors to restart a suspended Tx queue.
398  */
399 static __inline int
400 should_restart_tx(const struct sge_txq *q)
401 {
402 	unsigned int r = q->processed - q->cleaned;
403 
404 	return q->in_use - r < (q->size >> 1);
405 }
406 
407 /**
408  *	t3_sge_init - initialize SGE
409  *	@adap: the adapter
410  *	@p: the SGE parameters
411  *
412  *	Performs SGE initialization needed every time after a chip reset.
413  *	We do not initialize any of the queue sets here, instead the driver
414  *	top-level must request those individually.  We also do not enable DMA
415  *	here, that should be done after the queues have been set up.
416  */
417 void
418 t3_sge_init(adapter_t *adap, struct sge_params *p)
419 {
420 	u_int ctrl, ups;
421 
422 	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
423 
424 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
425 	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
426 	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
427 	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
428 #if SGE_NUM_GENBITS == 1
429 	ctrl |= F_EGRGENCTRL;
430 #endif
431 	if (adap->params.rev > 0) {
432 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
433 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
434 	}
435 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
436 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
437 		     V_LORCQDRBTHRSH(512));
438 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
439 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
440 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
441 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
442 		     adap->params.rev < T3_REV_C ? 1000 : 500);
443 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
444 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
445 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
446 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
447 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
448 }
449 
450 
451 /**
452  *	sgl_len - calculates the size of an SGL of the given capacity
453  *	@n: the number of SGL entries
454  *
455  *	Calculates the number of flits needed for a scatter/gather list that
456  *	can hold the given number of entries.
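 *
 *	Each sg_ent packs two entries into three flits (two 32-bit lengths
 *	plus two 64-bit addresses), so n entries take 3 * (n / 2) flits plus
 *	two more for an odd trailing entry.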
457  */
458 static __inline unsigned int
459 sgl_len(unsigned int n)
460 {
461 	return ((3 * n) / 2 + (n & 1));
462 }
463 
464 /**
465  *	get_imm_packet - copy the immediate data of a response into an mbuf
466  *	@resp: the response descriptor containing the packet data
467  *
468  *	Copies the immediate data of the given response into @m and sets its length.
469  */
470 static int
471 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
472 {
473 
474 	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
475 		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
476 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
477 	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
478 		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
479 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
480 	} else
481 		m->m_len = IMMED_PKT_SIZE;
482 	m->m_ext.ext_buf = NULL;
483 	m->m_ext.ext_type = 0;
484 	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
485 	return (0);
486 }
487 
488 static __inline u_int
489 flits_to_desc(u_int n)
490 {
491 	return (flit_desc_map[n]);
492 }
493 
494 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
495 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
496 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
497 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
498 		    F_HIRCQPARITYERROR)
499 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
500 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
501 		      F_RSPQDISABLED)
502 
503 /**
504  *	t3_sge_err_intr_handler - SGE async event interrupt handler
505  *	@adapter: the adapter
506  *
507  *	Interrupt handler for SGE asynchronous (non-data) events.
508  */
509 void
510 t3_sge_err_intr_handler(adapter_t *adapter)
511 {
512 	unsigned int v, status;
513 
514 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
515 	if (status & SGE_PARERR)
516 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
517 			 status & SGE_PARERR);
518 	if (status & SGE_FRAMINGERR)
519 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
520 			 status & SGE_FRAMINGERR);
521 	if (status & F_RSPQCREDITOVERFOW)
522 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
523 
524 	if (status & F_RSPQDISABLED) {
525 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
526 
527 		CH_ALERT(adapter,
528 			 "packet delivered to disabled response queue (0x%x)\n",
529 			 (v >> S_RSPQ0DISABLED) & 0xff);
530 	}
531 
532 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
533 	if (status & SGE_FATALERR)
534 		t3_fatal_err(adapter);
535 }
536 
537 void
538 t3_sge_prep(adapter_t *adap, struct sge_params *p)
539 {
540 	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
541 
542 	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
543 	nqsets *= adap->params.nports;
544 
545 	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
546 
547 	while (!powerof2(fl_q_size))
548 		fl_q_size--;
549 
550 	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
551 	    is_offload(adap);
552 
553 #if __FreeBSD_version >= 700111
554 	if (use_16k) {
555 		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
556 		jumbo_buf_size = MJUM16BYTES;
557 	} else {
558 		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
559 		jumbo_buf_size = MJUM9BYTES;
560 	}
561 #else
562 	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
563 	jumbo_buf_size = MJUMPAGESIZE;
564 #endif
565 	while (!powerof2(jumbo_q_size))
566 		jumbo_q_size--;
567 
568 	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
569 		device_printf(adap->dev,
570 		    "Insufficient clusters and/or jumbo buffers.\n");
571 
572 	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
573 
574 	for (i = 0; i < SGE_QSETS; ++i) {
575 		struct qset_params *q = p->qset + i;
576 
577 		if (adap->params.nports > 2) {
578 			q->coalesce_usecs = 50;
579 		} else {
580 #ifdef INVARIANTS
581 			q->coalesce_usecs = 10;
582 #else
583 			q->coalesce_usecs = 5;
584 #endif
585 		}
586 		q->polling = 0;
587 		q->rspq_size = RSPQ_Q_SIZE;
588 		q->fl_size = fl_q_size;
589 		q->jumbo_size = jumbo_q_size;
590 		q->jumbo_buf_size = jumbo_buf_size;
591 		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
592 		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
593 		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
594 		q->cong_thres = 0;
595 	}
596 }
597 
598 int
599 t3_sge_alloc(adapter_t *sc)
600 {
601 
602 	/* The parent tag. */
603 	if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
604 				1, 0,			/* algnmnt, boundary */
605 				BUS_SPACE_MAXADDR,	/* lowaddr */
606 				BUS_SPACE_MAXADDR,	/* highaddr */
607 				NULL, NULL,		/* filter, filterarg */
608 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
609 				BUS_SPACE_UNRESTRICTED, /* nsegments */
610 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
611 				0,			/* flags */
612 				NULL, NULL,		/* lock, lockarg */
613 				&sc->parent_dmat)) {
614 		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
615 		return (ENOMEM);
616 	}
617 
618 	/*
619 	 * DMA tag for normal sized RX frames
620 	 */
621 	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
622 		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
623 		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
624 		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
625 		return (ENOMEM);
626 	}
627 
628 	/*
629 	 * DMA tag for jumbo sized RX frames.
630 	 */
631 	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
632 		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
633 		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
634 		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
635 		return (ENOMEM);
636 	}
637 
638 	/*
639 	 * DMA tag for TX frames.
640 	 */
641 	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
642 		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
643 		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
644 		NULL, NULL, &sc->tx_dmat)) {
645 		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
646 		return (ENOMEM);
647 	}
648 
649 	return (0);
650 }
651 
652 int
653 t3_sge_free(struct adapter * sc)
654 {
655 
656 	if (sc->tx_dmat != NULL)
657 		bus_dma_tag_destroy(sc->tx_dmat);
658 
659 	if (sc->rx_jumbo_dmat != NULL)
660 		bus_dma_tag_destroy(sc->rx_jumbo_dmat);
661 
662 	if (sc->rx_dmat != NULL)
663 		bus_dma_tag_destroy(sc->rx_dmat);
664 
665 	if (sc->parent_dmat != NULL)
666 		bus_dma_tag_destroy(sc->parent_dmat);
667 
668 	return (0);
669 }
670 
671 void
672 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
673 {
674 
675 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
676 	qs->rspq.polling = 0 /* p->polling */;
677 }
678 
679 #if !defined(__i386__) && !defined(__amd64__)
680 static void
681 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
682 {
683 	struct refill_fl_cb_arg *cb_arg = arg;
684 
685 	cb_arg->error = error;
686 	cb_arg->seg = segs[0];
687 	cb_arg->nseg = nseg;
688 
689 }
690 #endif
691 /**
692  *	refill_fl - refill an SGE free-buffer list
693  *	@sc: the controller softc
694  *	@q: the free-list to refill
695  *	@n: the number of new buffers to allocate
696  *
697  *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
698  *	The caller must assure that @n does not exceed the queue's capacity.
699  */
700 static void
701 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
702 {
703 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
704 	struct rx_desc *d = &q->desc[q->pidx];
705 	struct refill_fl_cb_arg cb_arg;
706 	struct mbuf *m;
707 	caddr_t cl;
708 	int err;
709 
710 	cb_arg.error = 0;
711 	while (n--) {
712 		/*
713 		 * We allocate an uninitialized mbuf + cluster, mbuf is
714 		 * initialized after rx.
715 		 */
716 		if (q->zone == zone_pack) {
717 			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
718 				break;
719 			cl = m->m_ext.ext_buf;
720 		} else {
721 			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
722 				break;
723 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
724 				uma_zfree(q->zone, cl);
725 				break;
726 			}
727 		}
728 		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
729 			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
730 				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
731 				uma_zfree(q->zone, cl);
732 				goto done;
733 			}
734 			sd->flags |= RX_SW_DESC_MAP_CREATED;
735 		}
736 #if !defined(__i386__) && !defined(__amd64__)
737 		err = bus_dmamap_load(q->entry_tag, sd->map,
738 		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
739 
740 		if (err != 0 || cb_arg.error) {
741 			if (q->zone == zone_pack)
742 				uma_zfree(q->zone, cl);
743 			m_free(m);
744 			goto done;
745 		}
746 #else
747 		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
748 #endif
749 		sd->flags |= RX_SW_DESC_INUSE;
750 		sd->rxsd_cl = cl;
751 		sd->m = m;
752 		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
753 		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
754 		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
755 		d->gen2 = htobe32(V_FLD_GEN2(q->gen));
756 
757 		d++;
758 		sd++;
759 
760 		if (++q->pidx == q->size) {
761 			q->pidx = 0;
762 			q->gen ^= 1;
763 			sd = q->sdesc;
764 			d = q->desc;
765 		}
766 		q->credits++;
767 		q->db_pending++;
768 	}
769 
770 done:
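	/*
	 * Tell the hardware about the new buffers in batches: the free-list
	 * doorbell is only rung once at least 32 buffers have been posted.
	 */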
771 	if (q->db_pending >= 32) {
772 		q->db_pending = 0;
773 		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
774 	}
775 }
776 
777 
778 /**
779  *	free_rx_bufs - free the Rx buffers on an SGE free list
780  *	@sc: the controller softc
781  *	@q: the SGE free list to clean up
782  *
783  *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
784  *	this queue should be stopped before calling this function.
785  */
786 static void
787 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
788 {
789 	u_int cidx = q->cidx;
790 
791 	while (q->credits--) {
792 		struct rx_sw_desc *d = &q->sdesc[cidx];
793 
794 		if (d->flags & RX_SW_DESC_INUSE) {
795 			bus_dmamap_unload(q->entry_tag, d->map);
796 			bus_dmamap_destroy(q->entry_tag, d->map);
797 			if (q->zone == zone_pack) {
798 				m_init(d->m, zone_pack, MCLBYTES,
799 				    M_NOWAIT, MT_DATA, M_EXT);
800 				uma_zfree(zone_pack, d->m);
801 			} else {
802 				m_init(d->m, zone_mbuf, MLEN,
803 				    M_NOWAIT, MT_DATA, 0);
804 				uma_zfree(zone_mbuf, d->m);
805 				uma_zfree(q->zone, d->rxsd_cl);
806 			}
807 		}
808 
809 		d->rxsd_cl = NULL;
810 		d->m = NULL;
811 		if (++cidx == q->size)
812 			cidx = 0;
813 	}
814 }
815 
816 static __inline void
817 __refill_fl(adapter_t *adap, struct sge_fl *fl)
818 {
819 	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
820 }
821 
822 static __inline void
823 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
824 {
825 	uint32_t reclaimable = fl->size - fl->credits;
826 
827 	if (reclaimable > 0)
828 		refill_fl(adap, fl, min(max, reclaimable));
829 }
830 
831 /**
832  *	recycle_rx_buf - recycle a receive buffer
833  *	@adapter: the adapter
834  *	@q: the SGE free list
835  *	@idx: index of buffer to recycle
836  *
837  *	Recycles the specified buffer on the given free list by adding it at
838  *	the next available slot on the list.
839  */
840 static void
841 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
842 {
843 	struct rx_desc *from = &q->desc[idx];
844 	struct rx_desc *to   = &q->desc[q->pidx];
845 
846 	q->sdesc[q->pidx] = q->sdesc[idx];
847 	to->addr_lo = from->addr_lo;        // already big endian
848 	to->addr_hi = from->addr_hi;        // likewise
849 	wmb();	/* necessary ? */
850 	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
851 	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
852 	q->credits++;
853 
854 	if (++q->pidx == q->size) {
855 		q->pidx = 0;
856 		q->gen ^= 1;
857 	}
858 	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
859 }
860 
861 static void
862 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
863 {
864 	uint32_t *addr;
865 
866 	addr = arg;
867 	*addr = segs[0].ds_addr;
868 }
869 
870 static int
871 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
872     bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
873     bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
874 {
875 	size_t len = nelem * elem_size;
876 	void *s = NULL;
877 	void *p = NULL;
878 	int err;
879 
880 	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
881 				      BUS_SPACE_MAXADDR_32BIT,
882 				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
883 				      len, 0, NULL, NULL, tag)) != 0) {
884 		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
885 		return (ENOMEM);
886 	}
887 
888 	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
889 				    map)) != 0) {
890 		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
891 		return (ENOMEM);
892 	}
893 
894 	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
895 	bzero(p, len);
896 	*(void **)desc = p;
897 
898 	if (sw_size) {
899 		len = nelem * sw_size;
900 		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
901 		*(void **)sdesc = s;
902 	}
903 	if (parent_entry_tag == NULL)
904 		return (0);
905 
906 	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
907 				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
908 		                      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
909 				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
910 		                      NULL, NULL, entry_tag)) != 0) {
911 		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
912 		return (ENOMEM);
913 	}
914 	return (0);
915 }
916 
917 static void
918 sge_slow_intr_handler(void *arg, int ncount)
919 {
920 	adapter_t *sc = arg;
921 
922 	t3_slow_intr_handler(sc);
923 	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
924 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
925 }
926 
927 /**
928  *	sge_timer_cb - perform periodic maintenance of an SGE qset
929  *	@data: the SGE queue set to maintain
930  *
931  *	Runs periodically from a timer to perform maintenance of an SGE queue
932  *	set.  It performs the following tasks:
933  *
934  *	a) Cleans up any completed Tx descriptors that may still be pending.
935  *	Normal descriptor cleanup happens when new packets are added to a Tx
936  *	queue so this timer is relatively infrequent and does any cleanup only
937  *	if the Tx queue has not seen any new packets in a while.  We make a
938  *	best effort attempt to reclaim descriptors, in that we don't wait
939  *	around if we cannot get a queue's lock (which most likely is because
940  *	someone else is queueing new packets and so will also handle the clean
941  *	up).  Since control queues use immediate data exclusively we don't
942  *	bother cleaning them up here.
943  *
944  *	b) Replenishes Rx queues that have run out due to memory shortage.
945  *	Normally new Rx buffers are added when existing ones are consumed but
946  *	when out of memory a queue can become empty.  We try to add only a few
947  *	buffers here, the queue will be replenished fully as these new buffers
948  *	are used up if memory shortage has subsided.
949  *
950  *	c) Return coalesced response queue credits in case a response queue is
951  *	starved.
952  *
953  *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
954  *	fifo overflows and the FW doesn't implement any recovery scheme yet.
955  */
956 static void
957 sge_timer_cb(void *arg)
958 {
959 	adapter_t *sc = arg;
960 	if ((sc->flags & USING_MSIX) == 0) {
961 
962 		struct port_info *pi;
963 		struct sge_qset *qs;
964 		struct sge_txq  *txq;
965 		int i, j;
966 		int reclaim_ofl, refill_rx;
967 
968 		if (sc->open_device_map == 0)
969 			return;
970 
971 		for (i = 0; i < sc->params.nports; i++) {
972 			pi = &sc->port[i];
973 			for (j = 0; j < pi->nqsets; j++) {
974 				qs = &sc->sge.qs[pi->first_qset + j];
975 				txq = &qs->txq[0];
976 				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
977 				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
978 				    (qs->fl[1].credits < qs->fl[1].size));
979 				if (reclaim_ofl || refill_rx) {
980 					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
981 					break;
982 				}
983 			}
984 		}
985 	}
986 
987 	if (sc->params.nports > 2) {
988 		int i;
989 
990 		for_each_port(sc, i) {
991 			struct port_info *pi = &sc->port[i];
992 
993 			t3_write_reg(sc, A_SG_KDOORBELL,
994 				     F_SELEGRCNTX |
995 				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
996 		}
997 	}
998 	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
999 	    sc->open_device_map != 0)
1000 		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1001 }
1002 
1003 /*
1004  * This is meant to be a catch-all function to keep sge state private
1005  * to sge.c
1006  *
1007  */
1008 int
1009 t3_sge_init_adapter(adapter_t *sc)
1010 {
1011 	callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
1012 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1013 	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1014 	return (0);
1015 }
1016 
1017 int
1018 t3_sge_reset_adapter(adapter_t *sc)
1019 {
1020 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1021 	return (0);
1022 }
1023 
1024 int
1025 t3_sge_init_port(struct port_info *pi)
1026 {
1027 	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1028 	return (0);
1029 }
1030 
1031 /**
1032  *	refill_rspq - replenish an SGE response queue
1033  *	@adapter: the adapter
1034  *	@q: the response queue to replenish
1035  *	@credits: how many new responses to make available
1036  *
1037  *	Replenishes a response queue by making the supplied number of responses
1038  *	available to HW.
1039  */
1040 static __inline void
1041 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1042 {
1043 
1044 	/* mbufs are allocated on demand when a rspq entry is processed. */
1045 	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1046 		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1047 }
1048 
1049 static void
1050 sge_txq_reclaim_handler(void *arg, int ncount)
1051 {
1052 	struct sge_qset *qs = arg;
1053 	int i;
1054 
1055 	for (i = 0; i < 3; i++)
1056 		reclaim_completed_tx(qs, 16, i);
1057 }
1058 
1059 static void
1060 sge_timer_reclaim(void *arg, int ncount)
1061 {
1062 	struct port_info *pi = arg;
1063 	int i, nqsets = pi->nqsets;
1064 	adapter_t *sc = pi->adapter;
1065 	struct sge_qset *qs;
1066 	struct mtx *lock;
1067 
1068 	KASSERT((sc->flags & USING_MSIX) == 0,
1069 	    ("can't call timer reclaim for msi-x"));
1070 
1071 	for (i = 0; i < nqsets; i++) {
1072 		qs = &sc->sge.qs[pi->first_qset + i];
1073 
1074 		reclaim_completed_tx(qs, 16, TXQ_OFLD);
1075 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1076 			    &sc->sge.qs[0].rspq.lock;
1077 
1078 		if (mtx_trylock(lock)) {
1079 			/* XXX currently assume that we are *NOT* polling */
1080 			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1081 
1082 			if (qs->fl[0].credits < qs->fl[0].size - 16)
1083 				__refill_fl(sc, &qs->fl[0]);
1084 			if (qs->fl[1].credits < qs->fl[1].size - 16)
1085 				__refill_fl(sc, &qs->fl[1]);
1086 
1087 			if (status & (1 << qs->rspq.cntxt_id)) {
1088 				if (qs->rspq.credits) {
1089 					refill_rspq(sc, &qs->rspq, 1);
1090 					qs->rspq.credits--;
1091 					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1092 					    1 << qs->rspq.cntxt_id);
1093 				}
1094 			}
1095 			mtx_unlock(lock);
1096 		}
1097 	}
1098 }
1099 
1100 /**
1101  *	init_qset_cntxt - initialize an SGE queue set context info
1102  *	@qs: the queue set
1103  *	@id: the queue set id
1104  *
1105  *	Initializes the TIDs and context ids for the queues of a queue set.
1106  */
1107 static void
1108 init_qset_cntxt(struct sge_qset *qs, u_int id)
1109 {
1110 
1111 	qs->rspq.cntxt_id = id;
1112 	qs->fl[0].cntxt_id = 2 * id;
1113 	qs->fl[1].cntxt_id = 2 * id + 1;
1114 	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1115 	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1116 	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1117 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1118 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1119 
1120 	/* XXX: a sane limit is needed instead of INT_MAX */
1121 	mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1122 	mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1123 	mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1124 }
1125 
1126 
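/*
 * Reserve ndesc descriptors on the Tx queue and record the generation and
 * producer index the caller should use in *txqs.  A work request completion
 * (txqs->compl) is requested roughly once every 32 descriptors so that the
 * SGE periodically reports back how far it has progressed.
 */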
1127 static void
1128 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1129 {
1130 	txq->in_use += ndesc;
1131 	/*
1132 	 * XXX we don't handle stopping of the queue;
1133 	 * presumably start handles this when we bump against the end
1134 	 */
1135 	txqs->gen = txq->gen;
1136 	txq->unacked += ndesc;
1137 	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1138 	txq->unacked &= 31;
1139 	txqs->pidx = txq->pidx;
1140 	txq->pidx += ndesc;
1141 #ifdef INVARIANTS
1142 	if (((txqs->pidx > txq->cidx) &&
1143 		(txq->pidx < txqs->pidx) &&
1144 		(txq->pidx >= txq->cidx)) ||
1145 	    ((txqs->pidx < txq->cidx) &&
1146 		(txq->pidx >= txq-> cidx)) ||
1147 	    ((txqs->pidx < txq->cidx) &&
1148 		(txq->cidx < txqs->pidx)))
1149 		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1150 		    txqs->pidx, txq->pidx, txq->cidx);
1151 #endif
1152 	if (txq->pidx >= txq->size) {
1153 		txq->pidx -= txq->size;
1154 		txq->gen ^= 1;
1155 	}
1156 
1157 }
1158 
1159 /**
1160  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
1161  *	@m: the packet mbufs
1162  *      @nsegs: the number of segments
1163  *
1164  * 	Returns the number of Tx descriptors needed for the given Ethernet
1165  * 	packet.  Ethernet packets require addition of WR and CPL headers.
1166  */
1167 static __inline unsigned int
1168 calc_tx_descs(const struct mbuf *m, int nsegs)
1169 {
1170 	unsigned int flits;
1171 
1172 	if (m->m_pkthdr.len <= PIO_LEN)
1173 		return 1;
1174 
1175 	flits = sgl_len(nsegs) + 2;
1176 	if (m->m_pkthdr.csum_flags & CSUM_TSO)
1177 		flits++;
1178 
1179 	return flits_to_desc(flits);
1180 }
1181 
1182 /**
1183  *	make_sgl - populate a scatter/gather list for a packet
1184  *	@sgp: the SGL to populate
1185  *	@segs: the packet dma segments
1186  *	@nsegs: the number of segments
1187  *
1188  *	Generates a scatter/gather list for the buffers that make up a packet,
1189  *	packing two segments per sg_ent and skipping empty segments.  The caller
1190  *	must size the SGL appropriately (see sgl_len()).
1191  */
1192 static __inline void
1193 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1194 {
1195 	int i, idx;
1196 
1197 	for (idx = 0, i = 0; i < nsegs; i++) {
1198 		/*
1199 		 * firmware doesn't like empty segments
1200 		 */
1201 		if (segs[i].ds_len == 0)
1202 			continue;
1203 		if (i && idx == 0)
1204 			++sgp;
1205 
1206 		sgp->len[idx] = htobe32(segs[i].ds_len);
1207 		sgp->addr[idx] = htobe64(segs[i].ds_addr);
1208 		idx ^= 1;
1209 	}
1210 
1211 	if (idx) {
1212 		sgp->len[idx] = 0;
1213 		sgp->addr[idx] = 0;
1214 	}
1215 }
1216 
1217 /**
1218  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1219  *	@adap: the adapter
1220  *	@q: the Tx queue
1221  *
1222  *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
1223  *	where the HW may go to sleep just after we check; in that case the
1224  *	interrupt handler will detect the outstanding TX packet and ring the
1225  *	doorbell for us.
1226  *
1227  *	When GTS is disabled we ring the doorbell once @mustring is set or 32 doorbell updates have been batched up.
1228  */
1229 static __inline void
1230 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1231 {
1232 #if USE_GTS
1233 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1234 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1235 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1236 #ifdef T3_TRACE
1237 		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1238 			  q->cntxt_id);
1239 #endif
1240 		t3_write_reg(adap, A_SG_KDOORBELL,
1241 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1242 	}
1243 #else
1244 	if (mustring || ++q->db_pending >= 32) {
1245 		wmb();            /* write descriptors before telling HW */
1246 		t3_write_reg(adap, A_SG_KDOORBELL,
1247 		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1248 		q->db_pending = 0;
1249 	}
1250 #endif
1251 }
1252 
1253 static __inline void
1254 wr_gen2(struct tx_desc *d, unsigned int gen)
1255 {
1256 #if SGE_NUM_GENBITS == 2
1257 	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1258 #endif
1259 }
1260 
1261 /**
1262  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1263  *	@ndesc: number of Tx descriptors spanned by the SGL
1264  *	@txd: first Tx descriptor to be written
1265  *	@txqs: txq state (generation and producer index)
1266  *	@txq: the SGE Tx queue
1267  *	@sgl: the SGL
1268  *	@flits: number of flits to the start of the SGL in the first descriptor
1269  *	@sgl_flits: the SGL size in flits
1270  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1271  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1272  *
1273  *	Write a work request header and an associated SGL.  If the SGL is
1274  *	small enough to fit into one Tx descriptor it has already been written
1275  *	and we just need to write the WR header.  Otherwise we distribute the
1276  *	SGL across the number of descriptors it spans.
1277  */
1278 static void
1279 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1280     const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1281     unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1282 {
1283 
1284 	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1285 	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1286 
1287 	if (__predict_true(ndesc == 1)) {
1288 		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1289 		    V_WR_SGLSFLT(flits)) | wr_hi,
1290 		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1291 		    wr_lo);
1292 
1293 		wr_gen2(txd, txqs->gen);
1294 
1295 	} else {
1296 		unsigned int ogen = txqs->gen;
1297 		const uint64_t *fp = (const uint64_t *)sgl;
1298 		struct work_request_hdr *wp = wrp;
1299 
1300 		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1301 		    V_WR_SGLSFLT(flits)) | wr_hi;
1302 
1303 		while (sgl_flits) {
1304 			unsigned int avail = WR_FLITS - flits;
1305 
1306 			if (avail > sgl_flits)
1307 				avail = sgl_flits;
1308 			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1309 			sgl_flits -= avail;
1310 			ndesc--;
1311 			if (!sgl_flits)
1312 				break;
1313 
1314 			fp += avail;
1315 			txd++;
1316 			txsd++;
1317 			if (++txqs->pidx == txq->size) {
1318 				txqs->pidx = 0;
1319 				txqs->gen ^= 1;
1320 				txd = txq->desc;
1321 				txsd = txq->sdesc;
1322 			}
1323 
1324 			/*
1325 			 * when the head of the mbuf chain
1326 			 * is freed all clusters will be freed
1327 			 * with it
1328 			 */
1329 			wrp = (struct work_request_hdr *)txd;
1330 			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1331 			    V_WR_SGLSFLT(1)) | wr_hi;
1332 			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1333 				    sgl_flits + 1)) |
1334 			    V_WR_GEN(txqs->gen)) | wr_lo;
1335 			wr_gen2(txd, txqs->gen);
1336 			flits = 1;
1337 		}
1338 		wrp->wrh_hi |= htonl(F_WR_EOP);
1339 		wmb();
1340 		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1341 		wr_gen2((struct tx_desc *)wp, ogen);
1342 	}
1343 }
1344 
1345 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1346 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1347 
1348 #define GET_VTAG(cntrl, m) \
1349 do { \
1350 	if ((m)->m_flags & M_VLANTAG)					            \
1351 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1352 } while (0)
1353 
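/**
 *	t3_encap - map and write a packet to the Ethernet Tx queue
 *	@qs: the queue set to transmit on
 *	@m: pointer to the packet (may be updated by DMA mapping/collapse)
 *
 *	Builds one of three work request formats depending on the mbuf: a
 *	coalesced CPL_TX_PKT batch for an m_nextpkt chain, a CPL_TX_PKT_LSO
 *	request for TSO, or a plain CPL_TX_PKT.  Packets no larger than
 *	PIO_LEN are written into the descriptor as immediate data instead of
 *	being referenced through an SGL.
 */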
1354 static int
1355 t3_encap(struct sge_qset *qs, struct mbuf **m)
1356 {
1357 	adapter_t *sc;
1358 	struct mbuf *m0;
1359 	struct sge_txq *txq;
1360 	struct txq_state txqs;
1361 	struct port_info *pi;
1362 	unsigned int ndesc, flits, cntrl, mlen;
1363 	int err, nsegs, tso_info = 0;
1364 
1365 	struct work_request_hdr *wrp;
1366 	struct tx_sw_desc *txsd;
1367 	struct sg_ent *sgp, *sgl;
1368 	uint32_t wr_hi, wr_lo, sgl_flits;
1369 	bus_dma_segment_t segs[TX_MAX_SEGS];
1370 
1371 	struct tx_desc *txd;
1372 
1373 	pi = qs->port;
1374 	sc = pi->adapter;
1375 	txq = &qs->txq[TXQ_ETH];
1376 	txd = &txq->desc[txq->pidx];
1377 	txsd = &txq->sdesc[txq->pidx];
1378 	sgl = txq->txq_sgl;
1379 
1380 	prefetch(txd);
1381 	m0 = *m;
1382 
1383 	mtx_assert(&qs->lock, MA_OWNED);
1384 	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1385 	KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1386 
1387 	if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1388 	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
1389 		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1390 
1391 	if (m0->m_nextpkt != NULL) {
1392 		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1393 		ndesc = 1;
1394 		mlen = 0;
1395 	} else {
1396 		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1397 		    &m0, segs, &nsegs))) {
1398 			if (cxgb_debug)
1399 				printf("failed ... err=%d\n", err);
1400 			return (err);
1401 		}
1402 		mlen = m0->m_pkthdr.len;
1403 		ndesc = calc_tx_descs(m0, nsegs);
1404 	}
1405 	txq_prod(txq, ndesc, &txqs);
1406 
1407 	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1408 	txsd->m = m0;
1409 
1410 	if (m0->m_nextpkt != NULL) {
1411 		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1412 		int i, fidx;
1413 
1414 		if (nsegs > 7)
1415 			panic("trying to coalesce %d packets in to one WR", nsegs);
1416 		txq->txq_coalesced += nsegs;
1417 		wrp = (struct work_request_hdr *)txd;
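		/*
		 * Each cpl_tx_pkt_batch_entry occupies two flits (cntrl and
		 * len in the first, the 64-bit buffer address in the second);
		 * the work request header takes one more.
		 */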
1418 		flits = nsegs*2 + 1;
1419 
1420 		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1421 			struct cpl_tx_pkt_batch_entry *cbe;
1422 			uint64_t flit;
1423 			uint32_t *hflit = (uint32_t *)&flit;
1424 			int cflags = m0->m_pkthdr.csum_flags;
1425 
1426 			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1427 			GET_VTAG(cntrl, m0);
1428 			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1429 			if (__predict_false(!(cflags & CSUM_IP)))
1430 				cntrl |= F_TXPKT_IPCSUM_DIS;
1431 			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1432 			    CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1433 				cntrl |= F_TXPKT_L4CSUM_DIS;
1434 
1435 			hflit[0] = htonl(cntrl);
1436 			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1437 			flit |= htobe64(1 << 24);
1438 			cbe = &cpl_batch->pkt_entry[i];
1439 			cbe->cntrl = hflit[0];
1440 			cbe->len = hflit[1];
1441 			cbe->addr = htobe64(segs[i].ds_addr);
1442 		}
1443 
1444 		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1445 		    V_WR_SGLSFLT(flits)) |
1446 		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1447 		wr_lo = htonl(V_WR_LEN(flits) |
1448 		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1449 		set_wr_hdr(wrp, wr_hi, wr_lo);
1450 		wmb();
1451 		ETHER_BPF_MTAP(pi->ifp, m0);
1452 		wr_gen2(txd, txqs.gen);
1453 		check_ring_tx_db(sc, txq, 0);
1454 		return (0);
1455 	} else if (tso_info) {
1456 		uint16_t eth_type;
1457 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1458 		struct ether_header *eh;
1459 		void *l3hdr;
1460 		struct tcphdr *tcp;
1461 
1462 		txd->flit[2] = 0;
1463 		GET_VTAG(cntrl, m0);
1464 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1465 		hdr->cntrl = htonl(cntrl);
1466 		hdr->len = htonl(mlen | 0x80000000);
1467 
1468 		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1469 			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1470 			    m0, mlen, m0->m_pkthdr.tso_segsz,
1471 			    (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1472 			panic("tx tso packet too small");
1473 		}
1474 
1475 		/* Make sure that ether, ip, tcp headers are all in m0 */
1476 		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1477 			m0 = m_pullup(m0, TCPPKTHDRSIZE);
1478 			if (__predict_false(m0 == NULL)) {
1479 				/* XXX panic probably an overreaction */
1480 				panic("couldn't fit header into mbuf");
1481 			}
1482 		}
1483 
1484 		eh = mtod(m0, struct ether_header *);
1485 		eth_type = eh->ether_type;
1486 		if (eth_type == htons(ETHERTYPE_VLAN)) {
1487 			struct ether_vlan_header *evh = (void *)eh;
1488 
1489 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1490 			l3hdr = evh + 1;
1491 			eth_type = evh->evl_proto;
1492 		} else {
1493 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1494 			l3hdr = eh + 1;
1495 		}
1496 
1497 		if (eth_type == htons(ETHERTYPE_IP)) {
1498 			struct ip *ip = l3hdr;
1499 
1500 			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1501 			tcp = (struct tcphdr *)(ip + 1);
1502 		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
1503 			struct ip6_hdr *ip6 = l3hdr;
1504 
1505 			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1506 			    ("%s: CSUM_TSO with ip6_nxt %d",
1507 			    __func__, ip6->ip6_nxt));
1508 
1509 			tso_info |= F_LSO_IPV6;
1510 			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1511 			tcp = (struct tcphdr *)(ip6 + 1);
1512 		} else
1513 			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1514 
1515 		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1516 		hdr->lso_info = htonl(tso_info);
1517 
1518 		if (__predict_false(mlen <= PIO_LEN)) {
1519 			/*
1520 			 * The packet is not undersized but still fits in PIO_LEN;
1521 			 * this indicates a TSO bug at the higher levels.
1522 			 */
1523 			txsd->m = NULL;
1524 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1525 			flits = (mlen + 7) / 8 + 3;
1526 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1527 					  V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1528 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1529 			wr_lo = htonl(V_WR_LEN(flits) |
1530 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1531 			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1532 			wmb();
1533 			ETHER_BPF_MTAP(pi->ifp, m0);
1534 			wr_gen2(txd, txqs.gen);
1535 			check_ring_tx_db(sc, txq, 0);
1536 			m_freem(m0);
1537 			return (0);
1538 		}
1539 		flits = 3;
1540 	} else {
1541 		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1542 
1543 		GET_VTAG(cntrl, m0);
1544 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1545 		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1546 			cntrl |= F_TXPKT_IPCSUM_DIS;
1547 		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1548 		    CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1549 			cntrl |= F_TXPKT_L4CSUM_DIS;
1550 		cpl->cntrl = htonl(cntrl);
1551 		cpl->len = htonl(mlen | 0x80000000);
1552 
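		/*
		 * Small packets are copied into the descriptor as immediate
		 * data; the mbuf is freed right away and txsd->m is left NULL
		 * so there is nothing to release when the descriptor is
		 * reclaimed.
		 */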
1553 		if (mlen <= PIO_LEN) {
1554 			txsd->m = NULL;
1555 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1556 			flits = (mlen + 7) / 8 + 2;
1557 
1558 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1559 			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1560 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1561 			wr_lo = htonl(V_WR_LEN(flits) |
1562 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1563 			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1564 			wmb();
1565 			ETHER_BPF_MTAP(pi->ifp, m0);
1566 			wr_gen2(txd, txqs.gen);
1567 			check_ring_tx_db(sc, txq, 0);
1568 			m_freem(m0);
1569 			return (0);
1570 		}
1571 		flits = 2;
1572 	}
1573 	wrp = (struct work_request_hdr *)txd;
1574 	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1575 	make_sgl(sgp, segs, nsegs);
1576 
1577 	sgl_flits = sgl_len(nsegs);
1578 
1579 	ETHER_BPF_MTAP(pi->ifp, m0);
1580 
1581 	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1582 	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1583 	wr_lo = htonl(V_WR_TID(txq->token));
1584 	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1585 	    sgl_flits, wr_hi, wr_lo);
1586 	check_ring_tx_db(sc, txq, 0);
1587 
1588 	return (0);
1589 }
1590 
1591 void
1592 cxgb_tx_watchdog(void *arg)
1593 {
1594 	struct sge_qset *qs = arg;
1595 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1596 
1597         if (qs->coalescing != 0 &&
1598 	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1599 	    TXQ_RING_EMPTY(qs))
1600                 qs->coalescing = 0;
1601         else if (qs->coalescing == 0 &&
1602 	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
1603                 qs->coalescing = 1;
1604 	if (TXQ_TRYLOCK(qs)) {
1605 		qs->qs_flags |= QS_FLUSHING;
1606 		cxgb_start_locked(qs);
1607 		qs->qs_flags &= ~QS_FLUSHING;
1608 		TXQ_UNLOCK(qs);
1609 	}
1610 	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1611 		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1612 		    qs, txq->txq_watchdog.c_cpu);
1613 }
1614 
1615 static void
1616 cxgb_tx_timeout(void *arg)
1617 {
1618 	struct sge_qset *qs = arg;
1619 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1620 
1621 	if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1622                 qs->coalescing = 1;
1623 	if (TXQ_TRYLOCK(qs)) {
1624 		qs->qs_flags |= QS_TIMEOUT;
1625 		cxgb_start_locked(qs);
1626 		qs->qs_flags &= ~QS_TIMEOUT;
1627 		TXQ_UNLOCK(qs);
1628 	}
1629 }
1630 
1631 static void
1632 cxgb_start_locked(struct sge_qset *qs)
1633 {
1634 	struct mbuf *m_head = NULL;
1635 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1636 	struct port_info *pi = qs->port;
1637 	struct ifnet *ifp = pi->ifp;
1638 
1639 	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1640 		reclaim_completed_tx(qs, 0, TXQ_ETH);
1641 
1642 	if (!pi->link_config.link_ok) {
1643 		TXQ_RING_FLUSH(qs);
1644 		return;
1645 	}
1646 	TXQ_LOCK_ASSERT(qs);
1647 	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1648 	    pi->link_config.link_ok) {
1649 		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1650 
1651 		if (txq->size - txq->in_use <= TX_MAX_DESC)
1652 			break;
1653 
1654 		if ((m_head = cxgb_dequeue(qs)) == NULL)
1655 			break;
1656 		/*
1657 		 *  Encapsulation can modify our pointer, and/or make it
1658 		 *  NULL on failure.  In that event, we can't requeue.
1659 		 */
1660 		if (t3_encap(qs, &m_head) || m_head == NULL)
1661 			break;
1662 
1663 		m_head = NULL;
1664 	}
1665 
1666 	if (txq->db_pending)
1667 		check_ring_tx_db(pi->adapter, txq, 1);
1668 
1669 	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1670 	    pi->link_config.link_ok)
1671 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1672 		    qs, txq->txq_timer.c_cpu);
1673 	if (m_head != NULL)
1674 		m_freem(m_head);
1675 }
1676 
1677 static int
1678 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1679 {
1680 	struct port_info *pi = qs->port;
1681 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1682 	struct buf_ring *br = txq->txq_mr;
1683 	int error, avail;
1684 
1685 	avail = txq->size - txq->in_use;
1686 	TXQ_LOCK_ASSERT(qs);
1687 
1688 	/*
1689 	 * We can only do a direct transmit if the following are true:
1690 	 * - we aren't coalescing (ring < 3/4 full)
1691 	 * - the link is up -- checked in caller
1692 	 * - there are no packets enqueued already
1693 	 * - there is space in hardware transmit queue
1694 	 */
1695 	if (check_pkt_coalesce(qs) == 0 &&
1696 	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1697 		if (t3_encap(qs, &m)) {
1698 			if (m != NULL &&
1699 			    (error = drbr_enqueue(ifp, br, m)) != 0)
1700 				return (error);
1701 		} else {
1702 			if (txq->db_pending)
1703 				check_ring_tx_db(pi->adapter, txq, 1);
1704 
1705 			/*
1706 			 * We've bypassed the buf ring so we need to update
1707 			 * the stats directly
1708 			 */
1709 			txq->txq_direct_packets++;
1710 			txq->txq_direct_bytes += m->m_pkthdr.len;
1711 		}
1712 	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1713 		return (error);
1714 
1715 	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1716 	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1717 	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1718 		cxgb_start_locked(qs);
1719 	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1720 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1721 		    qs, txq->txq_timer.c_cpu);
1722 	return (0);
1723 }
1724 
1725 int
1726 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1727 {
1728 	struct sge_qset *qs;
1729 	struct port_info *pi = ifp->if_softc;
1730 	int error, qidx = pi->first_qset;
1731 
1732 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1733 	    ||(!pi->link_config.link_ok)) {
1734 		m_freem(m);
1735 		return (0);
1736 	}
1737 
1738 	/* check if flowid is set */
1739 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1740 		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1741 
1742 	qs = &pi->adapter->sge.qs[qidx];
1743 
1744 	if (TXQ_TRYLOCK(qs)) {
1745 		/* XXX running */
1746 		error = cxgb_transmit_locked(ifp, qs, m);
1747 		TXQ_UNLOCK(qs);
1748 	} else
1749 		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1750 	return (error);
1751 }
1752 
1753 void
1754 cxgb_qflush(struct ifnet *ifp)
1755 {
1756 	/*
1757 	 * Flush any enqueued mbufs in the buf_rings
1758 	 * and in the transmit queues.
1759 	 * No-op for now.
1760 	 */
1761 	return;
1762 }
1763 
1764 /**
1765  *	write_imm - write a packet into a Tx descriptor as immediate data
1766  *	@d: the Tx descriptor to write
1767  *	@src: the packet data, beginning with a work request header
1768  *	@len: the length of packet data to write as immediate data
1769  *	@gen: the generation bit value to write
1770  *
1771  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1772  *	contains a work request at its beginning.  We must write the packet
1773  *	carefully so the SGE doesn't read accidentally before it's written in
1774  *	its entirety.
1775  */
1776 static __inline void
1777 write_imm(struct tx_desc *d, caddr_t src,
1778 	  unsigned int len, unsigned int gen)
1779 {
1780 	struct work_request_hdr *from = (struct work_request_hdr *)src;
1781 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1782 	uint32_t wr_hi, wr_lo;
1783 
1784 	KASSERT(len <= WR_LEN && len >= sizeof(*from),
1785 	    ("%s: invalid len %d", __func__, len));
1786 
1787 	memcpy(&to[1], &from[1], len - sizeof(*from));
1788 	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1789 	    V_WR_BCNTLFLT(len & 7));
1790 	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1791 	set_wr_hdr(to, wr_hi, wr_lo);
1792 	wmb();
1793 	wr_gen2(d, gen);
1794 }
1795 
1796 /**
1797  *	check_desc_avail - check descriptor availability on a send queue
1798  *	@adap: the adapter
1799  *	@q: the TX queue
1800  *	@m: the packet needing the descriptors
1801  *	@ndesc: the number of Tx descriptors needed
1802  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1803  *
1804  *	Checks if the requested number of Tx descriptors is available on an
1805  *	SGE send queue.  If the queue is already suspended or not enough
1806  *	descriptors are available the packet is queued for later transmission.
1807  *	Must be called with the Tx queue locked.
1808  *
1809  *	Returns 0 if enough descriptors are available, 1 if there aren't
1810  *	enough descriptors and the packet has been queued, and 2 if the caller
1811  *	needs to retry because there weren't enough descriptors at the
1812  *	beginning of the call but some freed up in the mean time.
1813  *	beginning of the call but some freed up in the meantime.
1814 static __inline int
1815 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1816 		 struct mbuf *m, unsigned int ndesc,
1817 		 unsigned int qid)
1818 {
1819 	/*
1820 	 * XXX We currently only use this for checking the control queue;
1821 	 * the control queue is only used for binding qsets, which happens
1822 	 * at init time, so we are guaranteed enough descriptors.
1823 	 */
1824 	if (__predict_false(mbufq_len(&q->sendq))) {
1825 addq_exit:	(void)mbufq_enqueue(&q->sendq, m);
1826 		return 1;
1827 	}
1828 	if (__predict_false(q->size - q->in_use < ndesc)) {
1829 
1830 		struct sge_qset *qs = txq_to_qset(q, qid);
1831 
1832 		setbit(&qs->txq_stopped, qid);
1833 		if (should_restart_tx(q) &&
1834 		    test_and_clear_bit(qid, &qs->txq_stopped))
1835 			return 2;
1836 
1837 		q->stops++;
1838 		goto addq_exit;
1839 	}
1840 	return 0;
1841 }
1842 
1843 
1844 /**
1845  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1846  *	@q: the SGE control Tx queue
1847  *
1848  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1849  *	that send only immediate data (presently just the control queues) and
1850  *	thus do not have any mbufs
1851  *	thus do not have any mbufs.
1852 static __inline void
1853 reclaim_completed_tx_imm(struct sge_txq *q)
1854 {
1855 	unsigned int reclaim = q->processed - q->cleaned;
1856 
1857 	q->in_use -= reclaim;
1858 	q->cleaned += reclaim;
1859 }
1860 
1861 /**
1862  *	ctrl_xmit - send a packet through an SGE control Tx queue
1863  *	@adap: the adapter
1864  *	@q: the control queue
1865  *	@m: the packet
1866  *
1867  *	Send a packet through an SGE control Tx queue.  Packets sent through
1868  *	a control queue must fit entirely as immediate data in a single Tx
1869  *	descriptor and have no page fragments.
1870  */
1871 static int
1872 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1873 {
1874 	int ret;
1875 	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1876 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1877 
1878 	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1879 
1880 	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1881 	wrp->wrh_lo = htonl(V_WR_TID(q->token));
1882 
1883 	TXQ_LOCK(qs);
1884 again:	reclaim_completed_tx_imm(q);
1885 
1886 	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1887 	if (__predict_false(ret)) {
1888 		if (ret == 1) {
1889 			TXQ_UNLOCK(qs);
1890 			return (ENOSPC);
1891 		}
1892 		goto again;
1893 	}
1894 	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1895 
1896 	q->in_use++;
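	/*
	 * Advance the producer index; on wrap-around, flip the queue's
	 * generation bit so the SGE can distinguish newly written
	 * descriptors from stale ones left from the previous pass around
	 * the ring.
	 */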
1897 	if (++q->pidx >= q->size) {
1898 		q->pidx = 0;
1899 		q->gen ^= 1;
1900 	}
1901 	TXQ_UNLOCK(qs);
1902 	wmb();
1903 	t3_write_reg(adap, A_SG_KDOORBELL,
1904 	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1905 
1906 	m_free(m);
1907 	return (0);
1908 }
1909 
1910 
1911 /**
1912  *	restart_ctrlq - restart a suspended control queue
1913  *	@qs: the queue set containing the control queue
1914  *
1915  *	Resumes transmission on a suspended Tx control queue.
1916  */
1917 static void
1918 restart_ctrlq(void *data, int npending)
1919 {
1920 	struct mbuf *m;
1921 	struct sge_qset *qs = (struct sge_qset *)data;
1922 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1923 	adapter_t *adap = qs->port->adapter;
1924 
1925 	TXQ_LOCK(qs);
1926 again:	reclaim_completed_tx_imm(q);
1927 
1928 	while (q->in_use < q->size &&
1929 	       (m = mbufq_dequeue(&q->sendq)) != NULL) {
1930 
1931 		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1932 		m_free(m);
1933 
1934 		if (++q->pidx >= q->size) {
1935 			q->pidx = 0;
1936 			q->gen ^= 1;
1937 		}
1938 		q->in_use++;
1939 	}
1940 	if (mbufq_len(&q->sendq)) {
1941 		setbit(&qs->txq_stopped, TXQ_CTRL);
1942 
1943 		if (should_restart_tx(q) &&
1944 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1945 			goto again;
1946 		q->stops++;
1947 	}
1948 	TXQ_UNLOCK(qs);
1949 	t3_write_reg(adap, A_SG_KDOORBELL,
1950 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1951 }
1952 
1953 
1954 /*
1955  * Send a management message through control queue 0
1956  */
1957 int
1958 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1959 {
1960 	return ctrl_xmit(adap, &adap->sge.qs[0], m);
1961 }
1962 
1963 /**
1964  *	free_qset - free the resources of an SGE queue set
1965  *	@sc: the controller owning the queue set
1966  *	@q: the queue set
1967  *
1968  *	Release the HW and SW resources associated with an SGE queue set, such
1969  *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
1970  *	queue set must be quiesced prior to calling this.
1971  */
1972 static void
1973 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1974 {
1975 	int i;
1976 
1977 	reclaim_completed_tx(q, 0, TXQ_ETH);
1978 	if (q->txq[TXQ_ETH].txq_mr != NULL)
1979 		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1980 	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1981 		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1982 		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1983 	}
1984 
1985 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1986 		if (q->fl[i].desc) {
1987 			mtx_lock_spin(&sc->sge.reg_lock);
1988 			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1989 			mtx_unlock_spin(&sc->sge.reg_lock);
1990 			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1991 			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1992 					q->fl[i].desc_map);
1993 			bus_dma_tag_destroy(q->fl[i].desc_tag);
1994 			bus_dma_tag_destroy(q->fl[i].entry_tag);
1995 		}
1996 		if (q->fl[i].sdesc) {
1997 			free_rx_bufs(sc, &q->fl[i]);
1998 			free(q->fl[i].sdesc, M_DEVBUF);
1999 		}
2000 	}
2001 
2002 	mtx_unlock(&q->lock);
2003 	MTX_DESTROY(&q->lock);
2004 	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2005 		if (q->txq[i].desc) {
2006 			mtx_lock_spin(&sc->sge.reg_lock);
2007 			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2008 			mtx_unlock_spin(&sc->sge.reg_lock);
2009 			bus_dmamap_unload(q->txq[i].desc_tag,
2010 					q->txq[i].desc_map);
2011 			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2012 					q->txq[i].desc_map);
2013 			bus_dma_tag_destroy(q->txq[i].desc_tag);
2014 			bus_dma_tag_destroy(q->txq[i].entry_tag);
2015 		}
2016 		if (q->txq[i].sdesc) {
2017 			free(q->txq[i].sdesc, M_DEVBUF);
2018 		}
2019 	}
2020 
2021 	if (q->rspq.desc) {
2022 		mtx_lock_spin(&sc->sge.reg_lock);
2023 		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2024 		mtx_unlock_spin(&sc->sge.reg_lock);
2025 
2026 		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2027 		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2028 			        q->rspq.desc_map);
2029 		bus_dma_tag_destroy(q->rspq.desc_tag);
2030 		MTX_DESTROY(&q->rspq.lock);
2031 	}
2032 
2033 #if defined(INET6) || defined(INET)
2034 	tcp_lro_free(&q->lro.ctrl);
2035 #endif
2036 
2037 	bzero(q, sizeof(*q));
2038 }
2039 
2040 /**
2041  *	t3_free_sge_resources - free SGE resources
2042  *	@sc: the adapter softc
2043  *
2044  *	Frees resources used by the SGE queue sets.
2045  */
2046 void
2047 t3_free_sge_resources(adapter_t *sc, int nqsets)
2048 {
2049 	int i;
2050 
2051 	for (i = 0; i < nqsets; ++i) {
2052 		TXQ_LOCK(&sc->sge.qs[i]);
2053 		t3_free_qset(sc, &sc->sge.qs[i]);
2054 	}
2055 }
2056 
2057 /**
2058  *	t3_sge_start - enable SGE
2059  *	@sc: the controller softc
2060  *
2061  *	Enables the SGE for DMAs.  This is the last step in starting packet
2062  *	transfers.
2063  */
2064 void
2065 t3_sge_start(adapter_t *sc)
2066 {
2067 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2068 }
2069 
2070 /**
2071  *	t3_sge_stop - disable SGE operation
2072  *	@sc: the adapter
2073  *
2074  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2075  *	from error interrupts) or from normal process context.  In the latter
2076  *	case it also disables any pending queue restart tasks.  Note that
2077  *	if it is called in interrupt context it cannot disable the restart
2078  *	tasks because it cannot wait; however, the tasks will have no effect
2079  *	since the doorbells are disabled and the driver will call this again
2080  *	later from process context, at which time the tasks will be stopped
2081  *	if they are still running.
2082  */
2083 void
2084 t3_sge_stop(adapter_t *sc)
2085 {
2086 	int i, nqsets;
2087 
2088 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2089 
2090 	if (sc->tq == NULL)
2091 		return;
2092 
2093 	for (nqsets = i = 0; i < (sc)->params.nports; i++)
2094 		nqsets += sc->port[i].nqsets;
2095 #ifdef notyet
2096 	/*
2097 	 *
2098 	 * XXX
2099 	 */
2100 	for (i = 0; i < nqsets; ++i) {
2101 		struct sge_qset *qs = &sc->sge.qs[i];
2102 
2103 		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2104 		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2105 	}
2106 #endif
2107 }
2108 
2109 /**
2110  *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
2111  *	@qs: the queue set containing the Tx queue
2112  *	@reclaimable: the number of descriptors to reclaim
2113  *	@queue: the index of the Tx queue within the queue set
2114  *	        (TXQ_ETH, TXQ_OFLD, or TXQ_CTRL)
2115  *
2116  *	Reclaims Tx descriptors from an SGE Tx queue and frees the
2117  *	associated Tx buffers.  Software descriptors that have no mbuf
2118  *	attached are counted in txq_skipped rather than freed.
2119  *
2120  *	Called with the Tx queue lock held.
2121  */
2122 void
2123 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2124 {
2125 	struct tx_sw_desc *txsd;
2126 	unsigned int cidx, mask;
2127 	struct sge_txq *q = &qs->txq[queue];
2128 
2129 #ifdef T3_TRACE
2130 	T3_TRACE2(sc->tb[q->cntxt_id & 7],
2131 		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2132 #endif
2133 	cidx = q->cidx;
2134 	mask = q->size - 1;
2135 	txsd = &q->sdesc[cidx];
2136 
2137 	mtx_assert(&qs->lock, MA_OWNED);
2138 	while (reclaimable--) {
2139 		prefetch(q->sdesc[(cidx + 1) & mask].m);
2140 		prefetch(q->sdesc[(cidx + 2) & mask].m);
2141 
2142 		if (txsd->m != NULL) {
2143 			if (txsd->flags & TX_SW_DESC_MAPPED) {
2144 				bus_dmamap_unload(q->entry_tag, txsd->map);
2145 				txsd->flags &= ~TX_SW_DESC_MAPPED;
2146 			}
2147 			m_freem_list(txsd->m);
2148 			txsd->m = NULL;
2149 		} else
2150 			q->txq_skipped++;
2151 
2152 		++txsd;
2153 		if (++cidx == q->size) {
2154 			cidx = 0;
2155 			txsd = q->sdesc;
2156 		}
2157 	}
2158 	q->cidx = cidx;
2159 
2160 }
2161 
2162 /**
2163  *	is_new_response - check if a response is newly written
2164  *	@r: the response descriptor
2165  *	@q: the response queue
2166  *
2167  *	Returns true if a response descriptor contains a yet unprocessed
2168  *	response.
2169  */
2170 static __inline int
2171 is_new_response(const struct rsp_desc *r,
2172     const struct sge_rspq *q)
2173 {
2174 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2175 }
2176 
2177 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2178 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2179 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2180 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2181 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
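
/*
 * RSPD_CTRL_MASK selects the response-descriptor flag bits that carry Tx
 * control information: the GTS indications above plus the completion-credit
 * counts for the three Tx queues, all of which are consumed by
 * handle_rsp_cntrl_info().
 */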
2182 
2183 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2184 #define NOMEM_INTR_DELAY 2500
2185 
2186 #ifdef TCP_OFFLOAD
2187 /**
2188  *	write_ofld_wr - write an offload work request
2189  *	@adap: the adapter
2190  *	@m: the packet to send
2191  *	@q: the Tx queue
2192  *	@pidx: index of the first Tx descriptor to write
2193  *	@gen: the generation value to use
2194  *	@ndesc: number of descriptors the packet will occupy
2195  *
2196  *	Write an offload work request to send the supplied packet.  The packet
2197  *	data already carry the work request with most fields populated.
2198  */
2199 static void
2200 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2201     unsigned int pidx, unsigned int gen, unsigned int ndesc)
2202 {
2203 	unsigned int sgl_flits, flits;
2204 	int i, idx, nsegs, wrlen;
2205 	struct work_request_hdr *from;
2206 	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2207 	struct tx_desc *d = &q->desc[pidx];
2208 	struct txq_state txqs;
2209 	struct sglist_seg *segs;
2210 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2211 	struct sglist *sgl;
2212 
2213 	from = (void *)(oh + 1);	/* Start of WR within mbuf */
2214 	wrlen = m->m_len - sizeof(*oh);
2215 
2216 	if (!(oh->flags & F_HDR_SGL)) {
2217 		write_imm(d, (caddr_t)from, wrlen, gen);
2218 
2219 		/*
2220 		 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2221 		 * t3_push_frames and freed in wr_ack.  Others, like those sent
2222 		 * down by close_conn, t3_send_reset, etc. should be freed here.
2223 		 */
2224 		if (!(oh->flags & F_HDR_DF))
2225 			m_free(m);
2226 		return;
2227 	}
2228 
2229 	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2230 
2231 	sgl = oh->sgl;
2232 	flits = wrlen / 8;
2233 	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2234 
2235 	nsegs = sgl->sg_nseg;
2236 	segs = sgl->sg_segs;
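	/*
	 * Pack the scatter/gather list: each struct sg_ent holds two
	 * length/address pairs, so idx toggles between the two slots and
	 * sgp advances only after every second segment.  A half-filled
	 * final entry is terminated with a zero-length pair below.
	 */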
2237 	for (idx = 0, i = 0; i < nsegs; i++) {
2238 		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2239 		if (i && idx == 0)
2240 			++sgp;
2241 		sgp->len[idx] = htobe32(segs[i].ss_len);
2242 		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2243 		idx ^= 1;
2244 	}
2245 	if (idx) {
2246 		sgp->len[idx] = 0;
2247 		sgp->addr[idx] = 0;
2248 	}
2249 
2250 	sgl_flits = sgl_len(nsegs);
2251 	txqs.gen = gen;
2252 	txqs.pidx = pidx;
2253 	txqs.compl = 0;
2254 
2255 	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2256 	    from->wrh_hi, from->wrh_lo);
2257 }
2258 
2259 /**
2260  *	ofld_xmit - send a packet through an offload queue
2261  *	@adap: the adapter
2262  *	@q: the Tx offload queue
2263  *	@m: the packet
2264  *
2265  *	Send an offload packet through an SGE offload queue.
2266  */
2267 static int
2268 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2269 {
2270 	int ret;
2271 	unsigned int ndesc;
2272 	unsigned int pidx, gen;
2273 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2274 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2275 
2276 	ndesc = G_HDR_NDESC(oh->flags);
2277 
2278 	TXQ_LOCK(qs);
2279 again:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
2280 	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2281 	if (__predict_false(ret)) {
2282 		if (ret == 1) {
2283 			TXQ_UNLOCK(qs);
2284 			return (EINTR);
2285 		}
2286 		goto again;
2287 	}
2288 
2289 	gen = q->gen;
2290 	q->in_use += ndesc;
2291 	pidx = q->pidx;
2292 	q->pidx += ndesc;
2293 	if (q->pidx >= q->size) {
2294 		q->pidx -= q->size;
2295 		q->gen ^= 1;
2296 	}
2297 
2298 	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2299 	check_ring_tx_db(adap, q, 1);
2300 	TXQ_UNLOCK(qs);
2301 
2302 	return (0);
2303 }
2304 
2305 /**
2306  *	restart_offloadq - restart a suspended offload queue
2307  *	@qs: the queue set containing the offload queue
2308  *
2309  *	Resumes transmission on a suspended Tx offload queue.
2310  */
2311 static void
2312 restart_offloadq(void *data, int npending)
2313 {
2314 	struct mbuf *m;
2315 	struct sge_qset *qs = data;
2316 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2317 	adapter_t *adap = qs->port->adapter;
2318 	int cleaned;
2319 
2320 	TXQ_LOCK(qs);
2321 again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2322 
2323 	while ((m = mbufq_first(&q->sendq)) != NULL) {
2324 		unsigned int gen, pidx;
2325 		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2326 		unsigned int ndesc = G_HDR_NDESC(oh->flags);
2327 
2328 		if (__predict_false(q->size - q->in_use < ndesc)) {
2329 			setbit(&qs->txq_stopped, TXQ_OFLD);
2330 			if (should_restart_tx(q) &&
2331 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2332 				goto again;
2333 			q->stops++;
2334 			break;
2335 		}
2336 
2337 		gen = q->gen;
2338 		q->in_use += ndesc;
2339 		pidx = q->pidx;
2340 		q->pidx += ndesc;
2341 		if (q->pidx >= q->size) {
2342 			q->pidx -= q->size;
2343 			q->gen ^= 1;
2344 		}
2345 
2346 		(void)mbufq_dequeue(&q->sendq);
2347 		TXQ_UNLOCK(qs);
2348 		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2349 		TXQ_LOCK(qs);
2350 	}
2351 #if USE_GTS
2352 	set_bit(TXQ_RUNNING, &q->flags);
2353 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
2354 #endif
2355 	TXQ_UNLOCK(qs);
2356 	wmb();
2357 	t3_write_reg(adap, A_SG_KDOORBELL,
2358 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2359 }
2360 
2361 /**
2362  *	t3_offload_tx - send an offload packet
2363  *	@m: the packet
2364  *
2365  *	Sends an offload packet.  We use the packet priority to select the
2366  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
2367  *	should be sent as regular or control, bits 1-3 select the queue set.
2368  */
2369 int
2370 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2371 {
2372 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2373 	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2374 
2375 	if (oh->flags & F_HDR_CTRL) {
2376 		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
2377 		return (ctrl_xmit(sc, qs, m));
2378 	} else
2379 		return (ofld_xmit(sc, qs, m));
2380 }
2381 #endif
2382 
2383 static void
2384 restart_tx(struct sge_qset *qs)
2385 {
2386 	struct adapter *sc = qs->port->adapter;
2387 
2388 	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2389 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2390 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2391 		qs->txq[TXQ_OFLD].restarts++;
2392 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2393 	}
2394 
2395 	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2396 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2397 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2398 		qs->txq[TXQ_CTRL].restarts++;
2399 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2400 	}
2401 }
2402 
2403 /**
2404  *	t3_sge_alloc_qset - initialize an SGE queue set
2405  *	@sc: the controller softc
2406  *	@id: the queue set id
2407  *	@nports: how many Ethernet ports will be using this queue set
2408  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
2409  *	@p: configuration parameters for this queue set
2410  *	@ntxq: number of Tx queues for the queue set
2411  *	@pi: port info for queue set
2412  *
2413  *	Allocate resources and initialize an SGE queue set.  A queue set
2414  *	comprises a response queue, two Rx free-buffer queues, and up to 3
2415  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
2416  *	queue, offload queue, and control queue.
2417  */
2418 int
2419 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2420 		  const struct qset_params *p, int ntxq, struct port_info *pi)
2421 {
2422 	struct sge_qset *q = &sc->sge.qs[id];
2423 	int i, ret = 0;
2424 
2425 	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2426 	q->port = pi;
2427 	q->adap = sc;
2428 
2429 	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2430 	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2431 		device_printf(sc->dev, "failed to allocate mbuf ring\n");
2432 		goto err;
2433 	}
2434 	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2435 	    M_NOWAIT | M_ZERO)) == NULL) {
2436 		device_printf(sc->dev, "failed to allocate ifq\n");
2437 		goto err;
2438 	}
2439 	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2440 	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2441 	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
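	/* Spread each queue set's timer and watchdog callouts across CPUs. */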
2442 	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2443 	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2444 
2445 	init_qset_cntxt(q, id);
2446 	q->idx = id;
2447 	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2448 		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2449 		    &q->fl[0].desc, &q->fl[0].sdesc,
2450 		    &q->fl[0].desc_tag, &q->fl[0].desc_map,
2451 		    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2452 		printf("error %d from alloc ring fl0\n", ret);
2453 		goto err;
2454 	}
2455 
2456 	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2457 		    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2458 		    &q->fl[1].desc, &q->fl[1].sdesc,
2459 		    &q->fl[1].desc_tag, &q->fl[1].desc_map,
2460 		    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2461 		printf("error %d from alloc ring fl1\n", ret);
2462 		goto err;
2463 	}
2464 
2465 	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2466 		    &q->rspq.phys_addr, &q->rspq.desc, NULL,
2467 		    &q->rspq.desc_tag, &q->rspq.desc_map,
2468 		    NULL, NULL)) != 0) {
2469 		printf("error %d from alloc ring rspq\n", ret);
2470 		goto err;
2471 	}
2472 
2473 	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2474 	    device_get_unit(sc->dev), irq_vec_idx);
2475 	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2476 
2477 	for (i = 0; i < ntxq; ++i) {
2478 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2479 
2480 		if ((ret = alloc_ring(sc, p->txq_size[i],
2481 			    sizeof(struct tx_desc), sz,
2482 			    &q->txq[i].phys_addr, &q->txq[i].desc,
2483 			    &q->txq[i].sdesc, &q->txq[i].desc_tag,
2484 			    &q->txq[i].desc_map,
2485 			    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2486 			printf("error %d from alloc ring tx %i\n", ret, i);
2487 			goto err;
2488 		}
2489 		mbufq_init(&q->txq[i].sendq, INT_MAX);
2490 		q->txq[i].gen = 1;
2491 		q->txq[i].size = p->txq_size[i];
2492 	}
2493 
2494 #ifdef TCP_OFFLOAD
2495 	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2496 #endif
2497 	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2498 	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2499 	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2500 
2501 	q->fl[0].gen = q->fl[1].gen = 1;
2502 	q->fl[0].size = p->fl_size;
2503 	q->fl[1].size = p->jumbo_size;
2504 
2505 	q->rspq.gen = 1;
2506 	q->rspq.cidx = 0;
2507 	q->rspq.size = p->rspq_size;
2508 
2509 	q->txq[TXQ_ETH].stop_thres = nports *
2510 	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2511 
2512 	q->fl[0].buf_size = MCLBYTES;
2513 	q->fl[0].zone = zone_pack;
2514 	q->fl[0].type = EXT_PACKET;
2515 
2516 	if (p->jumbo_buf_size ==  MJUM16BYTES) {
2517 		q->fl[1].zone = zone_jumbo16;
2518 		q->fl[1].type = EXT_JUMBO16;
2519 	} else if (p->jumbo_buf_size ==  MJUM9BYTES) {
2520 		q->fl[1].zone = zone_jumbo9;
2521 		q->fl[1].type = EXT_JUMBO9;
2522 	} else if (p->jumbo_buf_size ==  MJUMPAGESIZE) {
2523 		q->fl[1].zone = zone_jumbop;
2524 		q->fl[1].type = EXT_JUMBOP;
2525 	} else {
2526 		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2527 		ret = EDOOFUS;
2528 		goto err;
2529 	}
2530 	q->fl[1].buf_size = p->jumbo_buf_size;
2531 
2532 	/* Allocate and setup the lro_ctrl structure */
2533 	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2534 #if defined(INET6) || defined(INET)
2535 	ret = tcp_lro_init(&q->lro.ctrl);
2536 	if (ret) {
2537 		printf("error %d from tcp_lro_init\n", ret);
2538 		goto err;
2539 	}
2540 #endif
2541 	q->lro.ctrl.ifp = pi->ifp;
2542 
2543 	mtx_lock_spin(&sc->sge.reg_lock);
2544 	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2545 				   q->rspq.phys_addr, q->rspq.size,
2546 				   q->fl[0].buf_size, 1, 0);
2547 	if (ret) {
2548 		printf("error %d from t3_sge_init_rspcntxt\n", ret);
2549 		goto err_unlock;
2550 	}
2551 
2552 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2553 		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2554 					  q->fl[i].phys_addr, q->fl[i].size,
2555 					  q->fl[i].buf_size, p->cong_thres, 1,
2556 					  0);
2557 		if (ret) {
2558 			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2559 			goto err_unlock;
2560 		}
2561 	}
2562 
2563 	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2564 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2565 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2566 				 1, 0);
2567 	if (ret) {
2568 		printf("error %d from t3_sge_init_ecntxt\n", ret);
2569 		goto err_unlock;
2570 	}
2571 
2572 	if (ntxq > 1) {
2573 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2574 					 USE_GTS, SGE_CNTXT_OFLD, id,
2575 					 q->txq[TXQ_OFLD].phys_addr,
2576 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
2577 		if (ret) {
2578 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2579 			goto err_unlock;
2580 		}
2581 	}
2582 
2583 	if (ntxq > 2) {
2584 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2585 					 SGE_CNTXT_CTRL, id,
2586 					 q->txq[TXQ_CTRL].phys_addr,
2587 					 q->txq[TXQ_CTRL].size,
2588 					 q->txq[TXQ_CTRL].token, 1, 0);
2589 		if (ret) {
2590 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2591 			goto err_unlock;
2592 		}
2593 	}
2594 
2595 	mtx_unlock_spin(&sc->sge.reg_lock);
2596 	t3_update_qset_coalesce(q, p);
2597 
2598 	refill_fl(sc, &q->fl[0], q->fl[0].size);
2599 	refill_fl(sc, &q->fl[1], q->fl[1].size);
2600 	refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2601 
2602 	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2603 		     V_NEWTIMER(q->rspq.holdoff_tmr));
2604 
2605 	return (0);
2606 
2607 err_unlock:
2608 	mtx_unlock_spin(&sc->sge.reg_lock);
2609 err:
2610 	TXQ_LOCK(q);
2611 	t3_free_qset(sc, q);
2612 
2613 	return (ret);
2614 }
2615 
2616 /*
2617  * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2618  * ethernet data.  Hardware assistance with various checksums and any vlan tag
2619  * will also be taken into account here.
2620  */
2621 void
2622 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2623 {
2624 	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2625 	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2626 	struct ifnet *ifp = pi->ifp;
2627 
2628 	if (cpl->vlan_valid) {
2629 		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2630 		m->m_flags |= M_VLANTAG;
2631 	}
2632 
2633 	m->m_pkthdr.rcvif = ifp;
2634 	/*
2635 	 * adjust after conversion to mbuf chain
2636 	 */
2637 	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2638 	m->m_len -= (sizeof(*cpl) + ethpad);
2639 	m->m_data += (sizeof(*cpl) + ethpad);
2640 
2641 	if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2642 		struct ether_header *eh = mtod(m, void *);
2643 		uint16_t eh_type;
2644 
2645 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2646 			struct ether_vlan_header *evh = mtod(m, void *);
2647 
2648 			eh_type = evh->evl_proto;
2649 		} else
2650 			eh_type = eh->ether_type;
2651 
2652 		if (ifp->if_capenable & IFCAP_RXCSUM &&
2653 		    eh_type == htons(ETHERTYPE_IP)) {
2654 			m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2655 			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2656 			m->m_pkthdr.csum_data = 0xffff;
2657 		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2658 		    eh_type == htons(ETHERTYPE_IPV6)) {
2659 			m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2660 			    CSUM_PSEUDO_HDR);
2661 			m->m_pkthdr.csum_data = 0xffff;
2662 		}
2663 	}
2664 }
2665 
2666 /**
2667  *	get_packet - return the next ingress packet buffer from a free list
2668  *	@adap: the adapter that received the packet
2669  *	@drop_thres: # of remaining buffers before we start dropping packets
2670  *	@qs: the qset that the SGE free list holding the packet belongs to
2671  *	@mh: the mbuf header; holds pointers to the head and tail of the mbuf chain
2672  *	@r: the response descriptor
2673  *
2674  *	Get the next packet from a free list and complete setup of the
2675  *	mbuf.  If the packet is small we make a copy and recycle the
2676  *	original buffer, otherwise we use the original buffer itself.  If a
2677  *	positive drop threshold is supplied packets are dropped and their
2678  *	buffers recycled if (a) the number of remaining buffers is under the
2679  *	threshold and the packet is too big to copy, or (b) the packet should
2680  *	be copied but there is no memory for the copy.
2681  */
2682 static int
2683 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2684     struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2685 {
2686 
2687 	unsigned int len_cq =  ntohl(r->len_cq);
2688 	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2689 	int mask, cidx = fl->cidx;
2690 	struct rx_sw_desc *sd = &fl->sdesc[cidx];
2691 	uint32_t len = G_RSPD_LEN(len_cq);
2692 	uint32_t flags = M_EXT;
2693 	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2694 	caddr_t cl;
2695 	struct mbuf *m;
2696 	int ret = 0;
2697 
2698 	mask = fl->size - 1;
2699 	prefetch(fl->sdesc[(cidx + 1) & mask].m);
2700 	prefetch(fl->sdesc[(cidx + 2) & mask].m);
2701 	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2702 	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2703 
2704 	fl->credits--;
2705 	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2706 
2707 	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2708 	    sopeop == RSPQ_SOP_EOP) {
2709 		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2710 			goto skip_recycle;
2711 		cl = mtod(m, void *);
2712 		memcpy(cl, sd->rxsd_cl, len);
2713 		recycle_rx_buf(adap, fl, fl->cidx);
2714 		m->m_pkthdr.len = m->m_len = len;
2715 		m->m_flags = 0;
2716 		mh->mh_head = mh->mh_tail = m;
2717 		ret = 1;
2718 		goto done;
2719 	} else {
2720 	skip_recycle:
2721 		bus_dmamap_unload(fl->entry_tag, sd->map);
2722 		cl = sd->rxsd_cl;
2723 		m = sd->m;
2724 
2725 		if ((sopeop == RSPQ_SOP_EOP) ||
2726 		    (sopeop == RSPQ_SOP))
2727 			flags |= M_PKTHDR;
2728 		m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
2729 		if (fl->zone == zone_pack) {
2730 			/*
2731 			 * restore clobbered data pointer
2732 			 */
2733 			m->m_data = m->m_ext.ext_buf;
2734 		} else {
2735 			m_cljset(m, cl, fl->type);
2736 		}
2737 		m->m_len = len;
2738 	}
2739 	switch(sopeop) {
2740 	case RSPQ_SOP_EOP:
2741 		ret = 1;
2742 		/* FALLTHROUGH */
2743 	case RSPQ_SOP:
2744 		mh->mh_head = mh->mh_tail = m;
2745 		m->m_pkthdr.len = len;
2746 		break;
2747 	case RSPQ_EOP:
2748 		ret = 1;
2749 		/* FALLTHROUGH */
2750 	case RSPQ_NSOP_NEOP:
2751 		if (mh->mh_tail == NULL) {
2752 			log(LOG_ERR, "discarding intermediate descriptor entry\n");
2753 			m_freem(m);
2754 			break;
2755 		}
2756 		mh->mh_tail->m_next = m;
2757 		mh->mh_tail = m;
2758 		mh->mh_head->m_pkthdr.len += len;
2759 		break;
2760 	}
2761 	if (cxgb_debug)
2762 		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2763 done:
2764 	if (++fl->cidx == fl->size)
2765 		fl->cidx = 0;
2766 
2767 	return (ret);
2768 }
2769 
2770 /**
2771  *	handle_rsp_cntrl_info - handles control information in a response
2772  *	@qs: the queue set corresponding to the response
2773  *	@flags: the response control flags
2774  *
2775  *	Handles the control information of an SGE response, such as GTS
2776  *	indications and completion credits for the queue set's Tx queues.
2777  *	HW coalesces credits, we don't do any extra SW coalescing.
2778  */
2779 static __inline void
2780 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2781 {
2782 	unsigned int credits;
2783 
2784 #if USE_GTS
2785 	if (flags & F_RSPD_TXQ0_GTS)
2786 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2787 #endif
2788 	credits = G_RSPD_TXQ0_CR(flags);
2789 	if (credits)
2790 		qs->txq[TXQ_ETH].processed += credits;
2791 
2792 	credits = G_RSPD_TXQ2_CR(flags);
2793 	if (credits)
2794 		qs->txq[TXQ_CTRL].processed += credits;
2795 
2796 # if USE_GTS
2797 	if (flags & F_RSPD_TXQ1_GTS)
2798 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2799 # endif
2800 	credits = G_RSPD_TXQ1_CR(flags);
2801 	if (credits)
2802 		qs->txq[TXQ_OFLD].processed += credits;
2803 
2804 }
2805 
2806 static void
2807 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2808     unsigned int sleeping)
2809 {
2810 	;
2811 }
2812 
2813 /**
2814  *	process_responses - process responses from an SGE response queue
2815  *	@adap: the adapter
2816  *	@qs: the queue set to which the response queue belongs
2817  *	@budget: how many responses can be processed in this round
2818  *
2819  *	Process responses from an SGE response queue up to the supplied budget.
2820  *	Responses include received packets as well as credits and other events
2821  *	for the queues that belong to the response queue's queue set.
2822  *	A negative budget is effectively unlimited.
2823  *
2824  *	Additionally choose the interrupt holdoff time for the next interrupt
2825  *	on this queue.  If the system is under memory shortage use a fairly
2826  *	long delay to help recovery.
2827  */
2828 static int
2829 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2830 {
2831 	struct sge_rspq *rspq = &qs->rspq;
2832 	struct rsp_desc *r = &rspq->desc[rspq->cidx];
2833 	int budget_left = budget;
2834 	unsigned int sleeping = 0;
2835 #if defined(INET6) || defined(INET)
2836 	int lro_enabled = qs->lro.enabled;
2837 	int skip_lro;
2838 	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2839 #endif
2840 	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2841 #ifdef DEBUG
2842 	static int last_holdoff = 0;
2843 	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2844 		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2845 		last_holdoff = rspq->holdoff_tmr;
2846 	}
2847 #endif
2848 	rspq->next_holdoff = rspq->holdoff_tmr;
2849 
2850 	while (__predict_true(budget_left && is_new_response(r, rspq))) {
2851 		int eth, eop = 0, ethpad = 0;
2852 		uint32_t flags = ntohl(r->flags);
2853 		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2854 		uint8_t opcode = r->rss_hdr.opcode;
2855 
2856 		eth = (opcode == CPL_RX_PKT);
2857 
2858 		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2859 			struct mbuf *m;
2860 
2861 			if (cxgb_debug)
2862 				printf("async notification\n");
2863 
2864 			if (mh->mh_head == NULL) {
2865 				mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2866 				m = mh->mh_head;
2867 			} else {
2868 				m = m_gethdr(M_NOWAIT, MT_DATA);
2869 			}
2870 			if (m == NULL)
2871 				goto no_mem;
2872 
2873 			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2874 			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2875 			*mtod(m, char *) = CPL_ASYNC_NOTIF;
2876 			opcode = CPL_ASYNC_NOTIF;
2877 			eop = 1;
2878 			rspq->async_notif++;
2879 			goto skip;
2880 		} else if  (flags & F_RSPD_IMM_DATA_VALID) {
2881 			struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2882 
2883 			if (m == NULL) {
2884 		no_mem:
2885 				rspq->next_holdoff = NOMEM_INTR_DELAY;
2886 				budget_left--;
2887 				break;
2888 			}
2889 			if (mh->mh_head == NULL)
2890 				mh->mh_head = m;
2891 			else
2892 				mh->mh_tail->m_next = m;
2893 			mh->mh_tail = m;
2894 
2895 			get_imm_packet(adap, r, m);
2896 			mh->mh_head->m_pkthdr.len += m->m_len;
2897 			eop = 1;
2898 			rspq->imm_data++;
2899 		} else if (r->len_cq) {
2900 			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2901 
2902 			eop = get_packet(adap, drop_thresh, qs, mh, r);
2903 			if (eop) {
2904 				if (r->rss_hdr.hash_type && !adap->timestamp) {
2905 					M_HASHTYPE_SET(mh->mh_head, M_HASHTYPE_OPAQUE);
2906 					mh->mh_head->m_pkthdr.flowid = rss_hash;
2907 				}
2908 			}
2909 
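			/*
			 * Tunneled packets carry a CPL_RX_PKT header plus 2
			 * bytes of padding ahead of the Ethernet frame
			 * (presumably for alignment); t3_rx_eth() strips
			 * both.
			 */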
2910 			ethpad = 2;
2911 		} else {
2912 			rspq->pure_rsps++;
2913 		}
2914 	skip:
2915 		if (flags & RSPD_CTRL_MASK) {
2916 			sleeping |= flags & RSPD_GTS_MASK;
2917 			handle_rsp_cntrl_info(qs, flags);
2918 		}
2919 
2920 		if (!eth && eop) {
2921 			rspq->offload_pkts++;
2922 #ifdef TCP_OFFLOAD
2923 			adap->cpl_handler[opcode](qs, r, mh->mh_head);
2924 #else
2925 			m_freem(mh->mh_head);
2926 #endif
2927 			mh->mh_head = NULL;
2928 		} else if (eth && eop) {
2929 			struct mbuf *m = mh->mh_head;
2930 
2931 			t3_rx_eth(adap, m, ethpad);
2932 
2933 			/*
2934 			 * The T304 sends incoming packets on any qset.  If LRO
2935 			 * is also enabled, we could end up sending packet up
2936 			 * is also enabled, we could end up sending the packet up
2937 			 *
2938 			 * The mbuf's rcvif was derived from the cpl header and
2939 			 * is accurate.  Skip LRO and just use that.
2940 			 */
2941 #if defined(INET6) || defined(INET)
2942 			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2943 
2944 			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2945 			    && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2946 			    ) {
2947 				/* successfully queued for LRO */
2948 			} else
2949 #endif
2950 			{
2951 				/*
2952 				 * LRO not enabled, packet unsuitable for LRO,
2953 				 * or unable to queue.  Pass it up right now in
2954 				 * either case.
2955 				 */
2956 				struct ifnet *ifp = m->m_pkthdr.rcvif;
2957 				(*ifp->if_input)(ifp, m);
2958 			}
2959 			mh->mh_head = NULL;
2960 
2961 		}
2962 
2963 		r++;
2964 		if (__predict_false(++rspq->cidx == rspq->size)) {
2965 			rspq->cidx = 0;
2966 			rspq->gen ^= 1;
2967 			r = rspq->desc;
2968 		}
2969 
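		/*
		 * Return response-queue credits to the hardware in batches
		 * rather than one at a time, to limit register writes.
		 */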
2970 		if (++rspq->credits >= 64) {
2971 			refill_rspq(adap, rspq, rspq->credits);
2972 			rspq->credits = 0;
2973 		}
2974 		__refill_fl_lt(adap, &qs->fl[0], 32);
2975 		__refill_fl_lt(adap, &qs->fl[1], 32);
2976 		--budget_left;
2977 	}
2978 
2979 #if defined(INET6) || defined(INET)
2980 	/* Flush LRO */
2981 	while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2982 		struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2983 		SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2984 		tcp_lro_flush(lro_ctrl, queued);
2985 	}
2986 #endif
2987 
2988 	if (sleeping)
2989 		check_ring_db(adap, qs, sleeping);
2990 
2991 	mb();  /* commit Tx queue processed updates */
2992 	if (__predict_false(qs->txq_stopped > 1))
2993 		restart_tx(qs);
2994 
2995 	__refill_fl_lt(adap, &qs->fl[0], 512);
2996 	__refill_fl_lt(adap, &qs->fl[1], 512);
2997 	budget -= budget_left;
2998 	return (budget);
2999 }
3000 
3001 /*
3002  * A helper function that processes responses and issues GTS.
3003  */
3004 static __inline int
3005 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3006 {
3007 	int work;
3008 	static int last_holdoff = 0;
3009 
3010 	work = process_responses(adap, rspq_to_qset(rq), -1);
3011 
3012 	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3013 		printf("next_holdoff=%d\n", rq->next_holdoff);
3014 		last_holdoff = rq->next_holdoff;
3015 	}
3016 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3017 	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3018 
3019 	return (work);
3020 }
3021 
3022 
3023 /*
3024  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3025  * Handles data events from SGE response queues as well as error and other
3026  * async events as they all use the same interrupt pin.  We use one SGE
3027  * response queue per port in this mode and protect all response queues with
3028  * queue 0's lock.
3029  */
3030 void
3031 t3b_intr(void *data)
3032 {
3033 	uint32_t i, map;
3034 	adapter_t *adap = data;
3035 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3036 
3037 	t3_write_reg(adap, A_PL_CLI, 0);
3038 	map = t3_read_reg(adap, A_SG_DATA_INTR);
3039 
3040 	if (!map)
3041 		return;
3042 
3043 	if (__predict_false(map & F_ERRINTR)) {
3044 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3045 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3046 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3047 	}
3048 
3049 	mtx_lock(&q0->lock);
3050 	for_each_port(adap, i)
3051 	    if (map & (1 << i))
3052 			process_responses_gts(adap, &adap->sge.qs[i].rspq);
3053 	mtx_unlock(&q0->lock);
3054 }
3055 
3056 /*
3057  * The MSI interrupt handler.  This needs to handle data events from SGE
3058  * response queues as well as error and other async events as they all use
3059  * the same MSI vector.  We use one SGE response queue per port in this mode
3060  * and protect all response queues with queue 0's lock.
3061  */
3062 void
3063 t3_intr_msi(void *data)
3064 {
3065 	adapter_t *adap = data;
3066 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3067 	int i, new_packets = 0;
3068 
3069 	mtx_lock(&q0->lock);
3070 
3071 	for_each_port(adap, i)
3072 	    if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3073 		    new_packets = 1;
3074 	mtx_unlock(&q0->lock);
3075 	if (new_packets == 0) {
3076 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3077 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3078 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3079 	}
3080 }
3081 
3082 void
3083 t3_intr_msix(void *data)
3084 {
3085 	struct sge_qset *qs = data;
3086 	adapter_t *adap = qs->port->adapter;
3087 	struct sge_rspq *rspq = &qs->rspq;
3088 
3089 	if (process_responses_gts(adap, rspq) == 0)
3090 		rspq->unhandled_irqs++;
3091 }
3092 
3093 #define QDUMP_SBUF_SIZE		(32 * 400)
3094 static int
3095 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3096 {
3097 	struct sge_rspq *rspq;
3098 	struct sge_qset *qs;
3099 	int i, err, dump_end, idx;
3100 	struct sbuf *sb;
3101 	struct rsp_desc *rspd;
3102 	uint32_t data[4];
3103 
3104 	rspq = arg1;
3105 	qs = rspq_to_qset(rspq);
3106 	if (rspq->rspq_dump_count == 0)
3107 		return (0);
3108 	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3109 		log(LOG_WARNING,
3110 		    "dump count is too large %d\n", rspq->rspq_dump_count);
3111 		rspq->rspq_dump_count = 0;
3112 		return (EINVAL);
3113 	}
3114 	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3115 		log(LOG_WARNING,
3116 		    "dump start of %d is greater than queue size\n",
3117 		    rspq->rspq_dump_start);
3118 		rspq->rspq_dump_start = 0;
3119 		return (EINVAL);
3120 	}
3121 	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3122 	if (err)
3123 		return (err);
3124 	err = sysctl_wire_old_buffer(req, 0);
3125 	if (err)
3126 		return (err);
3127 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3128 
3129 	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3130 	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3131 	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3132 	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3133 	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3134 
3135 	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3136 	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3137 
3138 	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3139 	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3140 		idx = i & (RSPQ_Q_SIZE-1);
3141 
3142 		rspd = &rspq->desc[idx];
3143 		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3144 		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3145 		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3146 		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3147 		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3148 		    be32toh(rspd->len_cq), rspd->intr_gen);
3149 	}
3150 
3151 	err = sbuf_finish(sb);
3152 	sbuf_delete(sb);
3153 	return (err);
3154 }
3155 
3156 static int
3157 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3158 {
3159 	struct sge_txq *txq;
3160 	struct sge_qset *qs;
3161 	int i, j, err, dump_end;
3162 	struct sbuf *sb;
3163 	struct tx_desc *txd;
3164 	uint32_t *WR, wr_hi, wr_lo, gen;
3165 	uint32_t data[4];
3166 
3167 	txq = arg1;
3168 	qs = txq_to_qset(txq, TXQ_ETH);
3169 	if (txq->txq_dump_count == 0) {
3170 		return (0);
3171 	}
3172 	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3173 		log(LOG_WARNING,
3174 		    "dump count is too large %d\n", txq->txq_dump_count);
3175 		txq->txq_dump_count = 1;
3176 		return (EINVAL);
3177 	}
3178 	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3179 		log(LOG_WARNING,
3180 		    "dump start of %d is greater than queue size\n",
3181 		    txq->txq_dump_start);
3182 		txq->txq_dump_start = 0;
3183 		return (EINVAL);
3184 	}
3185 	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3186 	if (err)
3187 		return (err);
3188 	err = sysctl_wire_old_buffer(req, 0);
3189 	if (err)
3190 		return (err);
3191 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3192 
3193 	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3194 	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3195 	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3196 	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3197 	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3198 	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3199 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3200 	    txq->txq_dump_start,
3201 	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3202 
3203 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3204 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3205 		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3206 		WR = (uint32_t *)txd->flit;
3207 		wr_hi = ntohl(WR[0]);
3208 		wr_lo = ntohl(WR[1]);
3209 		gen = G_WR_GEN(wr_lo);
3210 
3211 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3212 		    wr_hi, wr_lo, gen);
3213 		for (j = 2; j < 30; j += 4)
3214 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3215 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3216 
3217 	}
3218 	err = sbuf_finish(sb);
3219 	sbuf_delete(sb);
3220 	return (err);
3221 }
3222 
3223 static int
3224 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3225 {
3226 	struct sge_txq *txq;
3227 	struct sge_qset *qs;
3228 	int i, j, err, dump_end;
3229 	struct sbuf *sb;
3230 	struct tx_desc *txd;
3231 	uint32_t *WR, wr_hi, wr_lo, gen;
3232 
3233 	txq = arg1;
3234 	qs = txq_to_qset(txq, TXQ_CTRL);
3235 	if (txq->txq_dump_count == 0) {
3236 		return (0);
3237 	}
3238 	if (txq->txq_dump_count > 256) {
3239 		log(LOG_WARNING,
3240 		    "dump count is too large %d\n", txq->txq_dump_count);
3241 		txq->txq_dump_count = 1;
3242 		return (EINVAL);
3243 	}
3244 	if (txq->txq_dump_start > 255) {
3245 		log(LOG_WARNING,
3246 		    "dump start of %d is greater than queue size\n",
3247 		    txq->txq_dump_start);
3248 		txq->txq_dump_start = 0;
3249 		return (EINVAL);
3250 	}
3251 
3252 	err = sysctl_wire_old_buffer(req, 0);
3253 	if (err != 0)
3254 		return (err);
3255 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3256 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3257 	    txq->txq_dump_start,
3258 	    (txq->txq_dump_start + txq->txq_dump_count) & 255);
3259 
3260 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3261 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3262 		txd = &txq->desc[i & (255)];
3263 		WR = (uint32_t *)txd->flit;
3264 		wr_hi = ntohl(WR[0]);
3265 		wr_lo = ntohl(WR[1]);
3266 		gen = G_WR_GEN(wr_lo);
3267 
3268 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3269 		    wr_hi, wr_lo, gen);
3270 		for (j = 2; j < 30; j += 4)
3271 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3272 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3273 
3274 	}
3275 	err = sbuf_finish(sb);
3276 	sbuf_delete(sb);
3277 	return (err);
3278 }
3279 
3280 static int
3281 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3282 {
3283 	adapter_t *sc = arg1;
3284 	struct qset_params *qsp = &sc->params.sge.qset[0];
3285 	int coalesce_usecs;
3286 	struct sge_qset *qs;
3287 	int i, j, err, nqsets = 0;
3288 	struct mtx *lock;
3289 
3290 	if ((sc->flags & FULL_INIT_DONE) == 0)
3291 		return (ENXIO);
3292 
3293 	coalesce_usecs = qsp->coalesce_usecs;
3294 	err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3295 
3296 	if (err != 0) {
3297 		return (err);
3298 	}
3299 	if (coalesce_usecs == qsp->coalesce_usecs)
3300 		return (0);
3301 
3302 	for (i = 0; i < sc->params.nports; i++)
3303 		for (j = 0; j < sc->port[i].nqsets; j++)
3304 			nqsets++;
3305 
3306 	coalesce_usecs = max(1, coalesce_usecs);
3307 
3308 	for (i = 0; i < nqsets; i++) {
3309 		qs = &sc->sge.qs[i];
3310 		qsp = &sc->params.sge.qset[i];
3311 		qsp->coalesce_usecs = coalesce_usecs;
3312 
3313 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3314 			    &sc->sge.qs[0].rspq.lock;
3315 
3316 		mtx_lock(lock);
3317 		t3_update_qset_coalesce(qs, qsp);
3318 		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3319 		    V_NEWTIMER(qs->rspq.holdoff_tmr));
3320 		mtx_unlock(lock);
3321 	}
3322 
3323 	return (0);
3324 }
3325 
3326 static int
3327 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3328 {
3329 	adapter_t *sc = arg1;
3330 	int rc, timestamp;
3331 
3332 	if ((sc->flags & FULL_INIT_DONE) == 0)
3333 		return (ENXIO);
3334 
3335 	timestamp = sc->timestamp;
3336 	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3337 
3338 	if (rc != 0)
3339 		return (rc);
3340 
3341 	if (timestamp != sc->timestamp) {
3342 		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3343 		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3344 		sc->timestamp = timestamp;
3345 	}
3346 
3347 	return (0);
3348 }
3349 
3350 void
3351 t3_add_attach_sysctls(adapter_t *sc)
3352 {
3353 	struct sysctl_ctx_list *ctx;
3354 	struct sysctl_oid_list *children;
3355 
3356 	ctx = device_get_sysctl_ctx(sc->dev);
3357 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3358 
3359 	/* random information */
3360 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3361 	    "firmware_version",
3362 	    CTLFLAG_RD, sc->fw_version,
3363 	    0, "firmware version");
3364 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3365 	    "hw_revision",
3366 	    CTLFLAG_RD, &sc->params.rev,
3367 	    0, "chip model");
3368 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3369 	    "port_types",
3370 	    CTLFLAG_RD, sc->port_types,
3371 	    0, "type of ports");
3372 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3373 	    "enable_debug",
3374 	    CTLFLAG_RW, &cxgb_debug,
3375 	    0, "enable verbose debugging output");
3376 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3377 	    CTLFLAG_RD, &sc->tunq_coalesce,
3378 	    "#tunneled packets freed");
3379 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3380 	    "txq_overrun",
3381 	    CTLFLAG_RD, &txq_fills,
3382 	    0, "#times txq overrun");
3383 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3384 	    "core_clock",
3385 	    CTLFLAG_RD, &sc->params.vpd.cclk,
3386 	    0, "core clock frequency (in kHz)");
3387 }
3388 
3389 
3390 static const char *rspq_name = "rspq";
3391 static const char *txq_names[] =
3392 {
3393 	"txq_eth",
3394 	"txq_ofld",
3395 	"txq_ctrl"
3396 };
3397 
3398 static int
3399 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3400 {
3401 	struct port_info *p = arg1;
3402 	uint64_t *parg;
3403 
3404 	if (!p)
3405 		return (EINVAL);
3406 
3407 	cxgb_refresh_stats(p);
3408 	parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3409 
3410 	return (sysctl_handle_64(oidp, parg, 0, req));
3411 }
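
/*
 * Illustrative sketch (hypothetical, not the driver's actual registration
 * code): per-counter nodes for the handler above would be added with the
 * counter's byte offset into struct mac_stats passed through arg2, along
 * the lines of
 *
 *	SYSCTL_ADD_PROC(ctx, poidlist, OID_AUTO, "tx_octets",
 *	    CTLTYPE_U64 | CTLFLAG_RD, pi,
 *	    offsetof(struct mac_stats, tx_octets),
 *	    sysctl_handle_macstat, "QU", "transmitted octets");
 *
 * sysctl_handle_macstat() refreshes all MAC counters and then hands the
 * 64-bit value found at that offset to sysctl_handle_64().  The "tx_octets"
 * field name is assumed here for illustration.
 */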
3412 
3413 void
3414 t3_add_configured_sysctls(adapter_t *sc)
3415 {
3416 	struct sysctl_ctx_list *ctx;
3417 	struct sysctl_oid_list *children;
3418 	int i, j;
3419 
3420 	ctx = device_get_sysctl_ctx(sc->dev);
3421 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3422 
3423 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3424 	    "intr_coal",
3425 	    CTLTYPE_INT|CTLFLAG_RW, sc,
3426 	    0, t3_set_coalesce_usecs,
3427 	    "I", "interrupt coalescing timer (us)");
3428 
3429 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3430 	    "pkt_timestamp",
3431 	    CTLTYPE_INT | CTLFLAG_RW, sc,
3432 	    0, t3_pkt_timestamp,
3433 	    "I", "provide packet timestamp instead of connection hash");
3434 
3435 	for (i = 0; i < sc->params.nports; i++) {
3436 		struct port_info *pi = &sc->port[i];
3437 		struct sysctl_oid *poid;
3438 		struct sysctl_oid_list *poidlist;
3439 		struct mac_stats *mstats = &pi->mac.stats;
3440 
3441 		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3442 		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3443 		    pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3444 		poidlist = SYSCTL_CHILDREN(poid);
3445 		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3446 		    "nqsets", CTLFLAG_RD, &pi->nqsets,
3447 		    0, "#queue sets");
3448 
3449 		for (j = 0; j < pi->nqsets; j++) {
3450 			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3451 			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3452 					  *ctrlqpoid, *lropoid;
3453 			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3454 					       *txqpoidlist, *ctrlqpoidlist,
3455 					       *lropoidlist;
3456 			struct sge_txq *txq = &qs->txq[TXQ_ETH];
3457 
3458 			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3459 
3460 			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3461 			    qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3462 			qspoidlist = SYSCTL_CHILDREN(qspoid);
3463 
3464 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3465 					CTLFLAG_RD, &qs->fl[0].empty, 0,
3466 					"freelist #0 empty");
3467 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3468 					CTLFLAG_RD, &qs->fl[1].empty, 0,
3469 					"freelist #1 empty");
3470 
3471 			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3472 			    rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3473 			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3474 
3475 			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3476 			    txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3477 			txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3478 
3479 			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3480 			    txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3481 			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3482 
3483 			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3484 			    "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3485 			lropoidlist = SYSCTL_CHILDREN(lropoid);
3486 
3487 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3488 			    CTLFLAG_RD, &qs->rspq.size,
3489 			    0, "#entries in response queue");
3490 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3491 			    CTLFLAG_RD, &qs->rspq.cidx,
3492 			    0, "consumer index");
3493 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3494 			    CTLFLAG_RD, &qs->rspq.credits,
3495 			    0, "#credits");
3496 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3497 			    CTLFLAG_RD, &qs->rspq.starved,
3498 			    0, "#times starved");
3499 			SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3500 			    CTLFLAG_RD, &qs->rspq.phys_addr,
3501 	    "physical address of the queue");
3502 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3503 			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3504 			    0, "start rspq dump entry");
3505 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3506 			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3507 			    0, "#rspq entries to dump");
3508 			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3509 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3510 			    0, t3_dump_rspq, "A", "dump of the response queue");
3511 
3512 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3513 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3514 			    "#tunneled packets dropped");
3515 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3516 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3517 			    0, "#tunneled packets waiting to be sent");
3518 #if 0
3519 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3520 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3521 			    0, "#tunneled packets queue producer index");
3522 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3523 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3524 			    0, "#tunneled packets queue consumer index");
3525 #endif
3526 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3527 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3528 			    0, "#tunneled packets processed by the card");
3529 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3530 			    CTLFLAG_RD, &txq->cleaned,
3531 			    0, "#tunneled packets cleaned");
3532 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3533 			    CTLFLAG_RD, &txq->in_use,
3534 			    0, "#tunneled packet slots in use");
3535 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
3536 			    CTLFLAG_RD, &txq->txq_frees,
3537 			    "#tunneled packets freed");
3538 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3539 			    CTLFLAG_RD, &txq->txq_skipped,
3540 			    0, "#tunneled packet descriptors skipped");
3541 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3542 			    CTLFLAG_RD, &txq->txq_coalesced,
3543 			    "#tunneled packets coalesced");
3544 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3545 			    CTLFLAG_RD, &txq->txq_enqueued,
3546 			    0, "#tunneled packets enqueued to hardware");
3547 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3548 			    CTLFLAG_RD, &qs->txq_stopped,
3549 			    0, "tx queues stopped");
3550 			SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3551 			    CTLFLAG_RD, &txq->phys_addr,
3552 			    "physical address of the queue");
3553 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3554 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3555 			    0, "txq generation");
3556 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3557 			    CTLFLAG_RD, &txq->cidx,
3558 			    0, "hardware queue cidx");
3559 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3560 			    CTLFLAG_RD, &txq->pidx,
3561 			    0, "hardware queue pidx");
3562 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3563 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3564 			    0, "txq start idx for dump");
3565 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3566 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3567 			    0, "txq #entries to dump");
3568 			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3569 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3570 			    0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3571 
3572 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3573 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3574 			    0, "ctrlq start idx for dump");
3575 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3576 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3577 			    0, "ctrl #entries to dump");
3578 			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3579 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3580 			    0, t3_dump_txq_ctrl, "A", "dump of the control queue");
3581 
3582 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3583 			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, "#packets queued for LRO");
3584 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3585 			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, "#LRO chains flushed");
3586 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3587 			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, "#LRO bad checksums");
3588 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3589 			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, "#LRO entries");
3590 		}
3591 
3592 		/* Now add a node for mac stats. */
3593 		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3594 		    CTLFLAG_RD, NULL, "MAC statistics");
3595 		poidlist = SYSCTL_CHILDREN(poid);
3596 
3597 		/*
3598 		 * We (ab)use the length argument (arg2) to pass on the offset
3599 		 * of the data that we are interested in.  This is only required
3600 		 * for the quad counters that are updated from the hardware (we
3601 		 * make sure that we return the latest value).
3602 		 * sysctl_handle_macstat first updates *all* the counters from
3603 		 * the hardware, and then returns the latest value of the
3604 		 * requested counter.  Best would be to update only the
3605 		 * requested counter from hardware, but t3_mac_update_stats()
3606 		 * hides all the register details and we don't want to dive into
3607 		 * all that here.
3608 		 */
3609 #define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3610     (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3611     sysctl_handle_macstat, "QU", 0)
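		/*
		 * For reference, CXGB_SYSCTL_ADD_QUAD(tx_octets) below expands
		 * (roughly) to
		 *
		 *	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, "tx_octets",
		 *	    (CTLTYPE_U64 | CTLFLAG_RD), pi,
		 *	    offsetof(struct mac_stats, tx_octets),
		 *	    sysctl_handle_macstat, "QU", 0);
		 *
		 * i.e. arg1 is the port_info pointer and arg2 is the byte
		 * offset of the counter within struct mac_stats, which
		 * sysctl_handle_macstat uses to pick out the requested value
		 * after refreshing all counters from hardware.
		 */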
3612 		CXGB_SYSCTL_ADD_QUAD(tx_octets);
3613 		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3614 		CXGB_SYSCTL_ADD_QUAD(tx_frames);
3615 		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3616 		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3617 		CXGB_SYSCTL_ADD_QUAD(tx_pause);
3618 		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3619 		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3620 		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3621 		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3622 		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3623 		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3624 		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3625 		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3626 		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3627 		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3628 		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3629 		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3630 		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3631 		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3632 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3633 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3634 		CXGB_SYSCTL_ADD_QUAD(rx_octets);
3635 		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3636 		CXGB_SYSCTL_ADD_QUAD(rx_frames);
3637 		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3638 		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3639 		CXGB_SYSCTL_ADD_QUAD(rx_pause);
3640 		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3641 		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3642 		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3643 		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3644 		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3645 		CXGB_SYSCTL_ADD_QUAD(rx_runt);
3646 		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3647 		CXGB_SYSCTL_ADD_QUAD(rx_short);
3648 		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3649 		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3650 		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3651 		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3652 		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3653 		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3654 		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3655 		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3656 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3657 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3658 #undef CXGB_SYSCTL_ADD_QUAD
3659 
3660 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3661     CTLFLAG_RD, &mstats->a, 0)
3662 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3663 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3664 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3665 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3666 		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3667 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3668 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3669 		CXGB_SYSCTL_ADD_ULONG(num_toggled);
3670 		CXGB_SYSCTL_ADD_ULONG(num_resets);
3671 		CXGB_SYSCTL_ADD_ULONG(link_faults);
3672 #undef CXGB_SYSCTL_ADD_ULONG
3673 	}
3674 }
3675 
3676 /**
3677  *	t3_get_desc - dump an SGE descriptor for debugging purposes
3678  *	@qs: the queue set
3679  *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3680  *	@idx: the descriptor index in the queue
3681  *	@data: where to dump the descriptor contents
3682  *
3683  *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
3684  *	size of the descriptor.
3685  */
3686 int
3687 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3688 		unsigned char *data)
3689 {
3690 	if (qnum >= 6)
3691 		return (EINVAL);
3692 
3693 	if (qnum < 3) {
3694 		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3695 			return (EINVAL);
3696 		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3697 		return sizeof(struct tx_desc);
3698 	}
3699 
3700 	if (qnum == 3) {
3701 		if (!qs->rspq.desc || idx >= qs->rspq.size)
3702 			return (EINVAL);
3703 		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3704 		return sizeof(struct rsp_desc);
3705 	}
3706 
3707 	qnum -= 4;
3708 	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3709 		return (EINVAL);
3710 	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3711 	return sizeof(struct rx_desc);
3712 }
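
/*
 * Usage sketch (illustrative, with an assumed softc layout of
 * "sc->sge.qs[0]"): dump the first descriptor of the response queue of
 * queue set 0.  qnum 3 selects the response queue, so a buffer of
 * sizeof(struct rsp_desc) bytes is large enough; the return value is the
 * descriptor size in bytes, or EINVAL for a bad queue number or index.
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len;
 *
 *	len = t3_get_desc(&sc->sge.qs[0], 3, 0, buf);
 */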
3713