xref: /freebsd/sys/dev/cxgb/cxgb_sge.c (revision d22c735e033e47d58878a9c00aa09e90e6e83f06)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_inet6.h"
34 #include "opt_inet.h"
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/conf.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
45 #include <sys/rman.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 
50 #include <sys/proc.h>
51 #include <sys/sbuf.h>
52 #include <sys/sched.h>
53 #include <sys/smp.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
58 
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
64 
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
70 
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 
74 #include <vm/vm.h>
75 #include <vm/pmap.h>
76 
77 #include <cxgb_include.h>
78 #include <sys/mvec.h>
79 
80 int	txq_fills = 0;
81 int	multiq_tx_enable = 1;
82 
83 #ifdef TCP_OFFLOAD
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
85 #endif
86 
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
90 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
91     "size of per-queue mbuf ring");
92 
93 static int cxgb_tx_coalesce_force = 0;
94 TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
95 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
96     &cxgb_tx_coalesce_force, 0,
97     "coalesce small packets into a single work request regardless of ring state");
98 
99 #define	COALESCE_START_DEFAULT		(TX_ETH_Q_SIZE>>1)
100 #define	COALESCE_START_MAX		(TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
101 #define	COALESCE_STOP_DEFAULT		(TX_ETH_Q_SIZE>>2)
102 #define	COALESCE_STOP_MIN		(TX_ETH_Q_SIZE>>5)
103 #define	TX_RECLAIM_DEFAULT		(TX_ETH_Q_SIZE>>5)
104 #define	TX_RECLAIM_MAX			(TX_ETH_Q_SIZE>>2)
105 #define	TX_RECLAIM_MIN			(TX_ETH_Q_SIZE>>6)
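
/*
 * Assuming the stock TX_ETH_Q_SIZE of 1024, the defaults above work out to a
 * coalesce start threshold of 512 entries, a coalesce stop threshold of 256,
 * and reclaim default/max/min thresholds of 32/256/16 descriptors; all of
 * them scale with TX_ETH_Q_SIZE if that is changed.
 */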
106 
107 
108 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
109 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
110     &cxgb_tx_coalesce_enable_start);
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
112     &cxgb_tx_coalesce_enable_start, 0,
113     "coalesce enable threshold");
114 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
115 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
116 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
117     &cxgb_tx_coalesce_enable_stop, 0,
118     "coalesce disable threshold");
119 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
120 TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
121 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
122     &cxgb_tx_reclaim_threshold, 0,
123     "tx cleaning minimum threshold");
124 
125 /*
126  * XXX don't re-enable this until TOE stops assuming
127  * we have an m_ext
128  */
129 static int recycle_enable = 0;
130 
131 extern int cxgb_use_16k_clusters;
132 extern int nmbjumbop;
133 extern int nmbjumbo9;
134 extern int nmbjumbo16;
135 
136 #define USE_GTS 0
137 
138 #define SGE_RX_SM_BUF_SIZE	1536
139 #define SGE_RX_DROP_THRES	16
140 #define SGE_RX_COPY_THRES	128
141 
142 /*
143  * Period of the Tx buffer reclaim timer.  This timer does not need to run
144  * frequently as Tx buffers are usually reclaimed by new Tx packets.
145  */
146 #define TX_RECLAIM_PERIOD       (hz >> 1)
147 
148 /*
149  * Values for sge_txq.flags
150  */
151 enum {
152 	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
153 	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
154 };
155 
156 struct tx_desc {
157 	uint64_t	flit[TX_DESC_FLITS];
158 } __packed;
159 
160 struct rx_desc {
161 	uint32_t	addr_lo;
162 	uint32_t	len_gen;
163 	uint32_t	gen2;
164 	uint32_t	addr_hi;
165 } __packed;
166 
167 struct rsp_desc {               /* response queue descriptor */
168 	struct rss_header	rss_hdr;
169 	uint32_t		flags;
170 	uint32_t		len_cq;
171 	uint8_t			imm_data[47];
172 	uint8_t			intr_gen;
173 } __packed;
174 
175 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
176 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
177 #define RX_SW_DESC_INUSE        (1 << 3)
178 #define TX_SW_DESC_MAPPED       (1 << 4)
179 
180 #define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
181 #define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
182 #define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
183 #define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
184 
185 struct tx_sw_desc {                /* SW state per Tx descriptor */
186 	struct mbuf	*m;
187 	bus_dmamap_t	map;
188 	int		flags;
189 };
190 
191 struct rx_sw_desc {                /* SW state per Rx descriptor */
192 	caddr_t		rxsd_cl;
193 	struct mbuf	*m;
194 	bus_dmamap_t	map;
195 	int		flags;
196 };
197 
198 struct txq_state {
199 	unsigned int	compl;
200 	unsigned int	gen;
201 	unsigned int	pidx;
202 };
203 
204 struct refill_fl_cb_arg {
205 	int               error;
206 	bus_dma_segment_t seg;
207 	int               nseg;
208 };
209 
210 
211 /*
212  * Maps a number of flits to the number of Tx descriptors that can hold them.
213  * The formula is
214  *
215  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
216  *
217  * HW allows up to 4 descriptors to be combined into a WR.
218  */
219 static uint8_t flit_desc_map[] = {
220 	0,
221 #if SGE_NUM_GENBITS == 1
222 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
223 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
224 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
225 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
226 #elif SGE_NUM_GENBITS == 2
227 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
228 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
229 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
230 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
231 #else
232 # error "SGE_NUM_GENBITS must be 1 or 2"
233 #endif
234 };
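
/*
 * Worked example for the table above with SGE_NUM_GENBITS == 2, where
 * WR_FLITS == TX_DESC_FLITS + 1 - SGE_NUM_GENBITS == 15 because the last
 * flit of each descriptor carries the generation bits: 16 flits need
 * 1 + (16 - 2) / 14 == 2 descriptors, which matches flit_desc_map[16] == 2.
 */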
235 
236 #define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
237 #define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
238 #define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
239 #define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
240 #define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
241 #define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
242 	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
243 #define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
244 #define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
245 	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
246 #define	TXQ_RING_DEQUEUE(qs) \
247 	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
248 
249 int cxgb_debug = 0;
250 
251 static void sge_timer_cb(void *arg);
252 static void sge_timer_reclaim(void *arg, int ncount);
253 static void sge_txq_reclaim_handler(void *arg, int ncount);
254 static void cxgb_start_locked(struct sge_qset *qs);
255 
256 /*
257  * XXX need to cope with bursty scheduling by looking at a wider
258  * window than we do now when determining the need for coalescing
259  *
260  */
261 static __inline uint64_t
262 check_pkt_coalesce(struct sge_qset *qs)
263 {
264         struct adapter *sc;
265         struct sge_txq *txq;
266 	uint8_t *fill;
267 
268 	if (__predict_false(cxgb_tx_coalesce_force))
269 		return (1);
270 	txq = &qs->txq[TXQ_ETH];
271         sc = qs->port->adapter;
272 	fill = &sc->tunq_fill[qs->idx];
273 
274 	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
275 		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
276 	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
277 		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
278 	/*
279 	 * If the hardware transmit queue has more than cxgb_tx_coalesce_enable_start
280 	 * entries in use, mark the queue set as coalescing; drop back out of
281 	 * coalescing once usage falls to cxgb_tx_coalesce_enable_stop or below and
282 	 * no packets are enqueued.  This provides some degree of hysteresis.
283 	 */
284         if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
285 	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
286                 *fill = 0;
287         else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
288                 *fill = 1;
289 
290 	return (sc->tunq_coalesce);
291 }
292 
293 #ifdef __LP64__
294 static void
295 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
296 {
297 	uint64_t wr_hilo;
298 #if _BYTE_ORDER == _LITTLE_ENDIAN
299 	wr_hilo = wr_hi;
300 	wr_hilo |= (((uint64_t)wr_lo)<<32);
301 #else
302 	wr_hilo = wr_lo;
303 	wr_hilo |= (((uint64_t)wr_hi)<<32);
304 #endif
305 	wrp->wrh_hilo = wr_hilo;
306 }
307 #else
308 static void
309 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
310 {
311 
312 	wrp->wrh_hi = wr_hi;
313 	wmb();
314 	wrp->wrh_lo = wr_lo;
315 }
316 #endif
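
/*
 * The two set_wr_hdr() variants above exist because the SGE must never see a
 * half-updated work request header: on 32-bit platforms wrh_hi is stored
 * first and a write barrier orders it before wrh_lo (which carries the
 * generation bit marking the descriptor valid), while on LP64 a single
 * 64-bit store publishes both halves at once and needs no barrier.
 */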
317 
318 struct coalesce_info {
319 	int count;
320 	int nbytes;
321 };
322 
323 static int
324 coalesce_check(struct mbuf *m, void *arg)
325 {
326 	struct coalesce_info *ci = arg;
327 	int *count = &ci->count;
328 	int *nbytes = &ci->nbytes;
329 
330 	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
331 		(*count < 7) && (m->m_next == NULL))) {
332 		*count += 1;
333 		*nbytes += m->m_len;
334 		return (1);
335 	}
336 	return (0);
337 }
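
/*
 * coalesce_check() bounds what cxgb_dequeue() may merge into a single work
 * request: beyond the first packet it only accepts single-mbuf packets
 * (m_next == NULL) while the running total stays at or below 10500 bytes and
 * at most 7 packets, since each coalesced packet becomes one entry of a
 * cpl_tx_pkt_batch carrying a single DMA address.
 */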
338 
339 static struct mbuf *
340 cxgb_dequeue(struct sge_qset *qs)
341 {
342 	struct mbuf *m, *m_head, *m_tail;
343 	struct coalesce_info ci;
344 
345 
346 	if (check_pkt_coalesce(qs) == 0)
347 		return TXQ_RING_DEQUEUE(qs);
348 
349 	m_head = m_tail = NULL;
350 	ci.count = ci.nbytes = 0;
351 	do {
352 		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
353 		if (m_head == NULL) {
354 			m_tail = m_head = m;
355 		} else if (m != NULL) {
356 			m_tail->m_nextpkt = m;
357 			m_tail = m;
358 		}
359 	} while (m != NULL);
360 	if (ci.count > 7)
361 		panic("trying to coalesce %d packets in to one WR", ci.count);
362 	return (m_head);
363 }
364 
365 /**
366  *	reclaim_completed_tx - reclaims completed Tx descriptors
367  *	@qs: the queue set that owns the Tx queue; @queue: which Tx queue to reclaim
368  *	@reclaim_min: do nothing unless at least this many descriptors are reclaimable
369  *
370  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
371  *	and frees the associated buffers if possible.  Called with the Tx
372  *	queue's lock held.
373  */
374 static __inline int
375 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
376 {
377 	struct sge_txq *q = &qs->txq[queue];
378 	int reclaim = desc_reclaimable(q);
379 
380 	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
381 	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
382 		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
383 
384 	if (reclaim < reclaim_min)
385 		return (0);
386 
387 	mtx_assert(&qs->lock, MA_OWNED);
388 	if (reclaim > 0) {
389 		t3_free_tx_desc(qs, reclaim, queue);
390 		q->cleaned += reclaim;
391 		q->in_use -= reclaim;
392 	}
393 	if (isset(&qs->txq_stopped, TXQ_ETH))
394                 clrbit(&qs->txq_stopped, TXQ_ETH);
395 
396 	return (reclaim);
397 }
398 
399 /**
400  *	should_restart_tx - are there enough resources to restart a Tx queue?
401  *	@q: the Tx queue
402  *
403  *	Checks if there are enough descriptors to restart a suspended Tx queue.
404  */
405 static __inline int
406 should_restart_tx(const struct sge_txq *q)
407 {
408 	unsigned int r = q->processed - q->cleaned;
409 
410 	return q->in_use - r < (q->size >> 1);
411 }
412 
413 /**
414  *	t3_sge_init - initialize SGE
415  *	@adap: the adapter
416  *	@p: the SGE parameters
417  *
418  *	Performs the SGE initialization that is needed after every chip reset.
419  *	We do not initialize any of the queue sets here; instead, the top-level
420  *	driver must request those individually.  We also do not enable DMA
421  *	here; that should be done after the queues have been set up.
422  */
423 void
424 t3_sge_init(adapter_t *adap, struct sge_params *p)
425 {
426 	u_int ctrl, ups;
427 
428 	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
429 
430 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
431 	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
432 	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
433 	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
434 #if SGE_NUM_GENBITS == 1
435 	ctrl |= F_EGRGENCTRL;
436 #endif
437 	if (adap->params.rev > 0) {
438 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
439 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
440 	}
441 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
442 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
443 		     V_LORCQDRBTHRSH(512));
444 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
445 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
446 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
447 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
448 		     adap->params.rev < T3_REV_C ? 1000 : 500);
449 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
450 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
451 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
452 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
453 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
454 }
455 
456 
457 /**
458  *	sgl_len - calculates the size of an SGL of the given capacity
459  *	@n: the number of SGL entries
460  *
461  *	Calculates the number of flits needed for a scatter/gather list that
462  *	can hold the given number of entries.
463  */
464 static __inline unsigned int
465 sgl_len(unsigned int n)
466 {
467 	return ((3 * n) / 2 + (n & 1));
468 }
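
/*
 * Each SGL entry carries a 64-bit DMA address plus a 32-bit length, i.e. 1.5
 * flits, so n entries occupy ceil(3n/2) flits.  For example,
 * sgl_len(3) == (3 * 3) / 2 + (3 & 1) == 4 + 1 == 5 flits.
 */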
469 
470 /**
471  *	get_imm_packet - return the next ingress packet buffer from a response
472  *	@resp: the response descriptor containing the packet data
473  *
474  *	Return a packet containing the immediate data of the given response.
475  */
476 static int
477 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
478 {
479 
480 	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
481 		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
482 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
483 	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
484 		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
485 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
486 	} else
487 		m->m_len = IMMED_PKT_SIZE;
488 	m->m_ext.ext_buf = NULL;
489 	m->m_ext.ext_type = 0;
490 	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
491 	return (0);
492 }
493 
494 static __inline u_int
495 flits_to_desc(u_int n)
496 {
497 	return (flit_desc_map[n]);
498 }
499 
500 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
501 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
502 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
503 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
504 		    F_HIRCQPARITYERROR)
505 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
506 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
507 		      F_RSPQDISABLED)
508 
509 /**
510  *	t3_sge_err_intr_handler - SGE async event interrupt handler
511  *	@adapter: the adapter
512  *
513  *	Interrupt handler for SGE asynchronous (non-data) events.
514  */
515 void
516 t3_sge_err_intr_handler(adapter_t *adapter)
517 {
518 	unsigned int v, status;
519 
520 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
521 	if (status & SGE_PARERR)
522 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
523 			 status & SGE_PARERR);
524 	if (status & SGE_FRAMINGERR)
525 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
526 			 status & SGE_FRAMINGERR);
527 	if (status & F_RSPQCREDITOVERFOW)
528 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
529 
530 	if (status & F_RSPQDISABLED) {
531 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
532 
533 		CH_ALERT(adapter,
534 			 "packet delivered to disabled response queue (0x%x)\n",
535 			 (v >> S_RSPQ0DISABLED) & 0xff);
536 	}
537 
538 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
539 	if (status & SGE_FATALERR)
540 		t3_fatal_err(adapter);
541 }
542 
543 void
544 t3_sge_prep(adapter_t *adap, struct sge_params *p)
545 {
546 	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
547 
548 	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
549 	nqsets *= adap->params.nports;
550 
551 	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
552 
553 	while (!powerof2(fl_q_size))
554 		fl_q_size--;
555 
556 	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
557 	    is_offload(adap);
558 
559 #if __FreeBSD_version >= 700111
560 	if (use_16k) {
561 		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
562 		jumbo_buf_size = MJUM16BYTES;
563 	} else {
564 		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
565 		jumbo_buf_size = MJUM9BYTES;
566 	}
567 #else
568 	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
569 	jumbo_buf_size = MJUMPAGESIZE;
570 #endif
571 	while (!powerof2(jumbo_q_size))
572 		jumbo_q_size--;
573 
574 	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
575 		device_printf(adap->dev,
576 		    "Insufficient clusters and/or jumbo buffers.\n");
577 
578 	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
579 
580 	for (i = 0; i < SGE_QSETS; ++i) {
581 		struct qset_params *q = p->qset + i;
582 
583 		if (adap->params.nports > 2) {
584 			q->coalesce_usecs = 50;
585 		} else {
586 #ifdef INVARIANTS
587 			q->coalesce_usecs = 10;
588 #else
589 			q->coalesce_usecs = 5;
590 #endif
591 		}
592 		q->polling = 0;
593 		q->rspq_size = RSPQ_Q_SIZE;
594 		q->fl_size = fl_q_size;
595 		q->jumbo_size = jumbo_q_size;
596 		q->jumbo_buf_size = jumbo_buf_size;
597 		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
598 		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
599 		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
600 		q->cong_thres = 0;
601 	}
602 }
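
/*
 * Free-list sizing sketch (illustrative numbers only): with a 2-port adapter
 * running 4 queue sets per port (nqsets == 8) and nmbclusters == 32768,
 * fl_q_size starts at min(32768 / 24, FL_Q_SIZE) == 1365 (assuming FL_Q_SIZE
 * is larger) and is then rounded down to the next power of two, 1024.  The
 * jumbo free list is sized the same way from the jumbo cluster pools.
 */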
603 
604 int
605 t3_sge_alloc(adapter_t *sc)
606 {
607 
608 	/* The parent tag. */
609 	if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
610 				1, 0,			/* algnmnt, boundary */
611 				BUS_SPACE_MAXADDR,	/* lowaddr */
612 				BUS_SPACE_MAXADDR,	/* highaddr */
613 				NULL, NULL,		/* filter, filterarg */
614 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
615 				BUS_SPACE_UNRESTRICTED, /* nsegments */
616 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
617 				0,			/* flags */
618 				NULL, NULL,		/* lock, lockarg */
619 				&sc->parent_dmat)) {
620 		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
621 		return (ENOMEM);
622 	}
623 
624 	/*
625 	 * DMA tag for normal sized RX frames
626 	 */
627 	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
628 		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
629 		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
630 		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
631 		return (ENOMEM);
632 	}
633 
634 	/*
635 	 * DMA tag for jumbo sized RX frames.
636 	 */
637 	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
638 		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
639 		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
640 		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
641 		return (ENOMEM);
642 	}
643 
644 	/*
645 	 * DMA tag for TX frames.
646 	 */
647 	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
648 		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
649 		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
650 		NULL, NULL, &sc->tx_dmat)) {
651 		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
652 		return (ENOMEM);
653 	}
654 
655 	return (0);
656 }
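
/*
 * Summary of the DMA tag hierarchy created above: parent_dmat is the
 * unrestricted parent of everything else; rx_dmat maps single-segment
 * MCLBYTES buffers for normal receive, rx_jumbo_dmat maps single-segment
 * buffers of up to MJUM16BYTES for the jumbo free list, and tx_dmat maps
 * transmit packets of up to TX_MAX_SEGS segments and TX_MAX_SIZE bytes.
 */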
657 
658 int
659 t3_sge_free(struct adapter * sc)
660 {
661 
662 	if (sc->tx_dmat != NULL)
663 		bus_dma_tag_destroy(sc->tx_dmat);
664 
665 	if (sc->rx_jumbo_dmat != NULL)
666 		bus_dma_tag_destroy(sc->rx_jumbo_dmat);
667 
668 	if (sc->rx_dmat != NULL)
669 		bus_dma_tag_destroy(sc->rx_dmat);
670 
671 	if (sc->parent_dmat != NULL)
672 		bus_dma_tag_destroy(sc->parent_dmat);
673 
674 	return (0);
675 }
676 
677 void
678 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
679 {
680 
681 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
682 	qs->rspq.polling = 0 /* p->polling */;
683 }
684 
685 #if !defined(__i386__) && !defined(__amd64__)
686 static void
687 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
688 {
689 	struct refill_fl_cb_arg *cb_arg = arg;
690 
691 	cb_arg->error = error;
692 	cb_arg->seg = segs[0];
693 	cb_arg->nseg = nseg;
694 
695 }
696 #endif
697 /**
698  *	refill_fl - refill an SGE free-buffer list
699  *	@sc: the controller softc
700  *	@q: the free-list to refill
701  *	@n: the number of new buffers to allocate
702  *
703  *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
704  *	The caller must ensure that @n does not exceed the queue's capacity.
705  */
706 static void
707 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
708 {
709 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
710 	struct rx_desc *d = &q->desc[q->pidx];
711 	struct refill_fl_cb_arg cb_arg;
712 	struct mbuf *m;
713 	caddr_t cl;
714 	int err;
715 
716 	cb_arg.error = 0;
717 	while (n--) {
718 		/*
719 		 * We allocate an uninitialized mbuf + cluster, mbuf is
720 		 * initialized after rx.
721 		 */
722 		if (q->zone == zone_pack) {
723 			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
724 				break;
725 			cl = m->m_ext.ext_buf;
726 		} else {
727 			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
728 				break;
729 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
730 				uma_zfree(q->zone, cl);
731 				break;
732 			}
733 		}
734 		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
735 			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
736 				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
737 				uma_zfree(q->zone, cl);
738 				goto done;
739 			}
740 			sd->flags |= RX_SW_DESC_MAP_CREATED;
741 		}
742 #if !defined(__i386__) && !defined(__amd64__)
743 		err = bus_dmamap_load(q->entry_tag, sd->map,
744 		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
745 
746 		if (err != 0 || cb_arg.error) {
747 			if (q->zone == zone_pack)
748 				uma_zfree(q->zone, cl);
749 			m_free(m);
750 			goto done;
751 		}
752 #else
753 		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
754 #endif
755 		sd->flags |= RX_SW_DESC_INUSE;
756 		sd->rxsd_cl = cl;
757 		sd->m = m;
758 		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
759 		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
760 		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
761 		d->gen2 = htobe32(V_FLD_GEN2(q->gen));
762 
763 		d++;
764 		sd++;
765 
766 		if (++q->pidx == q->size) {
767 			q->pidx = 0;
768 			q->gen ^= 1;
769 			sd = q->sdesc;
770 			d = q->desc;
771 		}
772 		q->credits++;
773 		q->db_pending++;
774 	}
775 
776 done:
777 	if (q->db_pending >= 32) {
778 		q->db_pending = 0;
779 		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
780 	}
781 }
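
/*
 * Note that refill_fl() defers the free-list doorbell: newly added buffers
 * are counted in db_pending and the SGE is only kicked once at least 32 are
 * outstanding, which keeps the doorbell write rate down during bulk refills.
 */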
782 
783 
784 /**
785  *	free_rx_bufs - free the Rx buffers on an SGE free list
786  *	@sc: the controller softc
787  *	@q: the SGE free list to clean up
788  *
789  *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
790  *	this queue should be stopped before calling this function.
791  */
792 static void
793 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
794 {
795 	u_int cidx = q->cidx;
796 
797 	while (q->credits--) {
798 		struct rx_sw_desc *d = &q->sdesc[cidx];
799 
800 		if (d->flags & RX_SW_DESC_INUSE) {
801 			bus_dmamap_unload(q->entry_tag, d->map);
802 			bus_dmamap_destroy(q->entry_tag, d->map);
803 			if (q->zone == zone_pack) {
804 				m_init(d->m, zone_pack, MCLBYTES,
805 				    M_NOWAIT, MT_DATA, M_EXT);
806 				uma_zfree(zone_pack, d->m);
807 			} else {
808 				m_init(d->m, zone_mbuf, MLEN,
809 				    M_NOWAIT, MT_DATA, 0);
810 				uma_zfree(zone_mbuf, d->m);
811 				uma_zfree(q->zone, d->rxsd_cl);
812 			}
813 		}
814 
815 		d->rxsd_cl = NULL;
816 		d->m = NULL;
817 		if (++cidx == q->size)
818 			cidx = 0;
819 	}
820 }
821 
822 static __inline void
823 __refill_fl(adapter_t *adap, struct sge_fl *fl)
824 {
825 	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
826 }
827 
828 static __inline void
829 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
830 {
831 	uint32_t reclaimable = fl->size - fl->credits;
832 
833 	if (reclaimable > 0)
834 		refill_fl(adap, fl, min(max, reclaimable));
835 }
836 
837 /**
838  *	recycle_rx_buf - recycle a receive buffer
839  *	@adapter: the adapter
840  *	@q: the SGE free list
841  *	@idx: index of buffer to recycle
842  *
843  *	Recycles the specified buffer on the given free list by adding it at
844  *	the next available slot on the list.
845  */
846 static void
847 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
848 {
849 	struct rx_desc *from = &q->desc[idx];
850 	struct rx_desc *to   = &q->desc[q->pidx];
851 
852 	q->sdesc[q->pidx] = q->sdesc[idx];
853 	to->addr_lo = from->addr_lo;        // already big endian
854 	to->addr_hi = from->addr_hi;        // likewise
855 	wmb();	/* necessary ? */
856 	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
857 	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
858 	q->credits++;
859 
860 	if (++q->pidx == q->size) {
861 		q->pidx = 0;
862 		q->gen ^= 1;
863 	}
864 	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
865 }
866 
867 static void
868 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
869 {
870 	uint32_t *addr;
871 
872 	addr = arg;
873 	*addr = segs[0].ds_addr;
874 }
875 
876 static int
877 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
878     bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
879     bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
880 {
881 	size_t len = nelem * elem_size;
882 	void *s = NULL;
883 	void *p = NULL;
884 	int err;
885 
886 	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
887 				      BUS_SPACE_MAXADDR_32BIT,
888 				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
889 				      len, 0, NULL, NULL, tag)) != 0) {
890 		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
891 		return (ENOMEM);
892 	}
893 
894 	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
895 				    map)) != 0) {
896 		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
897 		return (ENOMEM);
898 	}
899 
900 	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
901 	bzero(p, len);
902 	*(void **)desc = p;
903 
904 	if (sw_size) {
905 		len = nelem * sw_size;
906 		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
907 		*(void **)sdesc = s;
908 	}
909 	if (parent_entry_tag == NULL)
910 		return (0);
911 
912 	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
913 				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
914 		                      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
915 				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
916 		                      NULL, NULL, entry_tag)) != 0) {
917 		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
918 		return (ENOMEM);
919 	}
920 	return (0);
921 }
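
/*
 * alloc_ring() gives a queue three things: DMA-able, zeroed descriptor ring
 * memory of nelem * elem_size bytes (returned through @desc, with its bus
 * address in @phys), an optional zeroed software descriptor array of
 * nelem * sw_size bytes (@sdesc), and, when @parent_entry_tag is supplied, a
 * per-entry DMA tag derived from it for mapping the queue's buffers.
 */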
922 
923 static void
924 sge_slow_intr_handler(void *arg, int ncount)
925 {
926 	adapter_t *sc = arg;
927 
928 	t3_slow_intr_handler(sc);
929 	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
930 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
931 }
932 
933 /**
934  *	sge_timer_cb - perform periodic maintenance of the SGE queue sets
935  *	@arg: the adapter
936  *
937  *	Runs periodically from a timer to perform maintenance of the SGE queue
938  *	sets.  It performs the following tasks:
939  *
940  *	a) Cleans up any completed Tx descriptors that may still be pending.
941  *	Normal descriptor cleanup happens when new packets are added to a Tx
942  *	queue so this timer is relatively infrequent and does any cleanup only
943  *	if the Tx queue has not seen any new packets in a while.  We make a
944  *	best effort attempt to reclaim descriptors, in that we don't wait
945  *	around if we cannot get a queue's lock (which most likely is because
946  *	someone else is queueing new packets and so will also handle the clean
947  *	up).  Since control queues use immediate data exclusively we don't
948  *	bother cleaning them up here.
949  *
950  *	b) Replenishes Rx queues that have run out due to memory shortage.
951  *	Normally new Rx buffers are added when existing ones are consumed but
952  *	when out of memory a queue can become empty.  We try to add only a few
953  *	buffers here, the queue will be replenished fully as these new buffers
954  *	are used up if memory shortage has subsided.
955  *
956  *	c) Return coalesced response queue credits in case a response queue is
957  *	starved.
958  *
959  *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
960  *	fifo overflows and the FW doesn't implement any recovery scheme yet.
961  */
962 static void
963 sge_timer_cb(void *arg)
964 {
965 	adapter_t *sc = arg;
966 	if ((sc->flags & USING_MSIX) == 0) {
967 
968 		struct port_info *pi;
969 		struct sge_qset *qs;
970 		struct sge_txq  *txq;
971 		int i, j;
972 		int reclaim_ofl, refill_rx;
973 
974 		if (sc->open_device_map == 0)
975 			return;
976 
977 		for (i = 0; i < sc->params.nports; i++) {
978 			pi = &sc->port[i];
979 			for (j = 0; j < pi->nqsets; j++) {
980 				qs = &sc->sge.qs[pi->first_qset + j];
981 				txq = &qs->txq[0];
982 				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
983 				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
984 				    (qs->fl[1].credits < qs->fl[1].size));
985 				if (reclaim_ofl || refill_rx) {
986 					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
987 					break;
988 				}
989 			}
990 		}
991 	}
992 
993 	if (sc->params.nports > 2) {
994 		int i;
995 
996 		for_each_port(sc, i) {
997 			struct port_info *pi = &sc->port[i];
998 
999 			t3_write_reg(sc, A_SG_KDOORBELL,
1000 				     F_SELEGRCNTX |
1001 				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
1002 		}
1003 	}
1004 	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1005 	    sc->open_device_map != 0)
1006 		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1007 }
1008 
1009 /*
1010  * This is meant to be a catch-all function to keep sge state private
1011  * to sge.c
1012  *
1013  */
1014 int
1015 t3_sge_init_adapter(adapter_t *sc)
1016 {
1017 	callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
1018 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1019 	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1020 	return (0);
1021 }
1022 
1023 int
1024 t3_sge_reset_adapter(adapter_t *sc)
1025 {
1026 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1027 	return (0);
1028 }
1029 
1030 int
1031 t3_sge_init_port(struct port_info *pi)
1032 {
1033 	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1034 	return (0);
1035 }
1036 
1037 /**
1038  *	refill_rspq - replenish an SGE response queue
1039  *	@adapter: the adapter
1040  *	@q: the response queue to replenish
1041  *	@credits: how many new responses to make available
1042  *
1043  *	Replenishes a response queue by making the supplied number of responses
1044  *	available to HW.
1045  */
1046 static __inline void
1047 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1048 {
1049 
1050 	/* mbufs are allocated on demand when a rspq entry is processed. */
1051 	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1052 		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1053 }
1054 
1055 static void
1056 sge_txq_reclaim_handler(void *arg, int ncount)
1057 {
1058 	struct sge_qset *qs = arg;
1059 	int i;
1060 
1061 	for (i = 0; i < 3; i++)
1062 		reclaim_completed_tx(qs, 16, i);
1063 }
1064 
1065 static void
1066 sge_timer_reclaim(void *arg, int ncount)
1067 {
1068 	struct port_info *pi = arg;
1069 	int i, nqsets = pi->nqsets;
1070 	adapter_t *sc = pi->adapter;
1071 	struct sge_qset *qs;
1072 	struct mtx *lock;
1073 
1074 	KASSERT((sc->flags & USING_MSIX) == 0,
1075 	    ("can't call timer reclaim for msi-x"));
1076 
1077 	for (i = 0; i < nqsets; i++) {
1078 		qs = &sc->sge.qs[pi->first_qset + i];
1079 
1080 		reclaim_completed_tx(qs, 16, TXQ_OFLD);
1081 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1082 			    &sc->sge.qs[0].rspq.lock;
1083 
1084 		if (mtx_trylock(lock)) {
1085 			/* XXX currently assume that we are *NOT* polling */
1086 			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1087 
1088 			if (qs->fl[0].credits < qs->fl[0].size - 16)
1089 				__refill_fl(sc, &qs->fl[0]);
1090 			if (qs->fl[1].credits < qs->fl[1].size - 16)
1091 				__refill_fl(sc, &qs->fl[1]);
1092 
1093 			if (status & (1 << qs->rspq.cntxt_id)) {
1094 				if (qs->rspq.credits) {
1095 					refill_rspq(sc, &qs->rspq, 1);
1096 					qs->rspq.credits--;
1097 					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1098 					    1 << qs->rspq.cntxt_id);
1099 				}
1100 			}
1101 			mtx_unlock(lock);
1102 		}
1103 	}
1104 }
1105 
1106 /**
1107  *	init_qset_cntxt - initialize an SGE queue set context info
1108  *	@qs: the queue set
1109  *	@id: the queue set id
1110  *
1111  *	Initializes the TIDs and context ids for the queues of a queue set.
1112  */
1113 static void
1114 init_qset_cntxt(struct sge_qset *qs, u_int id)
1115 {
1116 
1117 	qs->rspq.cntxt_id = id;
1118 	qs->fl[0].cntxt_id = 2 * id;
1119 	qs->fl[1].cntxt_id = 2 * id + 1;
1120 	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1121 	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1122 	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1123 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1124 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1125 
1126 	mbufq_init(&qs->txq[TXQ_ETH].sendq);
1127 	mbufq_init(&qs->txq[TXQ_OFLD].sendq);
1128 	mbufq_init(&qs->txq[TXQ_CTRL].sendq);
1129 }
1130 
1131 
1132 static void
1133 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1134 {
1135 	txq->in_use += ndesc;
1136 	/*
1137 	 * XXX we don't handle stopping of the queue;
1138 	 * presumably start handles this when we bump against the end
1139 	 */
1140 	txqs->gen = txq->gen;
1141 	txq->unacked += ndesc;
1142 	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1143 	txq->unacked &= 31;
1144 	txqs->pidx = txq->pidx;
1145 	txq->pidx += ndesc;
1146 #ifdef INVARIANTS
1147 	if (((txqs->pidx > txq->cidx) &&
1148 		(txq->pidx < txqs->pidx) &&
1149 		(txq->pidx >= txq->cidx)) ||
1150 	    ((txqs->pidx < txq->cidx) &&
1151 		(txq->pidx >= txq-> cidx)) ||
1152 	    ((txqs->pidx < txq->cidx) &&
1153 		(txq->cidx < txqs->pidx)))
1154 		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1155 		    txqs->pidx, txq->pidx, txq->cidx);
1156 #endif
1157 	if (txq->pidx >= txq->size) {
1158 		txq->pidx -= txq->size;
1159 		txq->gen ^= 1;
1160 	}
1161 
1162 }
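
/*
 * txq_prod() also decides when to ask the SGE for a Tx completion: unacked
 * counts descriptors issued since the last completion request, and once it
 * reaches 32 the WR_COMPL bit is set in txqs->compl (the "& 32" picks up the
 * overflow bit) before unacked is folded back below 32.  Completions are
 * therefore requested roughly every 32 descriptors rather than per packet.
 */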
1163 
1164 /**
1165  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
1166  *	@m: the packet mbufs
1167  *      @nsegs: the number of segments
1168  *
1169  * 	Returns the number of Tx descriptors needed for the given Ethernet
1170  * 	packet.  Ethernet packets require addition of WR and CPL headers.
1171  */
1172 static __inline unsigned int
1173 calc_tx_descs(const struct mbuf *m, int nsegs)
1174 {
1175 	unsigned int flits;
1176 
1177 	if (m->m_pkthdr.len <= PIO_LEN)
1178 		return 1;
1179 
1180 	flits = sgl_len(nsegs) + 2;
1181 	if (m->m_pkthdr.csum_flags & CSUM_TSO)
1182 		flits++;
1183 
1184 	return flits_to_desc(flits);
1185 }
1186 
1187 /**
1188  *	make_sgl - populate a scatter/gather list for a packet
1189  *	@sgp: the SGL to populate
1190  *	@segs: the packet dma segments
1191  *	@nsegs: the number of segments
1192  *
1193  *	Generates a scatter/gather list for the buffers that make up a packet.
1194  *	Nothing is returned; the caller must size the destination SGL
1195  *	appropriately (see sgl_len()) before calling.
1196  */
1197 static __inline void
1198 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1199 {
1200 	int i, idx;
1201 
1202 	for (idx = 0, i = 0; i < nsegs; i++) {
1203 		/*
1204 		 * firmware doesn't like empty segments
1205 		 */
1206 		if (segs[i].ds_len == 0)
1207 			continue;
1208 		if (i && idx == 0)
1209 			++sgp;
1210 
1211 		sgp->len[idx] = htobe32(segs[i].ds_len);
1212 		sgp->addr[idx] = htobe64(segs[i].ds_addr);
1213 		idx ^= 1;
1214 	}
1215 
1216 	if (idx) {
1217 		sgp->len[idx] = 0;
1218 		sgp->addr[idx] = 0;
1219 	}
1220 }
1221 
1222 /**
1223  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1224  *	@adap: the adapter
1225  *	@q: the Tx queue
1226  *
1227  *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
1228  *	where the HW may go to sleep just after we check; in that case the
1229  *	interrupt handler will detect the outstanding TX packet and ring the
1230  *	doorbell for us.
1231  *
1232  *	When GTS is disabled we ring the doorbell if @mustring is set or 32 descriptors are pending.
1233  */
1234 static __inline void
1235 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1236 {
1237 #if USE_GTS
1238 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1239 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1240 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1241 #ifdef T3_TRACE
1242 		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1243 			  q->cntxt_id);
1244 #endif
1245 		t3_write_reg(adap, A_SG_KDOORBELL,
1246 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1247 	}
1248 #else
1249 	if (mustring || ++q->db_pending >= 32) {
1250 		wmb();            /* write descriptors before telling HW */
1251 		t3_write_reg(adap, A_SG_KDOORBELL,
1252 		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1253 		q->db_pending = 0;
1254 	}
1255 #endif
1256 }
1257 
1258 static __inline void
1259 wr_gen2(struct tx_desc *d, unsigned int gen)
1260 {
1261 #if SGE_NUM_GENBITS == 2
1262 	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1263 #endif
1264 }
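
/*
 * With two generation bits the last flit of every Tx descriptor is reserved
 * for the generation value, which is what wr_gen2() writes and why WR_FLITS
 * is one flit short of TX_DESC_FLITS in that configuration.  With a single
 * generation bit the generation travels only in the WR header, the last flit
 * remains available for WR data, and wr_gen2() compiles to nothing.
 */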
1265 
1266 /**
1267  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1268  *	@ndesc: number of Tx descriptors spanned by the SGL
1269  *	@txd: first Tx descriptor to be written
1270  *	@txqs: txq state (generation and producer index)
1271  *	@txq: the SGE Tx queue
1272  *	@sgl: the SGL
1273  *	@flits: number of flits to the start of the SGL in the first descriptor
1274  *	@sgl_flits: the SGL size in flits
1275  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1276  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1277  *
1278  *	Write a work request header and an associated SGL.  If the SGL is
1279  *	small enough to fit into one Tx descriptor it has already been written
1280  *	and we just need to write the WR header.  Otherwise we distribute the
1281  *	SGL across the number of descriptors it spans.
1282  */
1283 static void
1284 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1285     const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1286     unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1287 {
1288 
1289 	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1290 	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1291 
1292 	if (__predict_true(ndesc == 1)) {
1293 		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1294 		    V_WR_SGLSFLT(flits)) | wr_hi,
1295 		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1296 		    wr_lo);
1297 
1298 		wr_gen2(txd, txqs->gen);
1299 
1300 	} else {
1301 		unsigned int ogen = txqs->gen;
1302 		const uint64_t *fp = (const uint64_t *)sgl;
1303 		struct work_request_hdr *wp = wrp;
1304 
1305 		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1306 		    V_WR_SGLSFLT(flits)) | wr_hi;
1307 
1308 		while (sgl_flits) {
1309 			unsigned int avail = WR_FLITS - flits;
1310 
1311 			if (avail > sgl_flits)
1312 				avail = sgl_flits;
1313 			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1314 			sgl_flits -= avail;
1315 			ndesc--;
1316 			if (!sgl_flits)
1317 				break;
1318 
1319 			fp += avail;
1320 			txd++;
1321 			txsd++;
1322 			if (++txqs->pidx == txq->size) {
1323 				txqs->pidx = 0;
1324 				txqs->gen ^= 1;
1325 				txd = txq->desc;
1326 				txsd = txq->sdesc;
1327 			}
1328 
1329 			/*
1330 			 * when the head of the mbuf chain
1331 			 * is freed all clusters will be freed
1332 			 * with it
1333 			 */
1334 			wrp = (struct work_request_hdr *)txd;
1335 			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1336 			    V_WR_SGLSFLT(1)) | wr_hi;
1337 			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1338 				    sgl_flits + 1)) |
1339 			    V_WR_GEN(txqs->gen)) | wr_lo;
1340 			wr_gen2(txd, txqs->gen);
1341 			flits = 1;
1342 		}
1343 		wrp->wrh_hi |= htonl(F_WR_EOP);
1344 		wmb();
1345 		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1346 		wr_gen2((struct tx_desc *)wp, ogen);
1347 	}
1348 }
1349 
1350 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1351 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
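/*
 * TCPPKTHDRSIZE covers only the fixed header portions: a 14-byte Ethernet
 * header plus minimal 20-byte IPv4 and TCP headers with no options, so the
 * m_pullup() in the TSO path below only guarantees that these fixed portions
 * are contiguous.
 */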
1352 
1353 #define GET_VTAG(cntrl, m) \
1354 do { \
1355 	if ((m)->m_flags & M_VLANTAG)					            \
1356 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1357 } while (0)
1358 
1359 static int
1360 t3_encap(struct sge_qset *qs, struct mbuf **m)
1361 {
1362 	adapter_t *sc;
1363 	struct mbuf *m0;
1364 	struct sge_txq *txq;
1365 	struct txq_state txqs;
1366 	struct port_info *pi;
1367 	unsigned int ndesc, flits, cntrl, mlen;
1368 	int err, nsegs, tso_info = 0;
1369 
1370 	struct work_request_hdr *wrp;
1371 	struct tx_sw_desc *txsd;
1372 	struct sg_ent *sgp, *sgl;
1373 	uint32_t wr_hi, wr_lo, sgl_flits;
1374 	bus_dma_segment_t segs[TX_MAX_SEGS];
1375 
1376 	struct tx_desc *txd;
1377 
1378 	pi = qs->port;
1379 	sc = pi->adapter;
1380 	txq = &qs->txq[TXQ_ETH];
1381 	txd = &txq->desc[txq->pidx];
1382 	txsd = &txq->sdesc[txq->pidx];
1383 	sgl = txq->txq_sgl;
1384 
1385 	prefetch(txd);
1386 	m0 = *m;
1387 
1388 	mtx_assert(&qs->lock, MA_OWNED);
1389 	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1390 	KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1391 
1392 	if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1395 
1396 	if (m0->m_nextpkt != NULL) {
1397 		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1398 		ndesc = 1;
1399 		mlen = 0;
1400 	} else {
1401 		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1402 		    &m0, segs, &nsegs))) {
1403 			if (cxgb_debug)
1404 				printf("failed ... err=%d\n", err);
1405 			return (err);
1406 		}
1407 		mlen = m0->m_pkthdr.len;
1408 		ndesc = calc_tx_descs(m0, nsegs);
1409 	}
1410 	txq_prod(txq, ndesc, &txqs);
1411 
1412 	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1413 	txsd->m = m0;
1414 
1415 	if (m0->m_nextpkt != NULL) {
1416 		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1417 		int i, fidx;
1418 
1419 		if (nsegs > 7)
1420 			panic("trying to coalesce %d packets in to one WR", nsegs);
1421 		txq->txq_coalesced += nsegs;
1422 		wrp = (struct work_request_hdr *)txd;
1423 		flits = nsegs*2 + 1;
1424 
1425 		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1426 			struct cpl_tx_pkt_batch_entry *cbe;
1427 			uint64_t flit;
1428 			uint32_t *hflit = (uint32_t *)&flit;
1429 			int cflags = m0->m_pkthdr.csum_flags;
1430 
1431 			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1432 			GET_VTAG(cntrl, m0);
1433 			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1434 			if (__predict_false(!(cflags & CSUM_IP)))
1435 				cntrl |= F_TXPKT_IPCSUM_DIS;
1436 			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 			    CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1438 				cntrl |= F_TXPKT_L4CSUM_DIS;
1439 
1440 			hflit[0] = htonl(cntrl);
1441 			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 			flit |= htobe64(1 << 24);
1443 			cbe = &cpl_batch->pkt_entry[i];
1444 			cbe->cntrl = hflit[0];
1445 			cbe->len = hflit[1];
1446 			cbe->addr = htobe64(segs[i].ds_addr);
1447 		}
1448 
1449 		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1450 		    V_WR_SGLSFLT(flits)) |
1451 		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1452 		wr_lo = htonl(V_WR_LEN(flits) |
1453 		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1454 		set_wr_hdr(wrp, wr_hi, wr_lo);
1455 		wmb();
1456 		ETHER_BPF_MTAP(pi->ifp, m0);
1457 		wr_gen2(txd, txqs.gen);
1458 		check_ring_tx_db(sc, txq, 0);
1459 		return (0);
1460 	} else if (tso_info) {
1461 		uint16_t eth_type;
1462 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1463 		struct ether_header *eh;
1464 		void *l3hdr;
1465 		struct tcphdr *tcp;
1466 
1467 		txd->flit[2] = 0;
1468 		GET_VTAG(cntrl, m0);
1469 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1470 		hdr->cntrl = htonl(cntrl);
1471 		hdr->len = htonl(mlen | 0x80000000);
1472 
1473 		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1474 			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 			    m0, mlen, m0->m_pkthdr.tso_segsz,
1476 			    (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 			panic("tx tso packet too small");
1478 		}
1479 
1480 		/* Make sure that ether, ip, tcp headers are all in m0 */
1481 		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1482 			m0 = m_pullup(m0, TCPPKTHDRSIZE);
1483 			if (__predict_false(m0 == NULL)) {
1484 				/* XXX panic probably an overreaction */
1485 				panic("couldn't fit header into mbuf");
1486 			}
1487 		}
1488 
1489 		eh = mtod(m0, struct ether_header *);
1490 		eth_type = eh->ether_type;
1491 		if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 			struct ether_vlan_header *evh = (void *)eh;
1493 
1494 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1495 			l3hdr = evh + 1;
1496 			eth_type = evh->evl_proto;
1497 		} else {
1498 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1499 			l3hdr = eh + 1;
1500 		}
1501 
1502 		if (eth_type == htons(ETHERTYPE_IP)) {
1503 			struct ip *ip = l3hdr;
1504 
1505 			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1506 			tcp = (struct tcphdr *)(ip + 1);
1507 		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 			struct ip6_hdr *ip6 = l3hdr;
1509 
1510 			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 			    ("%s: CSUM_TSO with ip6_nxt %d",
1512 			    __func__, ip6->ip6_nxt));
1513 
1514 			tso_info |= F_LSO_IPV6;
1515 			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1516 			tcp = (struct tcphdr *)(ip6 + 1);
1517 		} else
1518 			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1519 
1520 		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1521 		hdr->lso_info = htonl(tso_info);
1522 
1523 		if (__predict_false(mlen <= PIO_LEN)) {
1524 			/*
1525 			 * Packet is not undersized but still fits in PIO_LEN, so send it
1526 			 * as immediate data; usually indicates a TSO bug at the higher levels.
1527 			 */
1528 			txsd->m = NULL;
1529 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1530 			flits = (mlen + 7) / 8 + 3;
1531 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1532 					  V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1533 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1534 			wr_lo = htonl(V_WR_LEN(flits) |
1535 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1536 			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1537 			wmb();
1538 			ETHER_BPF_MTAP(pi->ifp, m0);
1539 			wr_gen2(txd, txqs.gen);
1540 			check_ring_tx_db(sc, txq, 0);
1541 			m_freem(m0);
1542 			return (0);
1543 		}
1544 		flits = 3;
1545 	} else {
1546 		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1547 
1548 		GET_VTAG(cntrl, m0);
1549 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1550 		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1551 			cntrl |= F_TXPKT_IPCSUM_DIS;
1552 		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 		    CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1554 			cntrl |= F_TXPKT_L4CSUM_DIS;
1555 		cpl->cntrl = htonl(cntrl);
1556 		cpl->len = htonl(mlen | 0x80000000);
1557 
1558 		if (mlen <= PIO_LEN) {
1559 			txsd->m = NULL;
1560 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1561 			flits = (mlen + 7) / 8 + 2;
1562 
1563 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1564 			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1565 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1566 			wr_lo = htonl(V_WR_LEN(flits) |
1567 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1568 			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1569 			wmb();
1570 			ETHER_BPF_MTAP(pi->ifp, m0);
1571 			wr_gen2(txd, txqs.gen);
1572 			check_ring_tx_db(sc, txq, 0);
1573 			m_freem(m0);
1574 			return (0);
1575 		}
1576 		flits = 2;
1577 	}
1578 	wrp = (struct work_request_hdr *)txd;
1579 	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1580 	make_sgl(sgp, segs, nsegs);
1581 
1582 	sgl_flits = sgl_len(nsegs);
1583 
1584 	ETHER_BPF_MTAP(pi->ifp, m0);
1585 
1586 	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1587 	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1588 	wr_lo = htonl(V_WR_TID(txq->token));
1589 	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1590 	    sgl_flits, wr_hi, wr_lo);
1591 	check_ring_tx_db(sc, txq, 0);
1592 
1593 	return (0);
1594 }
1595 
1596 void
1597 cxgb_tx_watchdog(void *arg)
1598 {
1599 	struct sge_qset *qs = arg;
1600 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1601 
1602         if (qs->coalescing != 0 &&
1603 	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1604 	    TXQ_RING_EMPTY(qs))
1605                 qs->coalescing = 0;
1606         else if (qs->coalescing == 0 &&
1607 	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
1608                 qs->coalescing = 1;
1609 	if (TXQ_TRYLOCK(qs)) {
1610 		qs->qs_flags |= QS_FLUSHING;
1611 		cxgb_start_locked(qs);
1612 		qs->qs_flags &= ~QS_FLUSHING;
1613 		TXQ_UNLOCK(qs);
1614 	}
1615 	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1616 		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1617 		    qs, txq->txq_watchdog.c_cpu);
1618 }
1619 
1620 static void
1621 cxgb_tx_timeout(void *arg)
1622 {
1623 	struct sge_qset *qs = arg;
1624 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1625 
1626 	if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1627                 qs->coalescing = 1;
1628 	if (TXQ_TRYLOCK(qs)) {
1629 		qs->qs_flags |= QS_TIMEOUT;
1630 		cxgb_start_locked(qs);
1631 		qs->qs_flags &= ~QS_TIMEOUT;
1632 		TXQ_UNLOCK(qs);
1633 	}
1634 }
1635 
1636 static void
1637 cxgb_start_locked(struct sge_qset *qs)
1638 {
1639 	struct mbuf *m_head = NULL;
1640 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1641 	struct port_info *pi = qs->port;
1642 	struct ifnet *ifp = pi->ifp;
1643 
1644 	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1645 		reclaim_completed_tx(qs, 0, TXQ_ETH);
1646 
1647 	if (!pi->link_config.link_ok) {
1648 		TXQ_RING_FLUSH(qs);
1649 		return;
1650 	}
1651 	TXQ_LOCK_ASSERT(qs);
1652 	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1653 	    pi->link_config.link_ok) {
1654 		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1655 
1656 		if (txq->size - txq->in_use <= TX_MAX_DESC)
1657 			break;
1658 
1659 		if ((m_head = cxgb_dequeue(qs)) == NULL)
1660 			break;
1661 		/*
1662 		 *  Encapsulation can modify our pointer, and/or make it
1663 		 *  NULL on failure.  In that event, we can't requeue.
1664 		 */
1665 		if (t3_encap(qs, &m_head) || m_head == NULL)
1666 			break;
1667 
1668 		m_head = NULL;
1669 	}
1670 
1671 	if (txq->db_pending)
1672 		check_ring_tx_db(pi->adapter, txq, 1);
1673 
1674 	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1675 	    pi->link_config.link_ok)
1676 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1677 		    qs, txq->txq_timer.c_cpu);
1678 	if (m_head != NULL)
1679 		m_freem(m_head);
1680 }
1681 
1682 static int
1683 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1684 {
1685 	struct port_info *pi = qs->port;
1686 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1687 	struct buf_ring *br = txq->txq_mr;
1688 	int error, avail;
1689 
1690 	avail = txq->size - txq->in_use;
1691 	TXQ_LOCK_ASSERT(qs);
1692 
1693 	/*
1694 	 * We can only do a direct transmit if the following are true:
1695 	 * - we aren't coalescing (ring < 3/4 full)
1696 	 * - the link is up -- checked in caller
1697 	 * - there are no packets enqueued already
1698 	 * - there is space in the hardware transmit queue
1699 	 */
1700 	if (check_pkt_coalesce(qs) == 0 &&
1701 	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1702 		if (t3_encap(qs, &m)) {
1703 			if (m != NULL &&
1704 			    (error = drbr_enqueue(ifp, br, m)) != 0)
1705 				return (error);
1706 		} else {
1707 			if (txq->db_pending)
1708 				check_ring_tx_db(pi->adapter, txq, 1);
1709 
1710 			/*
1711 			 * We've bypassed the buf ring so we need to update
1712 			 * the stats directly
1713 			 */
1714 			txq->txq_direct_packets++;
1715 			txq->txq_direct_bytes += m->m_pkthdr.len;
1716 		}
1717 	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1718 		return (error);
1719 
1720 	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1721 	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1722 	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1723 		cxgb_start_locked(qs);
1724 	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1725 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1726 		    qs, txq->txq_timer.c_cpu);
1727 	return (0);
1728 }
1729 
1730 int
1731 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1732 {
1733 	struct sge_qset *qs;
1734 	struct port_info *pi = ifp->if_softc;
1735 	int error, qidx = pi->first_qset;
1736 
1737 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1738 	    ||(!pi->link_config.link_ok)) {
1739 		m_freem(m);
1740 		return (0);
1741 	}
1742 
1743 	if (m->m_flags & M_FLOWID)
1744 		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1745 
1746 	qs = &pi->adapter->sge.qs[qidx];
1747 
1748 	if (TXQ_TRYLOCK(qs)) {
1749 		/* XXX running */
1750 		error = cxgb_transmit_locked(ifp, qs, m);
1751 		TXQ_UNLOCK(qs);
1752 	} else
1753 		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1754 	return (error);
1755 }
1756 
1757 void
1758 cxgb_qflush(struct ifnet *ifp)
1759 {
1760 	/*
1761 	 * flush any enqueued mbufs in the buf_rings
1762 	 * and in the transmit queues
1763 	 * no-op for now
1764 	 */
1765 	return;
1766 }
1767 
1768 /**
1769  *	write_imm - write a packet into a Tx descriptor as immediate data
1770  *	@d: the Tx descriptor to write
1771  *	@m: the packet
1772  *	@len: the length of packet data to write as immediate data
1773  *	@gen: the generation bit value to write
1774  *
1775  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1776  *	contains a work request at its beginning.  We must write the packet
1777  *	carefully so the SGE doesn't accidentally read it before it has been
1778  *	written in its entirety.
1779  */
1780 static __inline void
1781 write_imm(struct tx_desc *d, caddr_t src,
1782 	  unsigned int len, unsigned int gen)
1783 {
1784 	struct work_request_hdr *from = (struct work_request_hdr *)src;
1785 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1786 	uint32_t wr_hi, wr_lo;
1787 
1788 	KASSERT(len <= WR_LEN && len >= sizeof(*from),
1789 	    ("%s: invalid len %d", __func__, len));
1790 
1791 	memcpy(&to[1], &from[1], len - sizeof(*from));
1792 	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1793 	    V_WR_BCNTLFLT(len & 7));
1794 	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1795 	set_wr_hdr(to, wr_hi, wr_lo);
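	/*
	 * The SGE may examine the descriptor as soon as the generation bit
	 * matches, so make sure the rest of the work request is visible
	 * before the bit is flipped.
	 */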
1796 	wmb();
1797 	wr_gen2(d, gen);
1798 }
1799 
1800 /**
1801  *	check_desc_avail - check descriptor availability on a send queue
1802  *	@adap: the adapter
1803  *	@q: the TX queue
1804  *	@m: the packet needing the descriptors
1805  *	@ndesc: the number of Tx descriptors needed
1806  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1807  *
1808  *	Checks if the requested number of Tx descriptors is available on an
1809  *	SGE send queue.  If the queue is already suspended or not enough
1810  *	descriptors are available the packet is queued for later transmission.
1811  *	Must be called with the Tx queue locked.
1812  *
1813  *	Returns 0 if enough descriptors are available, 1 if there aren't
1814  *	enough descriptors and the packet has been queued, and 2 if the caller
1815  *	needs to retry because there weren't enough descriptors at the
1816  *	beginning of the call but some freed up in the meantime.
1817  */
1818 static __inline int
1819 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1820 		 struct mbuf *m, unsigned int ndesc,
1821 		 unsigned int qid)
1822 {
1823 	/*
1824 	 * XXX We currently only use this for checking the control queue.
1825 	 * The control queue is only used for binding qsets, which happens
1826 	 * at init time, so we are guaranteed enough descriptors.
1827 	 */
1828 	if (__predict_false(!mbufq_empty(&q->sendq))) {
1829 addq_exit:	mbufq_tail(&q->sendq, m);
1830 		return 1;
1831 	}
1832 	if (__predict_false(q->size - q->in_use < ndesc)) {
1833 
1834 		struct sge_qset *qs = txq_to_qset(q, qid);
1835 
1836 		setbit(&qs->txq_stopped, qid);
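		/*
		 * Descriptors may have been freed since the shortage was
		 * detected; if so, clear the stopped bit and have the caller
		 * retry.
		 */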
1837 		if (should_restart_tx(q) &&
1838 		    test_and_clear_bit(qid, &qs->txq_stopped))
1839 			return 2;
1840 
1841 		q->stops++;
1842 		goto addq_exit;
1843 	}
1844 	return 0;
1845 }
1846 
1847 
1848 /**
1849  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1850  *	@q: the SGE control Tx queue
1851  *
1852  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1853  *	that send only immediate data (presently just the control queues) and
1854  *	thus do not have any mbufs.
1855  */
1856 static __inline void
1857 reclaim_completed_tx_imm(struct sge_txq *q)
1858 {
1859 	unsigned int reclaim = q->processed - q->cleaned;
1860 
1861 	q->in_use -= reclaim;
1862 	q->cleaned += reclaim;
1863 }
1864 
1865 /**
1866  *	ctrl_xmit - send a packet through an SGE control Tx queue
1867  *	@adap: the adapter
1868  *	@q: the control queue
1869  *	@m: the packet
1870  *
1871  *	Send a packet through an SGE control Tx queue.  Packets sent through
1872  *	a control queue must fit entirely as immediate data in a single Tx
1873  *	descriptor and have no page fragments.
1874  */
1875 static int
1876 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1877 {
1878 	int ret;
1879 	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1880 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1881 
1882 	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1883 
1884 	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1885 	wrp->wrh_lo = htonl(V_WR_TID(q->token));
1886 
1887 	TXQ_LOCK(qs);
1888 again:	reclaim_completed_tx_imm(q);
1889 
1890 	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1891 	if (__predict_false(ret)) {
1892 		if (ret == 1) {
1893 			TXQ_UNLOCK(qs);
1894 			return (ENOSPC);
1895 		}
1896 		goto again;
1897 	}
1898 	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1899 
1900 	q->in_use++;
1901 	if (++q->pidx >= q->size) {
1902 		q->pidx = 0;
1903 		q->gen ^= 1;
1904 	}
1905 	TXQ_UNLOCK(qs);
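	/* Make sure the descriptor is visible to the SGE before ringing the doorbell. */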
1906 	wmb();
1907 	t3_write_reg(adap, A_SG_KDOORBELL,
1908 	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1909 
1910 	m_free(m);
1911 	return (0);
1912 }
1913 
1914 
1915 /**
1916  *	restart_ctrlq - restart a suspended control queue
1917  *	@qs: the queue set containing the control queue
1918  *
1919  *	Resumes transmission on a suspended Tx control queue.
1920  */
1921 static void
1922 restart_ctrlq(void *data, int npending)
1923 {
1924 	struct mbuf *m;
1925 	struct sge_qset *qs = (struct sge_qset *)data;
1926 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1927 	adapter_t *adap = qs->port->adapter;
1928 
1929 	TXQ_LOCK(qs);
1930 again:	reclaim_completed_tx_imm(q);
1931 
1932 	while (q->in_use < q->size &&
1933 	       (m = mbufq_dequeue(&q->sendq)) != NULL) {
1934 
1935 		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1936 		m_free(m);
1937 
1938 		if (++q->pidx >= q->size) {
1939 			q->pidx = 0;
1940 			q->gen ^= 1;
1941 		}
1942 		q->in_use++;
1943 	}
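	/*
	 * If packets remain queued the ring is full again; stop the queue
	 * unless enough descriptors have since been freed to continue.
	 */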
1944 	if (!mbufq_empty(&q->sendq)) {
1945 		setbit(&qs->txq_stopped, TXQ_CTRL);
1946 
1947 		if (should_restart_tx(q) &&
1948 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1949 			goto again;
1950 		q->stops++;
1951 	}
1952 	TXQ_UNLOCK(qs);
1953 	t3_write_reg(adap, A_SG_KDOORBELL,
1954 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1955 }
1956 
1957 
1958 /*
1959  * Send a management message through control queue 0
1960  */
1961 int
1962 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1963 {
1964 	return ctrl_xmit(adap, &adap->sge.qs[0], m);
1965 }
1966 
1967 /**
1968  *	free_qset - free the resources of an SGE queue set
1969  *	@sc: the controller owning the queue set
1970  *	@q: the queue set
1971  *
1972  *	Release the HW and SW resources associated with an SGE queue set, such
1973  *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
1974  *	queue set must be quiesced prior to calling this.
1975  */
1976 static void
1977 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1978 {
1979 	int i;
1980 
1981 	reclaim_completed_tx(q, 0, TXQ_ETH);
1982 	if (q->txq[TXQ_ETH].txq_mr != NULL)
1983 		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1984 	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1985 		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1986 		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1987 	}
1988 
1989 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1990 		if (q->fl[i].desc) {
1991 			mtx_lock_spin(&sc->sge.reg_lock);
1992 			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1993 			mtx_unlock_spin(&sc->sge.reg_lock);
1994 			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1995 			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1996 					q->fl[i].desc_map);
1997 			bus_dma_tag_destroy(q->fl[i].desc_tag);
1998 			bus_dma_tag_destroy(q->fl[i].entry_tag);
1999 		}
2000 		if (q->fl[i].sdesc) {
2001 			free_rx_bufs(sc, &q->fl[i]);
2002 			free(q->fl[i].sdesc, M_DEVBUF);
2003 		}
2004 	}
2005 
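	/* The queue set lock is held by the caller; release it before destroying it. */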
2006 	mtx_unlock(&q->lock);
2007 	MTX_DESTROY(&q->lock);
2008 	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2009 		if (q->txq[i].desc) {
2010 			mtx_lock_spin(&sc->sge.reg_lock);
2011 			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2012 			mtx_unlock_spin(&sc->sge.reg_lock);
2013 			bus_dmamap_unload(q->txq[i].desc_tag,
2014 					q->txq[i].desc_map);
2015 			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2016 					q->txq[i].desc_map);
2017 			bus_dma_tag_destroy(q->txq[i].desc_tag);
2018 			bus_dma_tag_destroy(q->txq[i].entry_tag);
2019 		}
2020 		if (q->txq[i].sdesc) {
2021 			free(q->txq[i].sdesc, M_DEVBUF);
2022 		}
2023 	}
2024 
2025 	if (q->rspq.desc) {
2026 		mtx_lock_spin(&sc->sge.reg_lock);
2027 		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2028 		mtx_unlock_spin(&sc->sge.reg_lock);
2029 
2030 		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2031 		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2032 			        q->rspq.desc_map);
2033 		bus_dma_tag_destroy(q->rspq.desc_tag);
2034 		MTX_DESTROY(&q->rspq.lock);
2035 	}
2036 
2037 #if defined(INET6) || defined(INET)
2038 	tcp_lro_free(&q->lro.ctrl);
2039 #endif
2040 
2041 	bzero(q, sizeof(*q));
2042 }
2043 
2044 /**
2045  *	t3_free_sge_resources - free SGE resources
2046  *	@sc: the adapter softc
2047  *
2048  *	Frees resources used by the SGE queue sets.
2049  */
2050 void
2051 t3_free_sge_resources(adapter_t *sc, int nqsets)
2052 {
2053 	int i;
2054 
2055 	for (i = 0; i < nqsets; ++i) {
2056 		TXQ_LOCK(&sc->sge.qs[i]);
2057 		t3_free_qset(sc, &sc->sge.qs[i]);
2058 	}
2059 }
2060 
2061 /**
2062  *	t3_sge_start - enable SGE
2063  *	@sc: the controller softc
2064  *
2065  *	Enables the SGE for DMAs.  This is the last step in starting packet
2066  *	transfers.
2067  */
2068 void
2069 t3_sge_start(adapter_t *sc)
2070 {
2071 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2072 }
2073 
2074 /**
2075  *	t3_sge_stop - disable SGE operation
2076  *	@sc: the adapter
2077  *
2078  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2079  *	from error interrupts) or from normal process context.  In the latter
2080  *	case it also disables any pending queue restart tasklets.  Note that
2081  *	if it is called in interrupt context it cannot disable the restart
2082  *	tasklets as it cannot wait, however the tasklets will have no effect
2083  *	since the doorbells are disabled and the driver will call this again
2084  *	later from process context, at which time the tasklets will be stopped
2085  *	if they are still running.
2086  */
2087 void
2088 t3_sge_stop(adapter_t *sc)
2089 {
2090 	int i, nqsets;
2091 
2092 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2093 
2094 	if (sc->tq == NULL)
2095 		return;
2096 
2097 	for (nqsets = i = 0; i < (sc)->params.nports; i++)
2098 		nqsets += sc->port[i].nqsets;
2099 #ifdef notyet
2100 	/*
2101 	 *
2102 	 * XXX
2103 	 */
2104 	for (i = 0; i < nqsets; ++i) {
2105 		struct sge_qset *qs = &sc->sge.qs[i];
2106 
2107 		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2108 		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2109 	}
2110 #endif
2111 }
2112 
2113 /**
2114  *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
2115  *	@qs: the queue set containing the Tx queue
2116  *	@reclaimable: the number of descriptors to reclaim
2117  *	@queue: the index of the Tx queue within the queue set
2118  *
2119  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2120  *	Tx buffers.  Called with the Tx queue lock held.
2125  */
2126 void
2127 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2128 {
2129 	struct tx_sw_desc *txsd;
2130 	unsigned int cidx, mask;
2131 	struct sge_txq *q = &qs->txq[queue];
2132 
2133 #ifdef T3_TRACE
2134 	T3_TRACE2(sc->tb[q->cntxt_id & 7],
2135 		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2136 #endif
2137 	cidx = q->cidx;
2138 	mask = q->size - 1;
2139 	txsd = &q->sdesc[cidx];
2140 
2141 	mtx_assert(&qs->lock, MA_OWNED);
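	/*
	 * Walk the ring, unloading DMA maps and freeing the mbuf chains
	 * attached to the reclaimed descriptors.
	 */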
2142 	while (reclaimable--) {
2143 		prefetch(q->sdesc[(cidx + 1) & mask].m);
2144 		prefetch(q->sdesc[(cidx + 2) & mask].m);
2145 
2146 		if (txsd->m != NULL) {
2147 			if (txsd->flags & TX_SW_DESC_MAPPED) {
2148 				bus_dmamap_unload(q->entry_tag, txsd->map);
2149 				txsd->flags &= ~TX_SW_DESC_MAPPED;
2150 			}
2151 			m_freem_list(txsd->m);
2152 			txsd->m = NULL;
2153 		} else
2154 			q->txq_skipped++;
2155 
2156 		++txsd;
2157 		if (++cidx == q->size) {
2158 			cidx = 0;
2159 			txsd = q->sdesc;
2160 		}
2161 	}
2162 	q->cidx = cidx;
2163 
2164 }
2165 
2166 /**
2167  *	is_new_response - check if a response is newly written
2168  *	@r: the response descriptor
2169  *	@q: the response queue
2170  *
2171  *	Returns true if a response descriptor contains a yet unprocessed
2172  *	response.
2173  */
2174 static __inline int
2175 is_new_response(const struct rsp_desc *r,
2176     const struct sge_rspq *q)
2177 {
2178 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2179 }
2180 
2181 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2182 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2183 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2184 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2185 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2186 
2187 /* How long to delay the next interrupt in case of memory shortage, in units of 0.1us. */
2188 #define NOMEM_INTR_DELAY 2500
2189 
2190 #ifdef TCP_OFFLOAD
2191 /**
2192  *	write_ofld_wr - write an offload work request
2193  *	@adap: the adapter
2194  *	@m: the packet to send
2195  *	@q: the Tx queue
2196  *	@pidx: index of the first Tx descriptor to write
2197  *	@gen: the generation value to use
2198  *	@ndesc: number of descriptors the packet will occupy
2199  *
2200  *	Write an offload work request to send the supplied packet.  The packet
2201  *	data already carry the work request with most fields populated.
2202  */
2203 static void
2204 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2205     unsigned int pidx, unsigned int gen, unsigned int ndesc)
2206 {
2207 	unsigned int sgl_flits, flits;
2208 	int i, idx, nsegs, wrlen;
2209 	struct work_request_hdr *from;
2210 	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2211 	struct tx_desc *d = &q->desc[pidx];
2212 	struct txq_state txqs;
2213 	struct sglist_seg *segs;
2214 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2215 	struct sglist *sgl;
2216 
2217 	from = (void *)(oh + 1);	/* Start of WR within mbuf */
2218 	wrlen = m->m_len - sizeof(*oh);
2219 
2220 	if (!(oh->flags & F_HDR_SGL)) {
2221 		write_imm(d, (caddr_t)from, wrlen, gen);
2222 
2223 		/*
2224 		 * An mbuf with "real" immediate tx data will be enqueue_wr'd by
2225 		 * t3_push_frames and freed in wr_ack.  Others, like those sent
2226 		 * down by close_conn, t3_send_reset, etc. should be freed here.
2227 		 */
2228 		if (!(oh->flags & F_HDR_DF))
2229 			m_free(m);
2230 		return;
2231 	}
2232 
2233 	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2234 
2235 	sgl = oh->sgl;
2236 	flits = wrlen / 8;
2237 	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2238 
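	/*
	 * Copy the scatter/gather list into the descriptor (or into a local
	 * SGL when the WR spans multiple descriptors), packing two segments
	 * per sg_ent.
	 */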
2239 	nsegs = sgl->sg_nseg;
2240 	segs = sgl->sg_segs;
2241 	for (idx = 0, i = 0; i < nsegs; i++) {
2242 		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2243 		if (i && idx == 0)
2244 			++sgp;
2245 		sgp->len[idx] = htobe32(segs[i].ss_len);
2246 		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2247 		idx ^= 1;
2248 	}
2249 	if (idx) {
2250 		sgp->len[idx] = 0;
2251 		sgp->addr[idx] = 0;
2252 	}
2253 
2254 	sgl_flits = sgl_len(nsegs);
2255 	txqs.gen = gen;
2256 	txqs.pidx = pidx;
2257 	txqs.compl = 0;
2258 
2259 	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2260 	    from->wrh_hi, from->wrh_lo);
2261 }
2262 
2263 /**
2264  *	ofld_xmit - send a packet through an offload queue
2265  *	@adap: the adapter
2266  *	@q: the Tx offload queue
2267  *	@m: the packet
2268  *
2269  *	Send an offload packet through an SGE offload queue.
2270  */
2271 static int
2272 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2273 {
2274 	int ret;
2275 	unsigned int ndesc;
2276 	unsigned int pidx, gen;
2277 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2278 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2279 
2280 	ndesc = G_HDR_NDESC(oh->flags);
2281 
2282 	TXQ_LOCK(qs);
2283 again:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
2284 	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2285 	if (__predict_false(ret)) {
2286 		if (ret == 1) {
2287 			TXQ_UNLOCK(qs);
2288 			return (EINTR);
2289 		}
2290 		goto again;
2291 	}
2292 
2293 	gen = q->gen;
2294 	q->in_use += ndesc;
2295 	pidx = q->pidx;
2296 	q->pidx += ndesc;
2297 	if (q->pidx >= q->size) {
2298 		q->pidx -= q->size;
2299 		q->gen ^= 1;
2300 	}
2301 
2302 	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2303 	check_ring_tx_db(adap, q, 1);
2304 	TXQ_UNLOCK(qs);
2305 
2306 	return (0);
2307 }
2308 
2309 /**
2310  *	restart_offloadq - restart a suspended offload queue
2311  *	@qs: the queue set containing the offload queue
2312  *
2313  *	Resumes transmission on a suspended Tx offload queue.
2314  */
2315 static void
2316 restart_offloadq(void *data, int npending)
2317 {
2318 	struct mbuf *m;
2319 	struct sge_qset *qs = data;
2320 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2321 	adapter_t *adap = qs->port->adapter;
2322 	int cleaned;
2323 
2324 	TXQ_LOCK(qs);
2325 again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2326 
2327 	while ((m = mbufq_peek(&q->sendq)) != NULL) {
2328 		unsigned int gen, pidx;
2329 		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2330 		unsigned int ndesc = G_HDR_NDESC(oh->flags);
2331 
2332 		if (__predict_false(q->size - q->in_use < ndesc)) {
2333 			setbit(&qs->txq_stopped, TXQ_OFLD);
2334 			if (should_restart_tx(q) &&
2335 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2336 				goto again;
2337 			q->stops++;
2338 			break;
2339 		}
2340 
2341 		gen = q->gen;
2342 		q->in_use += ndesc;
2343 		pidx = q->pidx;
2344 		q->pidx += ndesc;
2345 		if (q->pidx >= q->size) {
2346 			q->pidx -= q->size;
2347 			q->gen ^= 1;
2348 		}
2349 
2350 		(void)mbufq_dequeue(&q->sendq);
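		/*
		 * The descriptors for this packet were reserved above, so the
		 * lock can be dropped while the work request is written out.
		 */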
2351 		TXQ_UNLOCK(qs);
2352 		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2353 		TXQ_LOCK(qs);
2354 	}
2355 #if USE_GTS
2356 	set_bit(TXQ_RUNNING, &q->flags);
2357 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
2358 #endif
2359 	TXQ_UNLOCK(qs);
2360 	wmb();
2361 	t3_write_reg(adap, A_SG_KDOORBELL,
2362 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2363 }
2364 
2365 /**
2366  *	t3_offload_tx - send an offload packet
2367  *	@m: the packet
2368  *
2369  *	Sends an offload packet.  F_HDR_CTRL in the ofld_hdr flags selects
2370  *	the control queue instead of the offload queue, and the header's
2371  *	qset field selects the queue set.
2372  */
2373 int
2374 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2375 {
2376 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2377 	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2378 
2379 	if (oh->flags & F_HDR_CTRL) {
2380 		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
2381 		return (ctrl_xmit(sc, qs, m));
2382 	} else
2383 		return (ofld_xmit(sc, qs, m));
2384 }
2385 #endif
2386 
2387 static void
2388 restart_tx(struct sge_qset *qs)
2389 {
2390 	struct adapter *sc = qs->port->adapter;
2391 
2392 	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2393 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2394 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2395 		qs->txq[TXQ_OFLD].restarts++;
2396 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2397 	}
2398 
2399 	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2400 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2401 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2402 		qs->txq[TXQ_CTRL].restarts++;
2403 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2404 	}
2405 }
2406 
2407 /**
2408  *	t3_sge_alloc_qset - initialize an SGE queue set
2409  *	@sc: the controller softc
2410  *	@id: the queue set id
2411  *	@nports: how many Ethernet ports will be using this queue set
2412  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
2413  *	@p: configuration parameters for this queue set
2414  *	@ntxq: number of Tx queues for the queue set
2415  *	@pi: port info for queue set
2416  *
2417  *	Allocate resources and initialize an SGE queue set.  A queue set
2418  *	comprises a response queue, two Rx free-buffer queues, and up to 3
2419  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
2420  *	queue, offload queue, and control queue.
2421  */
2422 int
2423 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2424 		  const struct qset_params *p, int ntxq, struct port_info *pi)
2425 {
2426 	struct sge_qset *q = &sc->sge.qs[id];
2427 	int i, ret = 0;
2428 
2429 	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2430 	q->port = pi;
2431 	q->adap = sc;
2432 
2433 	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2434 	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2435 		device_printf(sc->dev, "failed to allocate mbuf ring\n");
2436 		goto err;
2437 	}
2438 	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2439 	    M_NOWAIT | M_ZERO)) == NULL) {
2440 		device_printf(sc->dev, "failed to allocate ifq\n");
2441 		goto err;
2442 	}
2443 	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2444 	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2445 	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2446 	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2447 	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2448 
2449 	init_qset_cntxt(q, id);
2450 	q->idx = id;
2451 	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2452 		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2453 		    &q->fl[0].desc, &q->fl[0].sdesc,
2454 		    &q->fl[0].desc_tag, &q->fl[0].desc_map,
2455 		    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2456 		printf("error %d from alloc ring fl0\n", ret);
2457 		goto err;
2458 	}
2459 
2460 	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2461 		    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2462 		    &q->fl[1].desc, &q->fl[1].sdesc,
2463 		    &q->fl[1].desc_tag, &q->fl[1].desc_map,
2464 		    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2465 		printf("error %d from alloc ring fl1\n", ret);
2466 		goto err;
2467 	}
2468 
2469 	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2470 		    &q->rspq.phys_addr, &q->rspq.desc, NULL,
2471 		    &q->rspq.desc_tag, &q->rspq.desc_map,
2472 		    NULL, NULL)) != 0) {
2473 		printf("error %d from alloc ring rspq\n", ret);
2474 		goto err;
2475 	}
2476 
2477 	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2478 	    device_get_unit(sc->dev), irq_vec_idx);
2479 	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2480 
2481 	for (i = 0; i < ntxq; ++i) {
2482 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2483 
2484 		if ((ret = alloc_ring(sc, p->txq_size[i],
2485 			    sizeof(struct tx_desc), sz,
2486 			    &q->txq[i].phys_addr, &q->txq[i].desc,
2487 			    &q->txq[i].sdesc, &q->txq[i].desc_tag,
2488 			    &q->txq[i].desc_map,
2489 			    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2490 			printf("error %d from alloc ring tx %i\n", ret, i);
2491 			goto err;
2492 		}
2493 		mbufq_init(&q->txq[i].sendq);
2494 		q->txq[i].gen = 1;
2495 		q->txq[i].size = p->txq_size[i];
2496 	}
2497 
2498 #ifdef TCP_OFFLOAD
2499 	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2500 #endif
2501 	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2502 	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2503 	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2504 
2505 	q->fl[0].gen = q->fl[1].gen = 1;
2506 	q->fl[0].size = p->fl_size;
2507 	q->fl[1].size = p->jumbo_size;
2508 
2509 	q->rspq.gen = 1;
2510 	q->rspq.cidx = 0;
2511 	q->rspq.size = p->rspq_size;
2512 
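	/*
	 * Stop the Ethernet Tx queue when fewer descriptors remain than are
	 * needed for one maximally fragmented packet per port.
	 */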
2513 	q->txq[TXQ_ETH].stop_thres = nports *
2514 	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2515 
2516 	q->fl[0].buf_size = MCLBYTES;
2517 	q->fl[0].zone = zone_pack;
2518 	q->fl[0].type = EXT_PACKET;
2519 
2520 	if (p->jumbo_buf_size ==  MJUM16BYTES) {
2521 		q->fl[1].zone = zone_jumbo16;
2522 		q->fl[1].type = EXT_JUMBO16;
2523 	} else if (p->jumbo_buf_size ==  MJUM9BYTES) {
2524 		q->fl[1].zone = zone_jumbo9;
2525 		q->fl[1].type = EXT_JUMBO9;
2526 	} else if (p->jumbo_buf_size ==  MJUMPAGESIZE) {
2527 		q->fl[1].zone = zone_jumbop;
2528 		q->fl[1].type = EXT_JUMBOP;
2529 	} else {
2530 		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2531 		ret = EDOOFUS;
2532 		goto err;
2533 	}
2534 	q->fl[1].buf_size = p->jumbo_buf_size;
2535 
2536 	/* Allocate and setup the lro_ctrl structure */
2537 	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2538 #if defined(INET6) || defined(INET)
2539 	ret = tcp_lro_init(&q->lro.ctrl);
2540 	if (ret) {
2541 		printf("error %d from tcp_lro_init\n", ret);
2542 		goto err;
2543 	}
2544 #endif
2545 	q->lro.ctrl.ifp = pi->ifp;
2546 
2547 	mtx_lock_spin(&sc->sge.reg_lock);
2548 	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2549 				   q->rspq.phys_addr, q->rspq.size,
2550 				   q->fl[0].buf_size, 1, 0);
2551 	if (ret) {
2552 		printf("error %d from t3_sge_init_rspcntxt\n", ret);
2553 		goto err_unlock;
2554 	}
2555 
2556 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2557 		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2558 					  q->fl[i].phys_addr, q->fl[i].size,
2559 					  q->fl[i].buf_size, p->cong_thres, 1,
2560 					  0);
2561 		if (ret) {
2562 			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2563 			goto err_unlock;
2564 		}
2565 	}
2566 
2567 	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2568 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2569 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2570 				 1, 0);
2571 	if (ret) {
2572 		printf("error %d from t3_sge_init_ecntxt\n", ret);
2573 		goto err_unlock;
2574 	}
2575 
2576 	if (ntxq > 1) {
2577 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2578 					 USE_GTS, SGE_CNTXT_OFLD, id,
2579 					 q->txq[TXQ_OFLD].phys_addr,
2580 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
2581 		if (ret) {
2582 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2583 			goto err_unlock;
2584 		}
2585 	}
2586 
2587 	if (ntxq > 2) {
2588 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2589 					 SGE_CNTXT_CTRL, id,
2590 					 q->txq[TXQ_CTRL].phys_addr,
2591 					 q->txq[TXQ_CTRL].size,
2592 					 q->txq[TXQ_CTRL].token, 1, 0);
2593 		if (ret) {
2594 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2595 			goto err_unlock;
2596 		}
2597 	}
2598 
2599 	mtx_unlock_spin(&sc->sge.reg_lock);
2600 	t3_update_qset_coalesce(q, p);
2601 
2602 	refill_fl(sc, &q->fl[0], q->fl[0].size);
2603 	refill_fl(sc, &q->fl[1], q->fl[1].size);
2604 	refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2605 
2606 	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2607 		     V_NEWTIMER(q->rspq.holdoff_tmr));
2608 
2609 	return (0);
2610 
2611 err_unlock:
2612 	mtx_unlock_spin(&sc->sge.reg_lock);
2613 err:
2614 	TXQ_LOCK(q);
2615 	t3_free_qset(sc, q);
2616 
2617 	return (ret);
2618 }
2619 
2620 /*
2621  * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2622  * ethernet data.  Hardware assistance with various checksums and any vlan tag
2623  * will also be taken into account here.
2624  */
2625 void
2626 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2627 {
2628 	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2629 	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2630 	struct ifnet *ifp = pi->ifp;
2631 
2632 	if (cpl->vlan_valid) {
2633 		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2634 		m->m_flags |= M_VLANTAG;
2635 	}
2636 
2637 	m->m_pkthdr.rcvif = ifp;
2638 	/*
2639 	 * adjust after conversion to mbuf chain
2640 	 */
2641 	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2642 	m->m_len -= (sizeof(*cpl) + ethpad);
2643 	m->m_data += (sizeof(*cpl) + ethpad);
2644 
2645 	if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2646 		struct ether_header *eh = mtod(m, void *);
2647 		uint16_t eh_type;
2648 
2649 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2650 			struct ether_vlan_header *evh = mtod(m, void *);
2651 
2652 			eh_type = evh->evl_proto;
2653 		} else
2654 			eh_type = eh->ether_type;
2655 
2656 		if (ifp->if_capenable & IFCAP_RXCSUM &&
2657 		    eh_type == htons(ETHERTYPE_IP)) {
2658 			m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2659 			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2660 			m->m_pkthdr.csum_data = 0xffff;
2661 		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2662 		    eh_type == htons(ETHERTYPE_IPV6)) {
2663 			m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2664 			    CSUM_PSEUDO_HDR);
2665 			m->m_pkthdr.csum_data = 0xffff;
2666 		}
2667 	}
2668 }
2669 
2670 /**
2671  *	get_packet - return the next ingress packet buffer from a free list
2672  *	@adap: the adapter that received the packet
2673  *	@drop_thres: # of remaining buffers before we start dropping packets
2674  *	@qs: the qset that the SGE free list holding the packet belongs to
2675  *      @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
2676  *      @r: response descriptor
2677  *
2678  *	Get the next packet from a free list and complete setup of the
2679  *	mbuf.  If the packet is small we make a copy and recycle the
2680  *	original buffer, otherwise we use the original buffer itself.  If a
2681  *	positive drop threshold is supplied packets are dropped and their
2682  *	buffers recycled if (a) the number of remaining buffers is under the
2683  *	threshold and the packet is too big to copy, or (b) the packet should
2684  *	be copied but there is no memory for the copy.
2685  */
2686 static int
2687 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2688     struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2689 {
2690 
2691 	unsigned int len_cq =  ntohl(r->len_cq);
2692 	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2693 	int mask, cidx = fl->cidx;
2694 	struct rx_sw_desc *sd = &fl->sdesc[cidx];
2695 	uint32_t len = G_RSPD_LEN(len_cq);
2696 	uint32_t flags = M_EXT;
2697 	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2698 	caddr_t cl;
2699 	struct mbuf *m;
2700 	int ret = 0;
2701 
2702 	mask = fl->size - 1;
2703 	prefetch(fl->sdesc[(cidx + 1) & mask].m);
2704 	prefetch(fl->sdesc[(cidx + 2) & mask].m);
2705 	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2706 	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2707 
2708 	fl->credits--;
2709 	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2710 
2711 	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
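	/*
	 * Small, complete packets are copied into a new mbuf so the receive
	 * buffer can be recycled back onto the free list right away.
	 */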
2712 	    sopeop == RSPQ_SOP_EOP) {
2713 		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2714 			goto skip_recycle;
2715 		cl = mtod(m, void *);
2716 		memcpy(cl, sd->rxsd_cl, len);
2717 		recycle_rx_buf(adap, fl, fl->cidx);
2718 		m->m_pkthdr.len = m->m_len = len;
2719 		m->m_flags = 0;
2720 		mh->mh_head = mh->mh_tail = m;
2721 		ret = 1;
2722 		goto done;
2723 	} else {
2724 	skip_recycle:
2725 		bus_dmamap_unload(fl->entry_tag, sd->map);
2726 		cl = sd->rxsd_cl;
2727 		m = sd->m;
2728 
2729 		if ((sopeop == RSPQ_SOP_EOP) ||
2730 		    (sopeop == RSPQ_SOP))
2731 			flags |= M_PKTHDR;
2732 		m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
2733 		if (fl->zone == zone_pack) {
2734 			/*
2735 			 * restore clobbered data pointer
2736 			 */
2737 			m->m_data = m->m_ext.ext_buf;
2738 		} else {
2739 			m_cljset(m, cl, fl->type);
2740 		}
2741 		m->m_len = len;
2742 	}
2743 	switch(sopeop) {
2744 	case RSPQ_SOP_EOP:
2745 		ret = 1;
2746 		/* FALLTHROUGH */
2747 	case RSPQ_SOP:
2748 		mh->mh_head = mh->mh_tail = m;
2749 		m->m_pkthdr.len = len;
2750 		break;
2751 	case RSPQ_EOP:
2752 		ret = 1;
2753 		/* FALLTHROUGH */
2754 	case RSPQ_NSOP_NEOP:
2755 		if (mh->mh_tail == NULL) {
2756 			log(LOG_ERR, "discarding intermediate descriptor entry\n");
2757 			m_freem(m);
2758 			break;
2759 		}
2760 		mh->mh_tail->m_next = m;
2761 		mh->mh_tail = m;
2762 		mh->mh_head->m_pkthdr.len += len;
2763 		break;
2764 	}
2765 	if (cxgb_debug)
2766 		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2767 done:
2768 	if (++fl->cidx == fl->size)
2769 		fl->cidx = 0;
2770 
2771 	return (ret);
2772 }
2773 
2774 /**
2775  *	handle_rsp_cntrl_info - handles control information in a response
2776  *	@qs: the queue set corresponding to the response
2777  *	@flags: the response control flags
2778  *
2779  *	Handles the control information of an SGE response, such as GTS
2780  *	indications and completion credits for the queue set's Tx queues.
2781  *	HW coalesces credits, we don't do any extra SW coalescing.
2782  */
2783 static __inline void
2784 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2785 {
2786 	unsigned int credits;
2787 
2788 #if USE_GTS
2789 	if (flags & F_RSPD_TXQ0_GTS)
2790 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2791 #endif
2792 	credits = G_RSPD_TXQ0_CR(flags);
2793 	if (credits)
2794 		qs->txq[TXQ_ETH].processed += credits;
2795 
2796 	credits = G_RSPD_TXQ2_CR(flags);
2797 	if (credits)
2798 		qs->txq[TXQ_CTRL].processed += credits;
2799 
2800 # if USE_GTS
2801 	if (flags & F_RSPD_TXQ1_GTS)
2802 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2803 # endif
2804 	credits = G_RSPD_TXQ1_CR(flags);
2805 	if (credits)
2806 		qs->txq[TXQ_OFLD].processed += credits;
2807 
2808 }
2809 
2810 static void
2811 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2812     unsigned int sleeping)
2813 {
2814 	;
2815 }
2816 
2817 /**
2818  *	process_responses - process responses from an SGE response queue
2819  *	@adap: the adapter
2820  *	@qs: the queue set to which the response queue belongs
2821  *	@budget: how many responses can be processed in this round
2822  *
2823  *	Process responses from an SGE response queue up to the supplied budget.
2824  *	Responses include received packets as well as credits and other events
2825  *	for the queues that belong to the response queue's queue set.
2826  *	A negative budget is effectively unlimited.
2827  *
2828  *	Additionally choose the interrupt holdoff time for the next interrupt
2829  *	on this queue.  If the system is under memory shortage use a fairly
2830  *	long delay to help recovery.
2831  */
2832 static int
2833 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2834 {
2835 	struct sge_rspq *rspq = &qs->rspq;
2836 	struct rsp_desc *r = &rspq->desc[rspq->cidx];
2837 	int budget_left = budget;
2838 	unsigned int sleeping = 0;
2839 #if defined(INET6) || defined(INET)
2840 	int lro_enabled = qs->lro.enabled;
2841 	int skip_lro;
2842 	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2843 #endif
2844 	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2845 #ifdef DEBUG
2846 	static int last_holdoff = 0;
2847 	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2848 		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2849 		last_holdoff = rspq->holdoff_tmr;
2850 	}
2851 #endif
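	/* Start with the configured holdoff; it is raised below if we run out of memory. */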
2852 	rspq->next_holdoff = rspq->holdoff_tmr;
2853 
2854 	while (__predict_true(budget_left && is_new_response(r, rspq))) {
2855 		int eth, eop = 0, ethpad = 0;
2856 		uint32_t flags = ntohl(r->flags);
2857 		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2858 		uint8_t opcode = r->rss_hdr.opcode;
2859 
2860 		eth = (opcode == CPL_RX_PKT);
2861 
2862 		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2863 			struct mbuf *m;
2864 
2865 			if (cxgb_debug)
2866 				printf("async notification\n");
2867 
2868 			if (mh->mh_head == NULL) {
2869 				mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2870 				m = mh->mh_head;
2871 			} else {
2872 				m = m_gethdr(M_NOWAIT, MT_DATA);
2873 			}
2874 			if (m == NULL)
2875 				goto no_mem;
2876 
2877 			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2878 			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2879 			*mtod(m, char *) = CPL_ASYNC_NOTIF;
2880 			opcode = CPL_ASYNC_NOTIF;
2881 			eop = 1;
2882 			rspq->async_notif++;
2883 			goto skip;
2884 		} else if  (flags & F_RSPD_IMM_DATA_VALID) {
2885 			struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2886 
2887 			if (m == NULL) {
2888 		no_mem:
2889 				rspq->next_holdoff = NOMEM_INTR_DELAY;
2890 				budget_left--;
2891 				break;
2892 			}
2893 			if (mh->mh_head == NULL)
2894 				mh->mh_head = m;
2895 			else
2896 				mh->mh_tail->m_next = m;
2897 			mh->mh_tail = m;
2898 
2899 			get_imm_packet(adap, r, m);
2900 			mh->mh_head->m_pkthdr.len += m->m_len;
2901 			eop = 1;
2902 			rspq->imm_data++;
2903 		} else if (r->len_cq) {
2904 			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2905 
2906 			eop = get_packet(adap, drop_thresh, qs, mh, r);
2907 			if (eop) {
2908 				if (r->rss_hdr.hash_type && !adap->timestamp)
2909 					mh->mh_head->m_flags |= M_FLOWID;
2910 				mh->mh_head->m_pkthdr.flowid = rss_hash;
2911 			}
2912 
2913 			ethpad = 2;
2914 		} else {
2915 			rspq->pure_rsps++;
2916 		}
2917 	skip:
2918 		if (flags & RSPD_CTRL_MASK) {
2919 			sleeping |= flags & RSPD_GTS_MASK;
2920 			handle_rsp_cntrl_info(qs, flags);
2921 		}
2922 
2923 		if (!eth && eop) {
2924 			rspq->offload_pkts++;
2925 #ifdef TCP_OFFLOAD
2926 			adap->cpl_handler[opcode](qs, r, mh->mh_head);
2927 #else
2928 			m_freem(mh->mh_head);
2929 #endif
2930 			mh->mh_head = NULL;
2931 		} else if (eth && eop) {
2932 			struct mbuf *m = mh->mh_head;
2933 
2934 			t3_rx_eth(adap, m, ethpad);
2935 
2936 			/*
2937 			 * The T304 sends incoming packets on any qset.  If LRO
2938 			 * is also enabled, we could end up sending the packet up
2939 			 * lro_ctrl->ifp's input.  That is incorrect.
2940 			 *
2941 			 * The mbuf's rcvif was derived from the cpl header and
2942 			 * is accurate.  Skip LRO and just use that.
2943 			 */
2944 #if defined(INET6) || defined(INET)
2945 			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2946 
2947 			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2948 			    && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2949 			    ) {
2950 				/* successfully queued for LRO */
2951 			} else
2952 #endif
2953 			{
2954 				/*
2955 				 * LRO not enabled, packet unsuitable for LRO,
2956 				 * or unable to queue.  Pass it up right now in
2957 				 * either case.
2958 				 */
2959 				struct ifnet *ifp = m->m_pkthdr.rcvif;
2960 				(*ifp->if_input)(ifp, m);
2961 			}
2962 			mh->mh_head = NULL;
2963 
2964 		}
2965 
2966 		r++;
2967 		if (__predict_false(++rspq->cidx == rspq->size)) {
2968 			rspq->cidx = 0;
2969 			rspq->gen ^= 1;
2970 			r = rspq->desc;
2971 		}
2972 
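		/* Return response queue credits to the hardware in batches of 64. */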
2973 		if (++rspq->credits >= 64) {
2974 			refill_rspq(adap, rspq, rspq->credits);
2975 			rspq->credits = 0;
2976 		}
2977 		__refill_fl_lt(adap, &qs->fl[0], 32);
2978 		__refill_fl_lt(adap, &qs->fl[1], 32);
2979 		--budget_left;
2980 	}
2981 
2982 #if defined(INET6) || defined(INET)
2983 	/* Flush LRO */
2984 	while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2985 		struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2986 		SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2987 		tcp_lro_flush(lro_ctrl, queued);
2988 	}
2989 #endif
2990 
2991 	if (sleeping)
2992 		check_ring_db(adap, qs, sleeping);
2993 
2994 	mb();  /* commit Tx queue processed updates */
2995 	if (__predict_false(qs->txq_stopped > 1))
2996 		restart_tx(qs);
2997 
2998 	__refill_fl_lt(adap, &qs->fl[0], 512);
2999 	__refill_fl_lt(adap, &qs->fl[1], 512);
3000 	budget -= budget_left;
3001 	return (budget);
3002 }
3003 
3004 /*
3005  * A helper function that processes responses and issues GTS.
3006  */
3007 static __inline int
3008 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3009 {
3010 	int work;
3011 	static int last_holdoff = 0;
3012 
3013 	work = process_responses(adap, rspq_to_qset(rq), -1);
3014 
3015 	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3016 		printf("next_holdoff=%d\n", rq->next_holdoff);
3017 		last_holdoff = rq->next_holdoff;
3018 	}
3019 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3020 	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3021 
3022 	return (work);
3023 }
3024 
3025 
3026 /*
3027  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3028  * Handles data events from SGE response queues as well as error and other
3029  * async events as they all use the same interrupt pin.  We use one SGE
3030  * response queue per port in this mode and protect all response queues with
3031  * queue 0's lock.
3032  */
3033 void
3034 t3b_intr(void *data)
3035 {
3036 	uint32_t i, map;
3037 	adapter_t *adap = data;
3038 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3039 
3040 	t3_write_reg(adap, A_PL_CLI, 0);
3041 	map = t3_read_reg(adap, A_SG_DATA_INTR);
3042 
3043 	if (!map)
3044 		return;
3045 
3046 	if (__predict_false(map & F_ERRINTR)) {
3047 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3048 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3049 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3050 	}
3051 
3052 	mtx_lock(&q0->lock);
3053 	for_each_port(adap, i)
3054 	    if (map & (1 << i))
3055 			process_responses_gts(adap, &adap->sge.qs[i].rspq);
3056 	mtx_unlock(&q0->lock);
3057 }
3058 
3059 /*
3060  * The MSI interrupt handler.  This needs to handle data events from SGE
3061  * response queues as well as error and other async events as they all use
3062  * the same MSI vector.  We use one SGE response queue per port in this mode
3063  * and protect all response queues with queue 0's lock.
3064  */
3065 void
3066 t3_intr_msi(void *data)
3067 {
3068 	adapter_t *adap = data;
3069 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3070 	int i, new_packets = 0;
3071 
3072 	mtx_lock(&q0->lock);
3073 
3074 	for_each_port(adap, i)
3075 	    if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3076 		    new_packets = 1;
3077 	mtx_unlock(&q0->lock);
3078 	if (new_packets == 0) {
3079 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3080 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3081 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3082 	}
3083 }
3084 
3085 void
3086 t3_intr_msix(void *data)
3087 {
3088 	struct sge_qset *qs = data;
3089 	adapter_t *adap = qs->port->adapter;
3090 	struct sge_rspq *rspq = &qs->rspq;
3091 
3092 	if (process_responses_gts(adap, rspq) == 0)
3093 		rspq->unhandled_irqs++;
3094 }
3095 
3096 #define QDUMP_SBUF_SIZE		32 * 400
3097 static int
3098 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3099 {
3100 	struct sge_rspq *rspq;
3101 	struct sge_qset *qs;
3102 	int i, err, dump_end, idx;
3103 	struct sbuf *sb;
3104 	struct rsp_desc *rspd;
3105 	uint32_t data[4];
3106 
3107 	rspq = arg1;
3108 	qs = rspq_to_qset(rspq);
3109 	if (rspq->rspq_dump_count == 0)
3110 		return (0);
3111 	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3112 		log(LOG_WARNING,
3113 		    "dump count is too large %d\n", rspq->rspq_dump_count);
3114 		rspq->rspq_dump_count = 0;
3115 		return (EINVAL);
3116 	}
3117 	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3118 		log(LOG_WARNING,
3119 		    "dump start of %d is greater than queue size\n",
3120 		    rspq->rspq_dump_start);
3121 		rspq->rspq_dump_start = 0;
3122 		return (EINVAL);
3123 	}
3124 	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3125 	if (err)
3126 		return (err);
3127 	err = sysctl_wire_old_buffer(req, 0);
3128 	if (err)
3129 		return (err);
3130 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3131 
3132 	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3133 	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3134 	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3135 	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3136 	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3137 
3138 	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3139 	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3140 
3141 	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3142 	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3143 		idx = i & (RSPQ_Q_SIZE-1);
3144 
3145 		rspd = &rspq->desc[idx];
3146 		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3147 		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3148 		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3149 		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3150 		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3151 		    be32toh(rspd->len_cq), rspd->intr_gen);
3152 	}
3153 
3154 	err = sbuf_finish(sb);
3155 	/* Output a trailing NUL. */
3156 	if (err == 0)
3157 		err = SYSCTL_OUT(req, "", 1);
3158 	sbuf_delete(sb);
3159 	return (err);
3160 }
3161 
3162 static int
3163 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3164 {
3165 	struct sge_txq *txq;
3166 	struct sge_qset *qs;
3167 	int i, j, err, dump_end;
3168 	struct sbuf *sb;
3169 	struct tx_desc *txd;
3170 	uint32_t *WR, wr_hi, wr_lo, gen;
3171 	uint32_t data[4];
3172 
3173 	txq = arg1;
3174 	qs = txq_to_qset(txq, TXQ_ETH);
3175 	if (txq->txq_dump_count == 0) {
3176 		return (0);
3177 	}
3178 	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3179 		log(LOG_WARNING,
3180 		    "dump count is too large %d\n", txq->txq_dump_count);
3181 		txq->txq_dump_count = 1;
3182 		return (EINVAL);
3183 	}
3184 	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3185 		log(LOG_WARNING,
3186 		    "dump start of %d is greater than queue size\n",
3187 		    txq->txq_dump_start);
3188 		txq->txq_dump_start = 0;
3189 		return (EINVAL);
3190 	}
3191 	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3192 	if (err)
3193 		return (err);
3194 	err = sysctl_wire_old_buffer(req, 0);
3195 	if (err)
3196 		return (err);
3197 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3198 
3199 	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3200 	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3201 	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3202 	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3203 	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3204 	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3205 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3206 	    txq->txq_dump_start,
3207 	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3208 
3209 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3210 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3211 		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3212 		WR = (uint32_t *)txd->flit;
3213 		wr_hi = ntohl(WR[0]);
3214 		wr_lo = ntohl(WR[1]);
3215 		gen = G_WR_GEN(wr_lo);
3216 
3217 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3218 		    wr_hi, wr_lo, gen);
3219 		for (j = 2; j < 30; j += 4)
3220 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3221 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3222 
3223 	}
3224 	err = sbuf_finish(sb);
3225 	/* Output a trailing NUL. */
3226 	if (err == 0)
3227 		err = SYSCTL_OUT(req, "", 1);
3228 	sbuf_delete(sb);
3229 	return (err);
3230 }
3231 
3232 static int
3233 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3234 {
3235 	struct sge_txq *txq;
3236 	struct sge_qset *qs;
3237 	int i, j, err, dump_end;
3238 	struct sbuf *sb;
3239 	struct tx_desc *txd;
3240 	uint32_t *WR, wr_hi, wr_lo, gen;
3241 
3242 	txq = arg1;
3243 	qs = txq_to_qset(txq, TXQ_CTRL);
3244 	if (txq->txq_dump_count == 0) {
3245 		return (0);
3246 	}
3247 	if (txq->txq_dump_count > 256) {
3248 		log(LOG_WARNING,
3249 		    "dump count is too large %d\n", txq->txq_dump_count);
3250 		txq->txq_dump_count = 1;
3251 		return (EINVAL);
3252 	}
3253 	if (txq->txq_dump_start > 255) {
3254 		log(LOG_WARNING,
3255 		    "dump start of %d is greater than queue size\n",
3256 		    txq->txq_dump_start);
3257 		txq->txq_dump_start = 0;
3258 		return (EINVAL);
3259 	}
3260 
3261 	err = sysctl_wire_old_buffer(req, 0);
3262 	if (err != 0)
3263 		return (err);
3264 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3265 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3266 	    txq->txq_dump_start,
3267 	    (txq->txq_dump_start + txq->txq_dump_count) & 255);
3268 
3269 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3270 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3271 		txd = &txq->desc[i & (255)];
3272 		WR = (uint32_t *)txd->flit;
3273 		wr_hi = ntohl(WR[0]);
3274 		wr_lo = ntohl(WR[1]);
3275 		gen = G_WR_GEN(wr_lo);
3276 
3277 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3278 		    wr_hi, wr_lo, gen);
3279 		for (j = 2; j < 30; j += 4)
3280 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3281 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3282 
3283 	}
3284 	err = sbuf_finish(sb);
3285 	/* Output a trailing NUL. */
3286 	if (err == 0)
3287 		err = SYSCTL_OUT(req, "", 1);
3288 	sbuf_delete(sb);
3289 	return (err);
3290 }
3291 
3292 static int
3293 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3294 {
3295 	adapter_t *sc = arg1;
3296 	struct qset_params *qsp = &sc->params.sge.qset[0];
3297 	int coalesce_usecs;
3298 	struct sge_qset *qs;
3299 	int i, j, err, nqsets = 0;
3300 	struct mtx *lock;
3301 
3302 	if ((sc->flags & FULL_INIT_DONE) == 0)
3303 		return (ENXIO);
3304 
3305 	coalesce_usecs = qsp->coalesce_usecs;
3306 	err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3307 
3308 	if (err != 0) {
3309 		return (err);
3310 	}
3311 	if (coalesce_usecs == qsp->coalesce_usecs)
3312 		return (0);
3313 
3314 	for (i = 0; i < sc->params.nports; i++)
3315 		for (j = 0; j < sc->port[i].nqsets; j++)
3316 			nqsets++;
3317 
3318 	coalesce_usecs = max(1, coalesce_usecs);
3319 
3320 	for (i = 0; i < nqsets; i++) {
3321 		qs = &sc->sge.qs[i];
3322 		qsp = &sc->params.sge.qset[i];
3323 		qsp->coalesce_usecs = coalesce_usecs;
3324 
3325 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3326 			    &sc->sge.qs[0].rspq.lock;
3327 
3328 		mtx_lock(lock);
3329 		t3_update_qset_coalesce(qs, qsp);
3330 		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3331 		    V_NEWTIMER(qs->rspq.holdoff_tmr));
3332 		mtx_unlock(lock);
3333 	}
3334 
3335 	return (0);
3336 }
3337 
3338 static int
3339 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3340 {
3341 	adapter_t *sc = arg1;
3342 	int rc, timestamp;
3343 
3344 	if ((sc->flags & FULL_INIT_DONE) == 0)
3345 		return (ENXIO);
3346 
3347 	timestamp = sc->timestamp;
3348 	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3349 
3350 	if (rc != 0)
3351 		return (rc);
3352 
3353 	if (timestamp != sc->timestamp) {
3354 		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3355 		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3356 		sc->timestamp = timestamp;
3357 	}
3358 
3359 	return (0);
3360 }
3361 
3362 void
3363 t3_add_attach_sysctls(adapter_t *sc)
3364 {
3365 	struct sysctl_ctx_list *ctx;
3366 	struct sysctl_oid_list *children;
3367 
3368 	ctx = device_get_sysctl_ctx(sc->dev);
3369 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3370 
3371 	/* random information */
3372 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3373 	    "firmware_version",
3374 	    CTLFLAG_RD, &sc->fw_version,
3375 	    0, "firmware version");
3376 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3377 	    "hw_revision",
3378 	    CTLFLAG_RD, &sc->params.rev,
3379 	    0, "chip model");
3380 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3381 	    "port_types",
3382 	    CTLFLAG_RD, &sc->port_types,
3383 	    0, "type of ports");
3384 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3385 	    "enable_debug",
3386 	    CTLFLAG_RW, &cxgb_debug,
3387 	    0, "enable verbose debugging output");
3388 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3389 	    CTLFLAG_RD, &sc->tunq_coalesce,
3390 	    "#tunneled packets freed");
3391 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3392 	    "txq_overrun",
3393 	    CTLFLAG_RD, &txq_fills,
3394 	    0, "#times txq overrun");
3395 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3396 	    "core_clock",
3397 	    CTLFLAG_RD, &sc->params.vpd.cclk,
3398 	    0, "core clock frequency (in KHz)");
3399 }
3400 
3401 
3402 static const char *rspq_name = "rspq";
3403 static const char *txq_names[] =
3404 {
3405 	"txq_eth",
3406 	"txq_ofld",
3407 	"txq_ctrl"
3408 };
3409 
3410 static int
3411 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3412 {
3413 	struct port_info *p = arg1;
3414 	uint64_t *parg;
3415 
3416 	if (!p)
3417 		return (EINVAL);
3418 
3419 	parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3420 	PORT_LOCK(p);
3421 	t3_mac_update_stats(&p->mac);
3422 	PORT_UNLOCK(p);
3423 
3424 	return (sysctl_handle_64(oidp, parg, 0, req));
3425 }
3426 
3427 void
3428 t3_add_configured_sysctls(adapter_t *sc)
3429 {
3430 	struct sysctl_ctx_list *ctx;
3431 	struct sysctl_oid_list *children;
3432 	int i, j;
3433 
3434 	ctx = device_get_sysctl_ctx(sc->dev);
3435 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3436 
3437 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3438 	    "intr_coal",
3439 	    CTLTYPE_INT|CTLFLAG_RW, sc,
3440 	    0, t3_set_coalesce_usecs,
3441 	    "I", "interrupt coalescing timer (us)");
3442 
3443 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3444 	    "pkt_timestamp",
3445 	    CTLTYPE_INT | CTLFLAG_RW, sc,
3446 	    0, t3_pkt_timestamp,
3447 	    "I", "provide packet timestamp instead of connection hash");
3448 
3449 	for (i = 0; i < sc->params.nports; i++) {
3450 		struct port_info *pi = &sc->port[i];
3451 		struct sysctl_oid *poid;
3452 		struct sysctl_oid_list *poidlist;
3453 		struct mac_stats *mstats = &pi->mac.stats;
3454 
3455 		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3456 		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3457 		    pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3458 		poidlist = SYSCTL_CHILDREN(poid);
3459 		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3460 		    "nqsets", CTLFLAG_RD, &pi->nqsets,
3461 		    0, "#queue sets");
3462 
3463 		for (j = 0; j < pi->nqsets; j++) {
3464 			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3465 			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3466 					  *ctrlqpoid, *lropoid;
3467 			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3468 					       *txqpoidlist, *ctrlqpoidlist,
3469 					       *lropoidlist;
3470 			struct sge_txq *txq = &qs->txq[TXQ_ETH];
3471 
3472 			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3473 
3474 			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3475 			    qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3476 			qspoidlist = SYSCTL_CHILDREN(qspoid);
3477 
3478 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3479 					CTLFLAG_RD, &qs->fl[0].empty, 0,
3480 					"freelist #0 empty");
3481 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3482 					CTLFLAG_RD, &qs->fl[1].empty, 0,
3483 					"freelist #1 empty");
3484 
3485 			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3486 			    rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3487 			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3488 
3489 			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3490 			    txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3491 			txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3492 
3493 			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3494 			    txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3495 			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3496 
3497 			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3498 			    "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3499 			lropoidlist = SYSCTL_CHILDREN(lropoid);
3500 
3501 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3502 			    CTLFLAG_RD, &qs->rspq.size,
3503 			    0, "#entries in response queue");
3504 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3505 			    CTLFLAG_RD, &qs->rspq.cidx,
3506 			    0, "consumer index");
3507 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3508 			    CTLFLAG_RD, &qs->rspq.credits,
3509 			    0, "#credits");
3510 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3511 			    CTLFLAG_RD, &qs->rspq.starved,
3512 			    0, "#times starved");
3513 			SYSCTL_ADD_ULONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3514 			    CTLFLAG_RD, &qs->rspq.phys_addr,
3515 			    "physical address of the queue");
3516 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3517 			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3518 			    0, "start rspq dump entry");
3519 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3520 			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3521 			    0, "#rspq entries to dump");
3522 			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3523 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3524 			    0, t3_dump_rspq, "A", "dump of the response queue");
3525 
3526 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3527 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3528 			    "#tunneled packets dropped");
3529 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3530 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3531 			    0, "#tunneled packets waiting to be sent");
3532 #if 0
3533 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3534 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3535 			    0, "#tunneled packets queue producer index");
3536 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3537 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3538 			    0, "#tunneled packets queue consumer index");
3539 #endif
3540 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3541 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3542 			    0, "#tunneled packets processed by the card");
3543 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3544 			    CTLFLAG_RD, &txq->cleaned,
3545 			    0, "#tunneled packets cleaned");
3546 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3547 			    CTLFLAG_RD, &txq->in_use,
3548 			    0, "#tunneled packet slots in use");
3549 			SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
3550 			    CTLFLAG_RD, &txq->txq_frees,
3551 			    "#tunneled packets freed");
3552 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3553 			    CTLFLAG_RD, &txq->txq_skipped,
3554 			    0, "#tunneled packet descriptors skipped");
3555 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3556 			    CTLFLAG_RD, &txq->txq_coalesced,
3557 			    "#tunneled packets coalesced");
3558 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3559 			    CTLFLAG_RD, &txq->txq_enqueued,
3560 			    0, "#tunneled packets enqueued to hardware");
3561 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3562 			    CTLFLAG_RD, &qs->txq_stopped,
3563 			    0, "tx queues stopped");
3564 			SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3565 			    CTLFLAG_RD, &txq->phys_addr,
3566 			    "physical address of the queue");
3567 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3568 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3569 			    0, "txq generation");
3570 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3571 			    CTLFLAG_RD, &txq->cidx,
3572 			    0, "hardware queue cidx");
3573 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3574 			    CTLFLAG_RD, &txq->pidx,
3575 			    0, "hardware queue pidx");
3576 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3577 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3578 			    0, "txq start idx for dump");
3579 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3580 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3581 			    0, "txq #entries to dump");
3582 			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3583 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3584 			    0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3585 
3586 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3587 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3588 			    0, "ctrlq start idx for dump");
3589 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3590 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3591 			    0, "ctrlq #entries to dump");
3592 			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3593 			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3594 			    0, t3_dump_txq_ctrl, "A", "dump of the control queue");
3595 
3596 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3597 			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3598 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3599 			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3600 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3601 			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3602 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3603 			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3604 		}
3605 
3606 		/* Now add a node for mac stats. */
3607 		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3608 		    CTLFLAG_RD, NULL, "MAC statistics");
3609 		poidlist = SYSCTL_CHILDREN(poid);
3610 
3611 		/*
3612 		 * We (ab)use the length argument (arg2) to pass the offset of
3613 		 * the counter we are interested in.  This is only needed for
3614 		 * the quad counters, which are updated from the hardware so
3615 		 * that we always return the latest value.
3616 		 * sysctl_handle_macstat first updates *all* the counters from
3617 		 * the hardware and then returns the latest value of the
3618 		 * requested counter.  It would be better to update only the
3619 		 * requested counter, but t3_mac_update_stats() hides the
3620 		 * register details and we don't want to dive into all of that
3621 		 * here.
3622 		 */
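		/*
		 * For illustration only: a handler wired up this way might look
		 * roughly like the sketch below.  This is not the in-tree
		 * sysctl_handle_macstat; it assumes arg1 is the struct port_info,
		 * that the counters live at p->mac.stats, and that
		 * PORT_LOCK()/PORT_UNLOCK() serialize access to them.
		 *
		 *	static int
		 *	sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
		 *	{
		 *		struct port_info *p = arg1;
		 *		uint64_t v;
		 *
		 *		PORT_LOCK(p);
		 *		t3_mac_update_stats(&p->mac);	// refresh every counter
		 *		v = *(uint64_t *)((uintptr_t)&p->mac.stats + arg2);
		 *		PORT_UNLOCK(p);
		 *		return (sysctl_handle_64(oidp, &v, 0, req));
		 *	}
		 */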
3623 #define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3624     (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3625     sysctl_handle_macstat, "QU", 0)
3626 		CXGB_SYSCTL_ADD_QUAD(tx_octets);
3627 		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3628 		CXGB_SYSCTL_ADD_QUAD(tx_frames);
3629 		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3630 		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3631 		CXGB_SYSCTL_ADD_QUAD(tx_pause);
3632 		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3633 		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3634 		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3635 		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3636 		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3637 		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3638 		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3639 		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3640 		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3641 		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3642 		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3643 		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3644 		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3645 		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3646 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3647 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3648 		CXGB_SYSCTL_ADD_QUAD(rx_octets);
3649 		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3650 		CXGB_SYSCTL_ADD_QUAD(rx_frames);
3651 		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3652 		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3653 		CXGB_SYSCTL_ADD_QUAD(rx_pause);
3654 		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3655 		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3656 		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3657 		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3658 		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3659 		CXGB_SYSCTL_ADD_QUAD(rx_runt);
3660 		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3661 		CXGB_SYSCTL_ADD_QUAD(rx_short);
3662 		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3663 		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3664 		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3665 		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3666 		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3667 		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3668 		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3669 		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3670 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3671 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3672 #undef CXGB_SYSCTL_ADD_QUAD
3673 
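		/*
		 * The remaining MAC counters are plain unsigned longs, so they
		 * are exported directly and do not need the offset/handler
		 * indirection used for the quad counters above.
		 */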
3674 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3675     CTLFLAG_RD, &mstats->a, 0)
3676 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3677 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3678 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3679 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3680 		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3681 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3682 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3683 		CXGB_SYSCTL_ADD_ULONG(num_toggled);
3684 		CXGB_SYSCTL_ADD_ULONG(num_resets);
3685 		CXGB_SYSCTL_ADD_ULONG(link_faults);
3686 #undef CXGB_SYSCTL_ADD_ULONG
3687 	}
3688 }
3689 
3690 /**
3691  *	t3_get_desc - dump an SGE descriptor for debugging purposes
3692  *	@qs: the queue set
3693  *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3694  *	@idx: the descriptor index in the queue
3695  *	@data: where to dump the descriptor contents
3696  *
3697  *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
3698  *	size of the descriptor.
3699  */
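/*
 * Illustrative, hypothetical use only (not part of this driver): a debug
 * path could fetch the first response-queue descriptor of a queue set qs
 * roughly as follows, with the buffer sized for the descriptor type that
 * qnum selects (qnum 3 is the response queue):
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len;
 *
 *	len = t3_get_desc(qs, 3, 0, buf);
 *	if (len == sizeof(struct rsp_desc)) {
 *		// buf now holds a snapshot of the descriptor
 *	}
 */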
3700 int
3701 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3702 		unsigned char *data)
3703 {
3704 	if (qnum >= 6)
3705 		return (EINVAL);
3706 
3707 	if (qnum < 3) {
3708 		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3709 			return (EINVAL);
3710 		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3711 		return (sizeof(struct tx_desc));
3712 	}
3713 
3714 	if (qnum == 3) {
3715 		if (!qs->rspq.desc || idx >= qs->rspq.size)
3716 			return (EINVAL);
3717 		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3718 		return (sizeof(struct rsp_desc));
3719 	}
3720 
3721 	qnum -= 4;
3722 	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3723 		return (EINVAL);
3724 	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3725 	return (sizeof(struct rx_desc));
3726 }
3727