1 /**************************************************************************
2 SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 
4 Copyright (c) 2007-2009, Chelsio Inc.
5 All rights reserved.
6 
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9 
10  1. Redistributions of source code must retain the above copyright notice,
11     this list of conditions and the following disclaimer.
12 
13  2. Neither the name of the Chelsio Corporation nor the names of its
14     contributors may be used to endorse or promote products derived from
15     this software without specific prior written permission.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
28 
29 ***************************************************************************/
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_inet6.h"
35 #include "opt_inet.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/bus.h>
42 #include <sys/conf.h>
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 #include <sys/rman.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 
50 #include <sys/proc.h>
51 #include <sys/sbuf.h>
52 #include <sys/sched.h>
53 #include <sys/smp.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
58 
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
64 
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
70 
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 
74 #include <vm/vm.h>
75 #include <vm/pmap.h>
76 
77 #include <cxgb_include.h>
78 #include <sys/mvec.h>
79 
80 int	txq_fills = 0;
81 int	multiq_tx_enable = 1;
82 
83 #ifdef TCP_OFFLOAD
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
85 #endif
86 
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90     "size of per-queue mbuf ring");
91 
92 static int cxgb_tx_coalesce_force = 0;
93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
94     &cxgb_tx_coalesce_force, 0,
95     "coalesce small packets into a single work request regardless of ring state");
96 
97 #define	COALESCE_START_DEFAULT		(TX_ETH_Q_SIZE>>1)
98 #define	COALESCE_START_MAX		(TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
99 #define	COALESCE_STOP_DEFAULT		(TX_ETH_Q_SIZE>>2)
100 #define	COALESCE_STOP_MIN		(TX_ETH_Q_SIZE>>5)
101 #define	TX_RECLAIM_DEFAULT		(TX_ETH_Q_SIZE>>5)
102 #define	TX_RECLAIM_MAX			(TX_ETH_Q_SIZE>>2)
103 #define	TX_RECLAIM_MIN			(TX_ETH_Q_SIZE>>6)
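/*
 * These thresholds are fractions of the Tx ring size: coalescing starts by
 * default at 1/2 full (capped at 7/8) and stops at 1/4 full (floored at
 * 1/32); the Tx reclaim threshold defaults to 1/32 of the ring and is reset
 * to that default by reclaim_completed_tx() if tuned outside [1/64, 1/4].
 */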
104 
105 
106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
108     &cxgb_tx_coalesce_enable_start, 0,
109     "coalesce enable threshold");
110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
112     &cxgb_tx_coalesce_enable_stop, 0,
113     "coalesce disable threshold");
114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
116     &cxgb_tx_reclaim_threshold, 0,
117     "tx cleaning minimum threshold");
118 
119 /*
120  * XXX don't re-enable this until TOE stops assuming
121  * we have an m_ext
122  */
123 static int recycle_enable = 0;
124 
125 extern int cxgb_use_16k_clusters;
126 extern int nmbjumbop;
127 extern int nmbjumbo9;
128 extern int nmbjumbo16;
129 
130 #define USE_GTS 0
131 
132 #define SGE_RX_SM_BUF_SIZE	1536
133 #define SGE_RX_DROP_THRES	16
134 #define SGE_RX_COPY_THRES	128
135 
136 /*
137  * Period of the Tx buffer reclaim timer.  This timer does not need to run
138  * frequently as Tx buffers are usually reclaimed by new Tx packets.
139  */
140 #define TX_RECLAIM_PERIOD       (hz >> 1)
141 
142 /*
143  * Values for sge_txq.flags
144  */
145 enum {
146 	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
147 	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
148 };
149 
150 struct tx_desc {
151 	uint64_t	flit[TX_DESC_FLITS];
152 } __packed;
153 
154 struct rx_desc {
155 	uint32_t	addr_lo;
156 	uint32_t	len_gen;
157 	uint32_t	gen2;
158 	uint32_t	addr_hi;
159 } __packed;
160 
161 struct rsp_desc {               /* response queue descriptor */
162 	struct rss_header	rss_hdr;
163 	uint32_t		flags;
164 	uint32_t		len_cq;
165 	uint8_t			imm_data[47];
166 	uint8_t			intr_gen;
167 } __packed;
168 
169 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
170 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
171 #define RX_SW_DESC_INUSE        (1 << 3)
172 #define TX_SW_DESC_MAPPED       (1 << 4)
173 
174 #define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
175 #define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
176 #define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
177 #define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
178 
179 struct tx_sw_desc {                /* SW state per Tx descriptor */
180 	struct mbuf	*m;
181 	bus_dmamap_t	map;
182 	int		flags;
183 };
184 
185 struct rx_sw_desc {                /* SW state per Rx descriptor */
186 	caddr_t		rxsd_cl;
187 	struct mbuf	*m;
188 	bus_dmamap_t	map;
189 	int		flags;
190 };
191 
192 struct txq_state {
193 	unsigned int	compl;
194 	unsigned int	gen;
195 	unsigned int	pidx;
196 };
197 
198 struct refill_fl_cb_arg {
199 	int               error;
200 	bus_dma_segment_t seg;
201 	int               nseg;
202 };
203 
204 
205 /*
206  * Maps a number of flits to the number of Tx descriptors that can hold them.
207  * The formula is
208  *
209  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
210  *
211  * HW allows up to 4 descriptors to be combined into a WR.
212  */
213 static uint8_t flit_desc_map[] = {
214 	0,
215 #if SGE_NUM_GENBITS == 1
216 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220 #elif SGE_NUM_GENBITS == 2
221 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
225 #else
226 # error "SGE_NUM_GENBITS must be 1 or 2"
227 #endif
228 };
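/*
 * With SGE_NUM_GENBITS == 2 the last flit of each Tx descriptor is reserved
 * for the generation bits (see wr_gen2() below), which is why every row of
 * the two-genbit table holds one fewer entry than the one-genbit table.
 */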
229 
230 #define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
231 #define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
232 #define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
233 #define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
234 #define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
236 	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238 #define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
239 	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240 #define	TXQ_RING_DEQUEUE(qs) \
241 	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
242 
243 int cxgb_debug = 0;
244 
245 static void sge_timer_cb(void *arg);
246 static void sge_timer_reclaim(void *arg, int ncount);
247 static void sge_txq_reclaim_handler(void *arg, int ncount);
248 static void cxgb_start_locked(struct sge_qset *qs);
249 
250 /*
251  * XXX need to cope with bursty scheduling by looking at a wider
252  * window than we do now when determining the need for coalescing.
253  *
254  */
255 static __inline uint64_t
256 check_pkt_coalesce(struct sge_qset *qs)
257 {
258         struct adapter *sc;
259         struct sge_txq *txq;
260 	uint8_t *fill;
261 
262 	if (__predict_false(cxgb_tx_coalesce_force))
263 		return (1);
264 	txq = &qs->txq[TXQ_ETH];
265         sc = qs->port->adapter;
266 	fill = &sc->tunq_fill[qs->idx];
267 
268 	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
269 		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
270 	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
271 		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
272 	/*
273 	 * Once the hardware transmit queue fills past the coalesce-enable
274 	 * threshold we mark it as coalescing; we drop back out of coalescing
275 	 * when it drains below the disable threshold and there are no packets
276 	 * enqueued.  This provides some degree of hysteresis.
277 	 */
278         if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
279 	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
280                 *fill = 0;
281         else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
282                 *fill = 1;
283 
284 	return (sc->tunq_coalesce);
285 }
286 
287 #ifdef __LP64__
288 static void
289 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
290 {
291 	uint64_t wr_hilo;
292 #if _BYTE_ORDER == _LITTLE_ENDIAN
293 	wr_hilo = wr_hi;
294 	wr_hilo |= (((uint64_t)wr_lo)<<32);
295 #else
296 	wr_hilo = wr_lo;
297 	wr_hilo |= (((uint64_t)wr_hi)<<32);
298 #endif
299 	wrp->wrh_hilo = wr_hilo;
300 }
301 #else
302 static void
303 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
304 {
305 
306 	wrp->wrh_hi = wr_hi;
307 	wmb();
308 	wrp->wrh_lo = wr_lo;
309 }
310 #endif
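/*
 * On LP64 both halves of the work request header are published with a single
 * 64-bit store; on 32-bit hosts the high word is written first and the write
 * barrier keeps it visible before the low word, which carries the generation
 * bit the SGE uses to decide whether the descriptor is valid.
 */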
311 
312 struct coalesce_info {
313 	int count;
314 	int nbytes;
315 	int noncoal;
316 };
317 
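/*
 * coalesce_check() decides whether the next packet may join the current
 * coalesced batch: at most 7 packets and roughly 10500 bytes per batch, and
 * a packet that is chained or crosses a page boundary ends the batch because
 * each entry of the batched work request built in t3_encap() carries a
 * single DMA segment.
 */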
318 static int
319 coalesce_check(struct mbuf *m, void *arg)
320 {
321 	struct coalesce_info *ci = arg;
322 
323 	if ((m->m_next != NULL) ||
324 	    ((mtod(m, vm_offset_t) & PAGE_MASK) + m->m_len > PAGE_SIZE))
325 		ci->noncoal = 1;
326 
327 	if ((ci->count == 0) || (ci->noncoal == 0 && (ci->count < 7) &&
328 	    (ci->nbytes + m->m_len <= 10500))) {
329 		ci->count++;
330 		ci->nbytes += m->m_len;
331 		return (1);
332 	}
333 	return (0);
334 }
335 
336 static struct mbuf *
337 cxgb_dequeue(struct sge_qset *qs)
338 {
339 	struct mbuf *m, *m_head, *m_tail;
340 	struct coalesce_info ci;
341 
342 
343 	if (check_pkt_coalesce(qs) == 0)
344 		return TXQ_RING_DEQUEUE(qs);
345 
346 	m_head = m_tail = NULL;
347 	ci.count = ci.nbytes = ci.noncoal = 0;
348 	do {
349 		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
350 		if (m_head == NULL) {
351 			m_tail = m_head = m;
352 		} else if (m != NULL) {
353 			m_tail->m_nextpkt = m;
354 			m_tail = m;
355 		}
356 	} while (m != NULL);
357 	if (ci.count > 7)
358 		panic("trying to coalesce %d packets into one WR", ci.count);
359 	return (m_head);
360 }
361 
362 /**
363  *	reclaim_completed_tx - reclaims completed Tx descriptors
364  *	@qs: the queue set whose Tx queue should be reclaimed
365  *	@reclaim_min: do nothing unless at least this many descriptors are reclaimable
 *	@queue: the index of the Tx queue to reclaim completed descriptors from
366  *
367  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
368  *	and frees the associated buffers if possible.  Called with the Tx
369  *	queue's lock held.
370  */
371 static __inline int
372 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
373 {
374 	struct sge_txq *q = &qs->txq[queue];
375 	int reclaim = desc_reclaimable(q);
376 
377 	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
378 	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
379 		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
380 
381 	if (reclaim < reclaim_min)
382 		return (0);
383 
384 	mtx_assert(&qs->lock, MA_OWNED);
385 	if (reclaim > 0) {
386 		t3_free_tx_desc(qs, reclaim, queue);
387 		q->cleaned += reclaim;
388 		q->in_use -= reclaim;
389 	}
390 	if (isset(&qs->txq_stopped, TXQ_ETH))
391                 clrbit(&qs->txq_stopped, TXQ_ETH);
392 
393 	return (reclaim);
394 }
395 
396 #ifdef DEBUGNET
397 int
398 cxgb_debugnet_poll_tx(struct sge_qset *qs)
399 {
400 
401 	return (reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH));
402 }
403 #endif
404 
405 /**
406  *	should_restart_tx - are there enough resources to restart a Tx queue?
407  *	@q: the Tx queue
408  *
409  *	Checks if there are enough descriptors to restart a suspended Tx queue.
410  */
411 static __inline int
412 should_restart_tx(const struct sge_txq *q)
413 {
414 	unsigned int r = q->processed - q->cleaned;
415 
416 	return q->in_use - r < (q->size >> 1);
417 }
418 
419 /**
420  *	t3_sge_init - initialize SGE
421  *	@adap: the adapter
422  *	@p: the SGE parameters
423  *
424  *	Performs SGE initialization needed every time after a chip reset.
425  *	We do not initialize any of the queue sets here; instead the driver
426  *	top-level must request those individually.  We also do not enable DMA
427  *	here, that should be done after the queues have been set up.
428  */
429 void
430 t3_sge_init(adapter_t *adap, struct sge_params *p)
431 {
432 	u_int ctrl, ups;
433 
434 	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
435 
436 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
437 	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
438 	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
439 	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
440 #if SGE_NUM_GENBITS == 1
441 	ctrl |= F_EGRGENCTRL;
442 #endif
443 	if (adap->params.rev > 0) {
444 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
445 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
446 	}
447 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
448 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
449 		     V_LORCQDRBTHRSH(512));
450 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
451 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
452 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
453 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
454 		     adap->params.rev < T3_REV_C ? 1000 : 500);
455 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
456 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
457 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
458 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
459 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
460 }
461 
462 
463 /**
464  *	sgl_len - calculates the size of an SGL of the given capacity
465  *	@n: the number of SGL entries
466  *
467  *	Calculates the number of flits needed for a scatter/gather list that
468  *	can hold the given number of entries.
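 *	Each sg_ent packs two address/length pairs into three flits, so, for
 *	example, 3 entries need 5 flits and 4 entries need 6.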
469  */
470 static __inline unsigned int
471 sgl_len(unsigned int n)
472 {
473 	return ((3 * n) / 2 + (n & 1));
474 }
475 
476 /**
477  *	get_imm_packet - return the next ingress packet buffer from a response
478  *	@resp: the response descriptor containing the packet data
479  *
480  *	Return a packet containing the immediate data of the given response.
481  */
482 static int
483 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
484 {
485 
486 	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
487 		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
488 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
489 	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
490 		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
491 		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
492 	} else
493 		m->m_len = IMMED_PKT_SIZE;
494 	m->m_ext.ext_buf = NULL;
495 	m->m_ext.ext_type = 0;
496 	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
497 	return (0);
498 }
499 
500 static __inline u_int
501 flits_to_desc(u_int n)
502 {
503 	return (flit_desc_map[n]);
504 }
505 
506 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
507 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
508 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
509 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
510 		    F_HIRCQPARITYERROR)
511 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
512 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
513 		      F_RSPQDISABLED)
514 
515 /**
516  *	t3_sge_err_intr_handler - SGE async event interrupt handler
517  *	@adapter: the adapter
518  *
519  *	Interrupt handler for SGE asynchronous (non-data) events.
520  */
521 void
522 t3_sge_err_intr_handler(adapter_t *adapter)
523 {
524 	unsigned int v, status;
525 
526 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
527 	if (status & SGE_PARERR)
528 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
529 			 status & SGE_PARERR);
530 	if (status & SGE_FRAMINGERR)
531 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
532 			 status & SGE_FRAMINGERR);
533 	if (status & F_RSPQCREDITOVERFOW)
534 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
535 
536 	if (status & F_RSPQDISABLED) {
537 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
538 
539 		CH_ALERT(adapter,
540 			 "packet delivered to disabled response queue (0x%x)\n",
541 			 (v >> S_RSPQ0DISABLED) & 0xff);
542 	}
543 
544 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
545 	if (status & SGE_FATALERR)
546 		t3_fatal_err(adapter);
547 }
548 
549 void
550 t3_sge_prep(adapter_t *adap, struct sge_params *p)
551 {
552 	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
553 
554 	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
555 	nqsets *= adap->params.nports;
556 
557 	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
558 
559 	while (!powerof2(fl_q_size))
560 		fl_q_size--;
561 
562 	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
563 	    is_offload(adap);
564 
565 	if (use_16k) {
566 		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
567 		jumbo_buf_size = MJUM16BYTES;
568 	} else {
569 		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
570 		jumbo_buf_size = MJUM9BYTES;
571 	}
572 	while (!powerof2(jumbo_q_size))
573 		jumbo_q_size--;
574 
575 	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
576 		device_printf(adap->dev,
577 		    "Insufficient clusters and/or jumbo buffers.\n");
578 
579 	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
580 
581 	for (i = 0; i < SGE_QSETS; ++i) {
582 		struct qset_params *q = p->qset + i;
583 
584 		if (adap->params.nports > 2) {
585 			q->coalesce_usecs = 50;
586 		} else {
587 #ifdef INVARIANTS
588 			q->coalesce_usecs = 10;
589 #else
590 			q->coalesce_usecs = 5;
591 #endif
592 		}
593 		q->polling = 0;
594 		q->rspq_size = RSPQ_Q_SIZE;
595 		q->fl_size = fl_q_size;
596 		q->jumbo_size = jumbo_q_size;
597 		q->jumbo_buf_size = jumbo_buf_size;
598 		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
599 		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
600 		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
601 		q->cong_thres = 0;
602 	}
603 }
604 
605 int
606 t3_sge_alloc(adapter_t *sc)
607 {
608 
609 	/* The parent tag. */
610 	if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
611 				1, 0,			/* algnmnt, boundary */
612 				BUS_SPACE_MAXADDR,	/* lowaddr */
613 				BUS_SPACE_MAXADDR,	/* highaddr */
614 				NULL, NULL,		/* filter, filterarg */
615 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
616 				BUS_SPACE_UNRESTRICTED, /* nsegments */
617 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
618 				0,			/* flags */
619 				NULL, NULL,		/* lock, lockarg */
620 				&sc->parent_dmat)) {
621 		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
622 		return (ENOMEM);
623 	}
624 
625 	/*
626 	 * DMA tag for normal sized RX frames
627 	 */
628 	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
629 		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
630 		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
631 		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
632 		return (ENOMEM);
633 	}
634 
635 	/*
636 	 * DMA tag for jumbo sized RX frames.
637 	 */
638 	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
639 		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
640 		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
641 		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
642 		return (ENOMEM);
643 	}
644 
645 	/*
646 	 * DMA tag for TX frames.
647 	 */
648 	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
649 		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
650 		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
651 		NULL, NULL, &sc->tx_dmat)) {
652 		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
653 		return (ENOMEM);
654 	}
655 
656 	return (0);
657 }
658 
659 int
660 t3_sge_free(struct adapter * sc)
661 {
662 
663 	if (sc->tx_dmat != NULL)
664 		bus_dma_tag_destroy(sc->tx_dmat);
665 
666 	if (sc->rx_jumbo_dmat != NULL)
667 		bus_dma_tag_destroy(sc->rx_jumbo_dmat);
668 
669 	if (sc->rx_dmat != NULL)
670 		bus_dma_tag_destroy(sc->rx_dmat);
671 
672 	if (sc->parent_dmat != NULL)
673 		bus_dma_tag_destroy(sc->parent_dmat);
674 
675 	return (0);
676 }
677 
678 void
679 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
680 {
681 
682 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
683 	qs->rspq.polling = 0 /* p->polling */;
684 }
685 
686 #if !defined(__i386__) && !defined(__amd64__)
687 static void
688 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
689 {
690 	struct refill_fl_cb_arg *cb_arg = arg;
691 
692 	cb_arg->error = error;
693 	cb_arg->seg = segs[0];
694 	cb_arg->nseg = nseg;
695 
696 }
697 #endif
698 /**
699  *	refill_fl - refill an SGE free-buffer list
700  *	@sc: the controller softc
701  *	@q: the free-list to refill
702  *	@n: the number of new buffers to allocate
703  *
704  *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
705  *	The caller must ensure that @n does not exceed the queue's capacity.
706  */
707 static void
708 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
709 {
710 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
711 	struct rx_desc *d = &q->desc[q->pidx];
712 	struct refill_fl_cb_arg cb_arg;
713 	struct mbuf *m;
714 	caddr_t cl;
715 	int err;
716 
717 	cb_arg.error = 0;
718 	while (n--) {
719 		/*
720 		 * We allocate an uninitialized mbuf + cluster, mbuf is
721 		 * initialized after rx.
722 		 */
723 		if (q->zone == zone_pack) {
724 			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
725 				break;
726 			cl = m->m_ext.ext_buf;
727 		} else {
728 			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
729 				break;
730 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
731 				uma_zfree(q->zone, cl);
732 				break;
733 			}
734 		}
735 		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
736 			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
737 				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
738 				uma_zfree(q->zone, cl);
739 				goto done;
740 			}
741 			sd->flags |= RX_SW_DESC_MAP_CREATED;
742 		}
743 #if !defined(__i386__) && !defined(__amd64__)
744 		err = bus_dmamap_load(q->entry_tag, sd->map,
745 		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
746 
747 		if (err != 0 || cb_arg.error) {
748 			if (q->zone != zone_pack)
749 				uma_zfree(q->zone, cl);
750 			m_free(m);
751 			goto done;
752 		}
753 #else
754 		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
755 #endif
756 		sd->flags |= RX_SW_DESC_INUSE;
757 		sd->rxsd_cl = cl;
758 		sd->m = m;
759 		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
760 		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
761 		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
762 		d->gen2 = htobe32(V_FLD_GEN2(q->gen));
763 
764 		d++;
765 		sd++;
766 
767 		if (++q->pidx == q->size) {
768 			q->pidx = 0;
769 			q->gen ^= 1;
770 			sd = q->sdesc;
771 			d = q->desc;
772 		}
773 		q->credits++;
774 		q->db_pending++;
775 	}
776 
777 done:
778 	if (q->db_pending >= 32) {
779 		q->db_pending = 0;
780 		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
781 	}
782 }
783 
784 
785 /**
786  *	free_rx_bufs - free the Rx buffers on an SGE free list
787  *	@sc: the controller softc
788  *	@q: the SGE free list to clean up
789  *
790  *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
791  *	this queue should be stopped before calling this function.
792  */
793 static void
794 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
795 {
796 	u_int cidx = q->cidx;
797 
798 	while (q->credits--) {
799 		struct rx_sw_desc *d = &q->sdesc[cidx];
800 
801 		if (d->flags & RX_SW_DESC_INUSE) {
802 			bus_dmamap_unload(q->entry_tag, d->map);
803 			bus_dmamap_destroy(q->entry_tag, d->map);
804 			if (q->zone == zone_pack) {
805 				m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
806 				uma_zfree(zone_pack, d->m);
807 			} else {
808 				m_init(d->m, M_NOWAIT, MT_DATA, 0);
809 				uma_zfree(zone_mbuf, d->m);
810 				uma_zfree(q->zone, d->rxsd_cl);
811 			}
812 		}
813 
814 		d->rxsd_cl = NULL;
815 		d->m = NULL;
816 		if (++cidx == q->size)
817 			cidx = 0;
818 	}
819 }
820 
821 static __inline void
822 __refill_fl(adapter_t *adap, struct sge_fl *fl)
823 {
824 	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
825 }
826 
827 static __inline void
828 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
829 {
830 	uint32_t reclaimable = fl->size - fl->credits;
831 
832 	if (reclaimable > 0)
833 		refill_fl(adap, fl, min(max, reclaimable));
834 }
835 
836 /**
837  *	recycle_rx_buf - recycle a receive buffer
838  *	@adapter: the adapter
839  *	@q: the SGE free list
840  *	@idx: index of buffer to recycle
841  *
842  *	Recycles the specified buffer on the given free list by adding it at
843  *	the next available slot on the list.
844  */
845 static void
846 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
847 {
848 	struct rx_desc *from = &q->desc[idx];
849 	struct rx_desc *to   = &q->desc[q->pidx];
850 
851 	q->sdesc[q->pidx] = q->sdesc[idx];
852 	to->addr_lo = from->addr_lo;        /* already big endian */
853 	to->addr_hi = from->addr_hi;        /* likewise */
854 	wmb();	/* necessary ? */
855 	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
856 	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
857 	q->credits++;
858 
859 	if (++q->pidx == q->size) {
860 		q->pidx = 0;
861 		q->gen ^= 1;
862 	}
863 	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
864 }
865 
866 static void
867 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
868 {
869 	uint32_t *addr;
870 
871 	addr = arg;
872 	*addr = segs[0].ds_addr;
873 }
874 
875 static int
876 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
877     bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
878     bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
879 {
880 	size_t len = nelem * elem_size;
881 	void *s = NULL;
882 	void *p = NULL;
883 	int err;
884 
885 	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
886 				      BUS_SPACE_MAXADDR_32BIT,
887 				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
888 				      len, 0, NULL, NULL, tag)) != 0) {
889 		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
890 		return (ENOMEM);
891 	}
892 
893 	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
894 				    map)) != 0) {
895 		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
896 		return (ENOMEM);
897 	}
898 
899 	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
900 	bzero(p, len);
901 	*(void **)desc = p;
902 
903 	if (sw_size) {
904 		len = nelem * sw_size;
905 		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
906 		*(void **)sdesc = s;
907 	}
908 	if (parent_entry_tag == NULL)
909 		return (0);
910 
911 	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
912 				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
913 		                      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
914 				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
915 		                      NULL, NULL, entry_tag)) != 0) {
916 		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
917 		return (ENOMEM);
918 	}
919 	return (0);
920 }
921 
922 static void
923 sge_slow_intr_handler(void *arg, int ncount)
924 {
925 	adapter_t *sc = arg;
926 
927 	t3_slow_intr_handler(sc);
928 	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
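	/* The read-back flushes the posted write that re-enables interrupts. */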
929 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
930 }
931 
932 /**
933  *	sge_timer_cb - perform periodic maintenance of the SGE queue sets
934  *	@arg: the adapter whose SGE queue sets need maintaining
935  *
936  *	Runs periodically from a timer to perform maintenance of an SGE queue
937  *	set.  It performs four tasks:
938  *
939  *	a) Cleans up any completed Tx descriptors that may still be pending.
940  *	Normal descriptor cleanup happens when new packets are added to a Tx
941  *	queue so this timer is relatively infrequent and does any cleanup only
942  *	if the Tx queue has not seen any new packets in a while.  We make a
943  *	best effort attempt to reclaim descriptors, in that we don't wait
944  *	around if we cannot get a queue's lock (which most likely is because
945  *	someone else is queueing new packets and so will also handle the clean
946  *	up).  Since control queues use immediate data exclusively we don't
947  *	bother cleaning them up here.
948  *
949  *	b) Replenishes Rx queues that have run out due to memory shortage.
950  *	Normally new Rx buffers are added when existing ones are consumed but
951  *	when out of memory a queue can become empty.  We try to add only a few
952  *	buffers here, the queue will be replenished fully as these new buffers
953  *	are used up if memory shortage has subsided.
954  *
955  *	c) Return coalesced response queue credits in case a response queue is
956  *	starved.
957  *
958  *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
959  *	fifo overflows and the FW doesn't implement any recovery scheme yet.
960  */
961 static void
962 sge_timer_cb(void *arg)
963 {
964 	adapter_t *sc = arg;
965 	if ((sc->flags & USING_MSIX) == 0) {
966 
967 		struct port_info *pi;
968 		struct sge_qset *qs;
969 		struct sge_txq  *txq;
970 		int i, j;
971 		int reclaim_ofl, refill_rx;
972 
973 		if (sc->open_device_map == 0)
974 			return;
975 
976 		for (i = 0; i < sc->params.nports; i++) {
977 			pi = &sc->port[i];
978 			for (j = 0; j < pi->nqsets; j++) {
979 				qs = &sc->sge.qs[pi->first_qset + j];
980 				txq = &qs->txq[0];
981 				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
982 				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
983 				    (qs->fl[1].credits < qs->fl[1].size));
984 				if (reclaim_ofl || refill_rx) {
985 					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
986 					break;
987 				}
988 			}
989 		}
990 	}
991 
992 	if (sc->params.nports > 2) {
993 		int i;
994 
995 		for_each_port(sc, i) {
996 			struct port_info *pi = &sc->port[i];
997 
998 			t3_write_reg(sc, A_SG_KDOORBELL,
999 				     F_SELEGRCNTX |
1000 				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
1001 		}
1002 	}
1003 	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1004 	    sc->open_device_map != 0)
1005 		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1006 }
1007 
1008 /*
1009  * This is meant to be a catch-all function to keep sge state private
1010  * to sge.c
1011  *
1012  */
1013 int
1014 t3_sge_init_adapter(adapter_t *sc)
1015 {
1016 	callout_init(&sc->sge_timer_ch, 1);
1017 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1018 	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1019 	return (0);
1020 }
1021 
1022 int
1023 t3_sge_reset_adapter(adapter_t *sc)
1024 {
1025 	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1026 	return (0);
1027 }
1028 
1029 int
1030 t3_sge_init_port(struct port_info *pi)
1031 {
1032 	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1033 	return (0);
1034 }
1035 
1036 /**
1037  *	refill_rspq - replenish an SGE response queue
1038  *	@adapter: the adapter
1039  *	@q: the response queue to replenish
1040  *	@credits: how many new responses to make available
1041  *
1042  *	Replenishes a response queue by making the supplied number of responses
1043  *	available to HW.
1044  */
1045 static __inline void
1046 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1047 {
1048 
1049 	/* mbufs are allocated on demand when a rspq entry is processed. */
1050 	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1051 		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1052 }
1053 
1054 static void
1055 sge_txq_reclaim_handler(void *arg, int ncount)
1056 {
1057 	struct sge_qset *qs = arg;
1058 	int i;
1059 
1060 	for (i = 0; i < 3; i++)
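	/* Reclaim all three Tx queues in the set (ETH, OFLD, and CTRL). */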
1061 		reclaim_completed_tx(qs, 16, i);
1062 }
1063 
1064 static void
1065 sge_timer_reclaim(void *arg, int ncount)
1066 {
1067 	struct port_info *pi = arg;
1068 	int i, nqsets = pi->nqsets;
1069 	adapter_t *sc = pi->adapter;
1070 	struct sge_qset *qs;
1071 	struct mtx *lock;
1072 
1073 	KASSERT((sc->flags & USING_MSIX) == 0,
1074 	    ("can't call timer reclaim for msi-x"));
1075 
1076 	for (i = 0; i < nqsets; i++) {
1077 		qs = &sc->sge.qs[pi->first_qset + i];
1078 
1079 		reclaim_completed_tx(qs, 16, TXQ_OFLD);
1080 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1081 			    &sc->sge.qs[0].rspq.lock;
1082 
1083 		if (mtx_trylock(lock)) {
1084 			/* XXX currently assume that we are *NOT* polling */
1085 			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1086 
1087 			if (qs->fl[0].credits < qs->fl[0].size - 16)
1088 				__refill_fl(sc, &qs->fl[0]);
1089 			if (qs->fl[1].credits < qs->fl[1].size - 16)
1090 				__refill_fl(sc, &qs->fl[1]);
1091 
1092 			if (status & (1 << qs->rspq.cntxt_id)) {
1093 				if (qs->rspq.credits) {
1094 					refill_rspq(sc, &qs->rspq, 1);
1095 					qs->rspq.credits--;
1096 					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1097 					    1 << qs->rspq.cntxt_id);
1098 				}
1099 			}
1100 			mtx_unlock(lock);
1101 		}
1102 	}
1103 }
1104 
1105 /**
1106  *	init_qset_cntxt - initialize an SGE queue set context info
1107  *	@qs: the queue set
1108  *	@id: the queue set id
1109  *
1110  *	Initializes the TIDs and context ids for the queues of a queue set.
1111  */
1112 static void
1113 init_qset_cntxt(struct sge_qset *qs, u_int id)
1114 {
1115 
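	/*
	 * Each queue set owns two free-list contexts (2*id and 2*id + 1) and
	 * one egress context per Tx queue type, offset from the firmware's
	 * base id for that type (tunnel, offload, and control respectively).
	 */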
1116 	qs->rspq.cntxt_id = id;
1117 	qs->fl[0].cntxt_id = 2 * id;
1118 	qs->fl[1].cntxt_id = 2 * id + 1;
1119 	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1120 	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1121 	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1122 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1123 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1124 
1125 	/* XXX: a sane limit is needed instead of INT_MAX */
1126 	mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1127 	mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1128 	mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1129 }
1130 
1131 
1132 static void
1133 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1134 {
1135 	txq->in_use += ndesc;
1136 	/*
1137 	 * XXX we don't handle stopping of the queue here;
1138 	 * presumably the start path handles this when we bump against the end.
1139 	 */
1140 	txqs->gen = txq->gen;
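	/*
	 * Request a hardware completion roughly every 32 descriptors: once the
	 * unacked count crosses 32, that bit is shifted into the WR_COMPL
	 * position of the work request and the counter wraps back to mod 32.
	 */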
1141 	txq->unacked += ndesc;
1142 	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1143 	txq->unacked &= 31;
1144 	txqs->pidx = txq->pidx;
1145 	txq->pidx += ndesc;
1146 #ifdef INVARIANTS
1147 	if (((txqs->pidx > txq->cidx) &&
1148 		(txq->pidx < txqs->pidx) &&
1149 		(txq->pidx >= txq->cidx)) ||
1150 	    ((txqs->pidx < txq->cidx) &&
1151 		(txq->pidx >= txq-> cidx)) ||
1152 	    ((txqs->pidx < txq->cidx) &&
1153 		(txq->cidx < txqs->pidx)))
1154 		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1155 		    txqs->pidx, txq->pidx, txq->cidx);
1156 #endif
1157 	if (txq->pidx >= txq->size) {
1158 		txq->pidx -= txq->size;
1159 		txq->gen ^= 1;
1160 	}
1161 
1162 }
1163 
1164 /**
1165  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
1166  *	@m: the packet mbufs
1167  *      @nsegs: the number of segments
1168  *
1169  * 	Returns the number of Tx descriptors needed for the given Ethernet
1170  * 	packet.  Ethernet packets require addition of WR and CPL headers.
1171  */
1172 static __inline unsigned int
1173 calc_tx_descs(const struct mbuf *m, int nsegs)
1174 {
1175 	unsigned int flits;
1176 
1177 	if (m->m_pkthdr.len <= PIO_LEN)
1178 		return 1;
1179 
1180 	flits = sgl_len(nsegs) + 2;
1181 	if (m->m_pkthdr.csum_flags & CSUM_TSO)
1182 		flits++;
1183 
1184 	return flits_to_desc(flits);
1185 }
1186 
1187 /**
1188  *	make_sgl - populate a scatter/gather list for a packet
1189  *	@sgp: the SGL to populate
1190  *	@segs: the packet dma segments
1191  *	@nsegs: the number of segments
1192  *
1193  *	Generates a scatter/gather list for the buffers that make up a packet.
1194  *	The caller must size the SGL appropriately; sgl_len() gives the number
1195  *	of flits needed for @nsegs entries.
1196  */
1197 static __inline void
1198 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1199 {
1200 	int i, idx;
1201 
1202 	for (idx = 0, i = 0; i < nsegs; i++) {
1203 		/*
1204 		 * firmware doesn't like empty segments
1205 		 */
1206 		if (segs[i].ds_len == 0)
1207 			continue;
1208 		if (i && idx == 0)
1209 			++sgp;
1210 
1211 		sgp->len[idx] = htobe32(segs[i].ds_len);
1212 		sgp->addr[idx] = htobe64(segs[i].ds_addr);
1213 		idx ^= 1;
1214 	}
1215 
1216 	if (idx) {
1217 		sgp->len[idx] = 0;
1218 		sgp->addr[idx] = 0;
1219 	}
1220 }
1221 
1222 /**
1223  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1224  *	@adap: the adapter
1225  *	@q: the Tx queue
1226  *
1227  *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
1228  *	where the HW goes to sleep just after we checked; in that case the
1229  *	interrupt handler will detect the outstanding TX packet
1230  *	and ring the doorbell for us.
1231  *
1232  *	When GTS is disabled we unconditionally ring the doorbell.
1233  */
1234 static __inline void
1235 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1236 {
1237 #if USE_GTS
1238 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1239 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1240 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1241 #ifdef T3_TRACE
1242 		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1243 			  q->cntxt_id);
1244 #endif
1245 		t3_write_reg(adap, A_SG_KDOORBELL,
1246 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1247 	}
1248 #else
1249 	if (mustring || ++q->db_pending >= 32) {
1250 		wmb();            /* write descriptors before telling HW */
1251 		t3_write_reg(adap, A_SG_KDOORBELL,
1252 		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1253 		q->db_pending = 0;
1254 	}
1255 #endif
1256 }
1257 
1258 static __inline void
1259 wr_gen2(struct tx_desc *d, unsigned int gen)
1260 {
1261 #if SGE_NUM_GENBITS == 2
1262 	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1263 #endif
1264 }
1265 
1266 /**
1267  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1268  *	@ndesc: number of Tx descriptors spanned by the SGL
1269  *	@txd: first Tx descriptor to be written
1270  *	@txqs: txq state (generation and producer index)
1271  *	@txq: the SGE Tx queue
1272  *	@sgl: the SGL
1273  *	@flits: number of flits to the start of the SGL in the first descriptor
1274  *	@sgl_flits: the SGL size in flits
1275  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1276  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1277  *
1278  *	Write a work request header and an associated SGL.  If the SGL is
1279  *	small enough to fit into one Tx descriptor it has already been written
1280  *	and we just need to write the WR header.  Otherwise we distribute the
1281  *	SGL across the number of descriptors it spans.
1282  */
1283 static void
1284 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1285     const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1286     unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1287 {
1288 
1289 	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1290 	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1291 
1292 	if (__predict_true(ndesc == 1)) {
1293 		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1294 		    V_WR_SGLSFLT(flits)) | wr_hi,
1295 		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1296 		    wr_lo);
1297 
1298 		wr_gen2(txd, txqs->gen);
1299 
1300 	} else {
1301 		unsigned int ogen = txqs->gen;
1302 		const uint64_t *fp = (const uint64_t *)sgl;
1303 		struct work_request_hdr *wp = wrp;
1304 
1305 		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1306 		    V_WR_SGLSFLT(flits)) | wr_hi;
1307 
1308 		while (sgl_flits) {
1309 			unsigned int avail = WR_FLITS - flits;
1310 
1311 			if (avail > sgl_flits)
1312 				avail = sgl_flits;
1313 			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1314 			sgl_flits -= avail;
1315 			ndesc--;
1316 			if (!sgl_flits)
1317 				break;
1318 
1319 			fp += avail;
1320 			txd++;
1321 			txsd++;
1322 			if (++txqs->pidx == txq->size) {
1323 				txqs->pidx = 0;
1324 				txqs->gen ^= 1;
1325 				txd = txq->desc;
1326 				txsd = txq->sdesc;
1327 			}
1328 
1329 			/*
1330 			 * when the head of the mbuf chain
1331 			 * is freed all clusters will be freed
1332 			 * with it
1333 			 */
1334 			wrp = (struct work_request_hdr *)txd;
1335 			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1336 			    V_WR_SGLSFLT(1)) | wr_hi;
1337 			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1338 				    sgl_flits + 1)) |
1339 			    V_WR_GEN(txqs->gen)) | wr_lo;
1340 			wr_gen2(txd, txqs->gen);
1341 			flits = 1;
1342 		}
1343 		wrp->wrh_hi |= htonl(F_WR_EOP);
1344 		wmb();
1345 		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1346 		wr_gen2((struct tx_desc *)wp, ogen);
1347 	}
1348 }
1349 
1350 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1351 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1352 
1353 #define GET_VTAG(cntrl, m) \
1354 do { \
1355 	if ((m)->m_flags & M_VLANTAG)					            \
1356 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1357 } while (0)
1358 
1359 static int
1360 t3_encap(struct sge_qset *qs, struct mbuf **m)
1361 {
1362 	adapter_t *sc;
1363 	struct mbuf *m0;
1364 	struct sge_txq *txq;
1365 	struct txq_state txqs;
1366 	struct port_info *pi;
1367 	unsigned int ndesc, flits, cntrl, mlen;
1368 	int err, nsegs, tso_info = 0;
1369 
1370 	struct work_request_hdr *wrp;
1371 	struct tx_sw_desc *txsd;
1372 	struct sg_ent *sgp, *sgl;
1373 	uint32_t wr_hi, wr_lo, sgl_flits;
1374 	bus_dma_segment_t segs[TX_MAX_SEGS];
1375 
1376 	struct tx_desc *txd;
1377 
1378 	pi = qs->port;
1379 	sc = pi->adapter;
1380 	txq = &qs->txq[TXQ_ETH];
1381 	txd = &txq->desc[txq->pidx];
1382 	txsd = &txq->sdesc[txq->pidx];
1383 	sgl = txq->txq_sgl;
1384 
1385 	prefetch(txd);
1386 	m0 = *m;
1387 
1388 	mtx_assert(&qs->lock, MA_OWNED);
1389 	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1390 	KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1391 
1392 	if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1395 
1396 	if (m0->m_nextpkt != NULL) {
1397 		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1398 		ndesc = 1;
1399 		mlen = 0;
1400 	} else {
1401 		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1402 		    &m0, segs, &nsegs))) {
1403 			if (cxgb_debug)
1404 				printf("failed ... err=%d\n", err);
1405 			return (err);
1406 		}
1407 		mlen = m0->m_pkthdr.len;
1408 		ndesc = calc_tx_descs(m0, nsegs);
1409 	}
1410 	txq_prod(txq, ndesc, &txqs);
1411 
1412 	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1413 	txsd->m = m0;
1414 
1415 	if (m0->m_nextpkt != NULL) {
1416 		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1417 		int i, fidx;
1418 
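		/*
		 * Coalesced path: build a single tunnel-TX work request whose
		 * one-flit header is followed by up to 7 CPL_TX_PKT entries,
		 * each two flits long (control word, length, and DMA address).
		 */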
1419 		if (nsegs > 7)
1420 			panic("trying to coalesce %d packets into one WR", nsegs);
1421 		txq->txq_coalesced += nsegs;
1422 		wrp = (struct work_request_hdr *)txd;
1423 		flits = nsegs*2 + 1;
1424 
1425 		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1426 			struct cpl_tx_pkt_batch_entry *cbe;
1427 			uint64_t flit;
1428 			uint32_t *hflit = (uint32_t *)&flit;
1429 			int cflags = m0->m_pkthdr.csum_flags;
1430 
1431 			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1432 			GET_VTAG(cntrl, m0);
1433 			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1434 			if (__predict_false(!(cflags & CSUM_IP)))
1435 				cntrl |= F_TXPKT_IPCSUM_DIS;
1436 			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 			    CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1438 				cntrl |= F_TXPKT_L4CSUM_DIS;
1439 
1440 			hflit[0] = htonl(cntrl);
1441 			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 			flit |= htobe64(1 << 24);
1443 			cbe = &cpl_batch->pkt_entry[i];
1444 			cbe->cntrl = hflit[0];
1445 			cbe->len = hflit[1];
1446 			cbe->addr = htobe64(segs[i].ds_addr);
1447 		}
1448 
1449 		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1450 		    V_WR_SGLSFLT(flits)) |
1451 		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1452 		wr_lo = htonl(V_WR_LEN(flits) |
1453 		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1454 		set_wr_hdr(wrp, wr_hi, wr_lo);
1455 		wmb();
1456 		ETHER_BPF_MTAP(pi->ifp, m0);
1457 		wr_gen2(txd, txqs.gen);
1458 		check_ring_tx_db(sc, txq, 0);
1459 		return (0);
1460 	} else if (tso_info) {
1461 		uint16_t eth_type;
1462 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1463 		struct ether_header *eh;
1464 		void *l3hdr;
1465 		struct tcphdr *tcp;
1466 
1467 		txd->flit[2] = 0;
1468 		GET_VTAG(cntrl, m0);
1469 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1470 		hdr->cntrl = htonl(cntrl);
1471 		hdr->len = htonl(mlen | 0x80000000);
1472 
1473 		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1474 			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 			    m0, mlen, m0->m_pkthdr.tso_segsz,
1476 			    (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 			panic("tx tso packet too small");
1478 		}
1479 
1480 		/* Make sure that ether, ip, tcp headers are all in m0 */
1481 		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1482 			m0 = m_pullup(m0, TCPPKTHDRSIZE);
1483 			if (__predict_false(m0 == NULL)) {
1484 				/* XXX panic probably an overreaction */
1485 				panic("couldn't fit header into mbuf");
1486 			}
1487 		}
1488 
1489 		eh = mtod(m0, struct ether_header *);
1490 		eth_type = eh->ether_type;
1491 		if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 			struct ether_vlan_header *evh = (void *)eh;
1493 
1494 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1495 			l3hdr = evh + 1;
1496 			eth_type = evh->evl_proto;
1497 		} else {
1498 			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1499 			l3hdr = eh + 1;
1500 		}
1501 
1502 		if (eth_type == htons(ETHERTYPE_IP)) {
1503 			struct ip *ip = l3hdr;
1504 
1505 			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1506 			tcp = (struct tcphdr *)(ip + 1);
1507 		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 			struct ip6_hdr *ip6 = l3hdr;
1509 
1510 			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 			    ("%s: CSUM_TSO with ip6_nxt %d",
1512 			    __func__, ip6->ip6_nxt));
1513 
1514 			tso_info |= F_LSO_IPV6;
1515 			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1516 			tcp = (struct tcphdr *)(ip6 + 1);
1517 		} else
1518 			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1519 
1520 		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1521 		hdr->lso_info = htonl(tso_info);
1522 
1523 		if (__predict_false(mlen <= PIO_LEN)) {
1524 			/*
1525 			 * pkt is not undersized but still fits in PIO_LEN;
1526 			 * this indicates a TSO bug at the higher levels.
1527 			 */
1528 			txsd->m = NULL;
1529 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1530 			flits = (mlen + 7) / 8 + 3;
1531 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1532 					  V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1533 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1534 			wr_lo = htonl(V_WR_LEN(flits) |
1535 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1536 			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1537 			wmb();
1538 			ETHER_BPF_MTAP(pi->ifp, m0);
1539 			wr_gen2(txd, txqs.gen);
1540 			check_ring_tx_db(sc, txq, 0);
1541 			m_freem(m0);
1542 			return (0);
1543 		}
1544 		flits = 3;
1545 	} else {
1546 		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1547 
1548 		GET_VTAG(cntrl, m0);
1549 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1550 		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1551 			cntrl |= F_TXPKT_IPCSUM_DIS;
1552 		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 		    CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1554 			cntrl |= F_TXPKT_L4CSUM_DIS;
1555 		cpl->cntrl = htonl(cntrl);
1556 		cpl->len = htonl(mlen | 0x80000000);
1557 
1558 		if (mlen <= PIO_LEN) {
1559 			txsd->m = NULL;
1560 			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1561 			flits = (mlen + 7) / 8 + 2;
1562 
1563 			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1564 			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1565 					  F_WR_SOP | F_WR_EOP | txqs.compl);
1566 			wr_lo = htonl(V_WR_LEN(flits) |
1567 			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1568 			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1569 			wmb();
1570 			ETHER_BPF_MTAP(pi->ifp, m0);
1571 			wr_gen2(txd, txqs.gen);
1572 			check_ring_tx_db(sc, txq, 0);
1573 			m_freem(m0);
1574 			return (0);
1575 		}
1576 		flits = 2;
1577 	}
1578 	wrp = (struct work_request_hdr *)txd;
1579 	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1580 	make_sgl(sgp, segs, nsegs);
1581 
1582 	sgl_flits = sgl_len(nsegs);
1583 
1584 	ETHER_BPF_MTAP(pi->ifp, m0);
1585 
1586 	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1587 	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1588 	wr_lo = htonl(V_WR_TID(txq->token));
1589 	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1590 	    sgl_flits, wr_hi, wr_lo);
1591 	check_ring_tx_db(sc, txq, 0);
1592 
1593 	return (0);
1594 }
1595 
1596 #ifdef DEBUGNET
1597 int
1598 cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m)
1599 {
1600 	int error;
1601 
1602 	error = t3_encap(qs, m);
1603 	if (error == 0)
1604 		check_ring_tx_db(qs->port->adapter, &qs->txq[TXQ_ETH], 1);
1605 	else if (*m != NULL) {
1606 		m_freem(*m);
1607 		*m = NULL;
1608 	}
1609 	return (error);
1610 }
1611 #endif
1612 
1613 void
1614 cxgb_tx_watchdog(void *arg)
1615 {
1616 	struct sge_qset *qs = arg;
1617 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1618 
1619         if (qs->coalescing != 0 &&
1620 	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1621 	    TXQ_RING_EMPTY(qs))
1622                 qs->coalescing = 0;
1623         else if (qs->coalescing == 0 &&
1624 	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
1625                 qs->coalescing = 1;
1626 	if (TXQ_TRYLOCK(qs)) {
1627 		qs->qs_flags |= QS_FLUSHING;
1628 		cxgb_start_locked(qs);
1629 		qs->qs_flags &= ~QS_FLUSHING;
1630 		TXQ_UNLOCK(qs);
1631 	}
1632 	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1633 		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1634 		    qs, txq->txq_watchdog.c_cpu);
1635 }
1636 
1637 static void
1638 cxgb_tx_timeout(void *arg)
1639 {
1640 	struct sge_qset *qs = arg;
1641 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1642 
1643 	if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1644                 qs->coalescing = 1;
1645 	if (TXQ_TRYLOCK(qs)) {
1646 		qs->qs_flags |= QS_TIMEOUT;
1647 		cxgb_start_locked(qs);
1648 		qs->qs_flags &= ~QS_TIMEOUT;
1649 		TXQ_UNLOCK(qs);
1650 	}
1651 }
1652 
1653 static void
1654 cxgb_start_locked(struct sge_qset *qs)
1655 {
1656 	struct mbuf *m_head = NULL;
1657 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1658 	struct port_info *pi = qs->port;
1659 	struct ifnet *ifp = pi->ifp;
1660 
1661 	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1662 		reclaim_completed_tx(qs, 0, TXQ_ETH);
1663 
1664 	if (!pi->link_config.link_ok) {
1665 		TXQ_RING_FLUSH(qs);
1666 		return;
1667 	}
1668 	TXQ_LOCK_ASSERT(qs);
1669 	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1670 	    pi->link_config.link_ok) {
1671 		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1672 
1673 		if (txq->size - txq->in_use <= TX_MAX_DESC)
1674 			break;
1675 
1676 		if ((m_head = cxgb_dequeue(qs)) == NULL)
1677 			break;
1678 		/*
1679 		 *  Encapsulation can modify our pointer, and/or make it
1680 		 *  NULL on failure.  In that event, we can't requeue.
1681 		 */
1682 		if (t3_encap(qs, &m_head) || m_head == NULL)
1683 			break;
1684 
1685 		m_head = NULL;
1686 	}
1687 
1688 	if (txq->db_pending)
1689 		check_ring_tx_db(pi->adapter, txq, 1);
1690 
1691 	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1692 	    pi->link_config.link_ok)
1693 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1694 		    qs, txq->txq_timer.c_cpu);
1695 	if (m_head != NULL)
1696 		m_freem(m_head);
1697 }
1698 
1699 static int
1700 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1701 {
1702 	struct port_info *pi = qs->port;
1703 	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1704 	struct buf_ring *br = txq->txq_mr;
1705 	int error, avail;
1706 
1707 	avail = txq->size - txq->in_use;
1708 	TXQ_LOCK_ASSERT(qs);
1709 
1710 	/*
1711 	 * We can only do a direct transmit if the following are true:
1712 	 * - we aren't coalescing (ring < 3/4 full)
1713 	 * - the link is up -- checked in caller
1714 	 * - there are no packets enqueued already
1715 	 * - there is space in the hardware transmit queue
1716 	 */
1717 	if (check_pkt_coalesce(qs) == 0 &&
1718 	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1719 		if (t3_encap(qs, &m)) {
1720 			if (m != NULL &&
1721 			    (error = drbr_enqueue(ifp, br, m)) != 0)
1722 				return (error);
1723 		} else {
1724 			if (txq->db_pending)
1725 				check_ring_tx_db(pi->adapter, txq, 1);
1726 
1727 			/*
1728 			 * We've bypassed the buf ring so we need to update
1729 			 * the stats directly
1730 			 */
1731 			txq->txq_direct_packets++;
1732 			txq->txq_direct_bytes += m->m_pkthdr.len;
1733 		}
1734 	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1735 		return (error);
1736 
1737 	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1738 	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1739 	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1740 		cxgb_start_locked(qs);
1741 	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1742 		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1743 		    qs, txq->txq_timer.c_cpu);
1744 	return (0);
1745 }
1746 
1747 int
1748 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1749 {
1750 	struct sge_qset *qs;
1751 	struct port_info *pi = ifp->if_softc;
1752 	int error, qidx = pi->first_qset;
1753 
1754 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1755 	    ||(!pi->link_config.link_ok)) {
1756 		m_freem(m);
1757 		return (0);
1758 	}
1759 
1760 	/* check if flowid is set */
1761 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1762 		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1763 
1764 	qs = &pi->adapter->sge.qs[qidx];
1765 
1766 	if (TXQ_TRYLOCK(qs)) {
1767 		/* XXX running */
1768 		error = cxgb_transmit_locked(ifp, qs, m);
1769 		TXQ_UNLOCK(qs);
1770 	} else
1771 		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1772 	return (error);
1773 }
1774 
1775 void
1776 cxgb_qflush(struct ifnet *ifp)
1777 {
1778 	/*
1779 	 * Flush any enqueued mbufs in the buf_rings
1780 	 * and in the transmit queues.
1781 	 * This is a no-op for now.
1782 	 */
1783 	return;
1784 }
1785 
1786 /**
1787  *	write_imm - write a packet into a Tx descriptor as immediate data
1788  *	@d: the Tx descriptor to write
1789  *	@src: the packet data, which begins with a work request header
1790  *	@len: the length of packet data to write as immediate data
1791  *	@gen: the generation bit value to write
1792  *
1793  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1794  *	contains a work request at its beginning.  We must write the packet
1795  *	carefully so the SGE doesn't read accidentally before it's written in
1796  *	its entirety.
1797  */
1798 static __inline void
1799 write_imm(struct tx_desc *d, caddr_t src,
1800 	  unsigned int len, unsigned int gen)
1801 {
1802 	struct work_request_hdr *from = (struct work_request_hdr *)src;
1803 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1804 	uint32_t wr_hi, wr_lo;
1805 
1806 	KASSERT(len <= WR_LEN && len >= sizeof(*from),
1807 	    ("%s: invalid len %d", __func__, len));
1808 
1809 	memcpy(&to[1], &from[1], len - sizeof(*from));
1810 	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1811 	    V_WR_BCNTLFLT(len & 7));
1812 	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1813 	set_wr_hdr(to, wr_hi, wr_lo);
1814 	wmb();
1815 	wr_gen2(d, gen);
1816 }
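/*
 * Ordering sketch for write_imm() above: the WR body is copied into the
 * descriptor first, set_wr_hdr() then stores the header words carrying
 * SOP/EOP, the length, and the generation value, and the wmb() makes all
 * of that visible before wr_gen2() writes the trailing generation bits.
 * Only once the generation matches does the SGE treat the descriptor as
 * valid, which is how "read before fully written" is avoided.
 */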
1817 
1818 /**
1819  *	check_desc_avail - check descriptor availability on a send queue
1820  *	@adap: the adapter
1821  *	@q: the TX queue
1822  *	@m: the packet needing the descriptors
1823  *	@ndesc: the number of Tx descriptors needed
1824  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1825  *
1826  *	Checks if the requested number of Tx descriptors is available on an
1827  *	SGE send queue.  If the queue is already suspended or not enough
1828  *	descriptors are available the packet is queued for later transmission.
1829  *	Must be called with the Tx queue locked.
1830  *
1831  *	Returns 0 if enough descriptors are available, 1 if there aren't
1832  *	enough descriptors and the packet has been queued, and 2 if the caller
1833  *	needs to retry because there weren't enough descriptors at the
1834  *	beginning of the call but some freed up in the meantime.
1835  */
1836 static __inline int
1837 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1838 		 struct mbuf *m, unsigned int ndesc,
1839 		 unsigned int qid)
1840 {
1841 	/*
1842 	 * XXX We currently only use this for checking the control queue.
1843 	 * The control queue is only used for binding qsets, which happens
1844 	 * at init time, so we are guaranteed enough descriptors.
1845 	 */
1846 	if (__predict_false(mbufq_len(&q->sendq))) {
1847 addq_exit:	(void)mbufq_enqueue(&q->sendq, m);
1848 		return 1;
1849 	}
1850 	if (__predict_false(q->size - q->in_use < ndesc)) {
1851 
1852 		struct sge_qset *qs = txq_to_qset(q, qid);
1853 
1854 		setbit(&qs->txq_stopped, qid);
1855 		if (should_restart_tx(q) &&
1856 		    test_and_clear_bit(qid, &qs->txq_stopped))
1857 			return 2;
1858 
1859 		q->stops++;
1860 		goto addq_exit;
1861 	}
1862 	return 0;
1863 }
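/*
 * How callers use the 0/1/2 contract above (see ctrl_xmit() and
 * ofld_xmit() below for the real code):
 *
 *	again:	reclaim_completed_tx...(q);
 *		ret = check_desc_avail(adap, q, m, ndesc, qid);
 *		if (ret == 1)		m is parked on q->sendq and the
 *			return (...);	restart task resends it later
 *		if (ret == 2)
 *			goto again;	descriptors freed up, retry
 *		write the work request	ret == 0, the ring has room
 */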
1864 
1865 
1866 /**
1867  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1868  *	@q: the SGE control Tx queue
1869  *
1870  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1871  *	that send only immediate data (presently just the control queues) and
1872  *	thus do not have any mbufs.
1873  */
1874 static __inline void
1875 reclaim_completed_tx_imm(struct sge_txq *q)
1876 {
1877 	unsigned int reclaim = q->processed - q->cleaned;
1878 
1879 	q->in_use -= reclaim;
1880 	q->cleaned += reclaim;
1881 }
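/*
 * q->processed is advanced by handle_rsp_cntrl_info() as the SGE hands
 * back completion credits, so the reclaim above is pure book-keeping:
 * control descriptors carry only immediate data, and there are no mbufs
 * or DMA maps to release.
 */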
1882 
1883 /**
1884  *	ctrl_xmit - send a packet through an SGE control Tx queue
1885  *	@adap: the adapter
1886  *	@q: the control queue
1887  *	@qs: the queue set containing the control queue
1888  *
1889  *	Send a packet through an SGE control Tx queue.  Packets sent through
1890  *	a control queue must fit entirely as immediate data in a single Tx
1891  *	descriptor and have no page fragments.
1892  */
1893 static int
1894 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1895 {
1896 	int ret;
1897 	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1898 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1899 
1900 	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1901 
1902 	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1903 	wrp->wrh_lo = htonl(V_WR_TID(q->token));
1904 
1905 	TXQ_LOCK(qs);
1906 again:	reclaim_completed_tx_imm(q);
1907 
1908 	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1909 	if (__predict_false(ret)) {
1910 		if (ret == 1) {
1911 			TXQ_UNLOCK(qs);
1912 			return (ENOSPC);
1913 		}
1914 		goto again;
1915 	}
1916 	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1917 
1918 	q->in_use++;
1919 	if (++q->pidx >= q->size) {
1920 		q->pidx = 0;
1921 		q->gen ^= 1;
1922 	}
1923 	TXQ_UNLOCK(qs);
1924 	wmb();
1925 	t3_write_reg(adap, A_SG_KDOORBELL,
1926 	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1927 
1928 	m_free(m);
1929 	return (0);
1930 }
1931 
1932 
1933 /**
1934  *	restart_ctrlq - restart a suspended control queue
1935  *	@qs: the queue set containing the control queue
1936  *
1937  *	Resumes transmission on a suspended Tx control queue.
1938  */
1939 static void
1940 restart_ctrlq(void *data, int npending)
1941 {
1942 	struct mbuf *m;
1943 	struct sge_qset *qs = (struct sge_qset *)data;
1944 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1945 	adapter_t *adap = qs->port->adapter;
1946 
1947 	TXQ_LOCK(qs);
1948 again:	reclaim_completed_tx_imm(q);
1949 
1950 	while (q->in_use < q->size &&
1951 	       (m = mbufq_dequeue(&q->sendq)) != NULL) {
1952 
1953 		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1954 		m_free(m);
1955 
1956 		if (++q->pidx >= q->size) {
1957 			q->pidx = 0;
1958 			q->gen ^= 1;
1959 		}
1960 		q->in_use++;
1961 	}
1962 	if (mbufq_len(&q->sendq)) {
1963 		setbit(&qs->txq_stopped, TXQ_CTRL);
1964 
1965 		if (should_restart_tx(q) &&
1966 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1967 			goto again;
1968 		q->stops++;
1969 	}
1970 	TXQ_UNLOCK(qs);
1971 	t3_write_reg(adap, A_SG_KDOORBELL,
1972 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1973 }
1974 
1975 
1976 /*
1977  * Send a management message through control queue 0
1978  */
1979 int
1980 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1981 {
1982 	return ctrl_xmit(adap, &adap->sge.qs[0], m);
1983 }
1984 
1985 /**
1986  *	free_qset - free the resources of an SGE queue set
1987  *	@sc: the controller owning the queue set
1988  *	@q: the queue set
1989  *
1990  *	Release the HW and SW resources associated with an SGE queue set, such
1991  *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
1992  *	queue set must be quiesced prior to calling this.
1993  */
1994 static void
1995 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1996 {
1997 	int i;
1998 
1999 	reclaim_completed_tx(q, 0, TXQ_ETH);
2000 	if (q->txq[TXQ_ETH].txq_mr != NULL)
2001 		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
2002 	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
2003 		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
2004 		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
2005 	}
2006 
2007 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2008 		if (q->fl[i].desc) {
2009 			mtx_lock_spin(&sc->sge.reg_lock);
2010 			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
2011 			mtx_unlock_spin(&sc->sge.reg_lock);
2012 			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
2013 			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
2014 					q->fl[i].desc_map);
2015 			bus_dma_tag_destroy(q->fl[i].desc_tag);
2016 			bus_dma_tag_destroy(q->fl[i].entry_tag);
2017 		}
2018 		if (q->fl[i].sdesc) {
2019 			free_rx_bufs(sc, &q->fl[i]);
2020 			free(q->fl[i].sdesc, M_DEVBUF);
2021 		}
2022 	}
2023 
2024 	mtx_unlock(&q->lock);
2025 	MTX_DESTROY(&q->lock);
2026 	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2027 		if (q->txq[i].desc) {
2028 			mtx_lock_spin(&sc->sge.reg_lock);
2029 			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2030 			mtx_unlock_spin(&sc->sge.reg_lock);
2031 			bus_dmamap_unload(q->txq[i].desc_tag,
2032 					q->txq[i].desc_map);
2033 			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2034 					q->txq[i].desc_map);
2035 			bus_dma_tag_destroy(q->txq[i].desc_tag);
2036 			bus_dma_tag_destroy(q->txq[i].entry_tag);
2037 		}
2038 		if (q->txq[i].sdesc) {
2039 			free(q->txq[i].sdesc, M_DEVBUF);
2040 		}
2041 	}
2042 
2043 	if (q->rspq.desc) {
2044 		mtx_lock_spin(&sc->sge.reg_lock);
2045 		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2046 		mtx_unlock_spin(&sc->sge.reg_lock);
2047 
2048 		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2049 		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2050 			        q->rspq.desc_map);
2051 		bus_dma_tag_destroy(q->rspq.desc_tag);
2052 		MTX_DESTROY(&q->rspq.lock);
2053 	}
2054 
2055 #if defined(INET6) || defined(INET)
2056 	tcp_lro_free(&q->lro.ctrl);
2057 #endif
2058 
2059 	bzero(q, sizeof(*q));
2060 }
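/*
 * Note that t3_free_qset() is entered with the queue set's lock held
 * (both t3_free_sge_resources() and the error path in t3_sge_alloc_qset()
 * take TXQ_LOCK first); it releases and destroys the lock itself before
 * zeroing the structure.
 */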
2061 
2062 /**
2063  *	t3_free_sge_resources - free SGE resources
2064  *	@sc: the adapter softc
2065  *
2066  *	Frees resources used by the SGE queue sets.
2067  */
2068 void
2069 t3_free_sge_resources(adapter_t *sc, int nqsets)
2070 {
2071 	int i;
2072 
2073 	for (i = 0; i < nqsets; ++i) {
2074 		TXQ_LOCK(&sc->sge.qs[i]);
2075 		t3_free_qset(sc, &sc->sge.qs[i]);
2076 	}
2077 }
2078 
2079 /**
2080  *	t3_sge_start - enable SGE
2081  *	@sc: the controller softc
2082  *
2083  *	Enables the SGE for DMAs.  This is the last step in starting packet
2084  *	transfers.
2085  */
2086 void
2087 t3_sge_start(adapter_t *sc)
2088 {
2089 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2090 }
2091 
2092 /**
2093  *	t3_sge_stop - disable SGE operation
2094  *	@sc: the adapter
2095  *
2096  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2097  *	from error interrupts) or from normal process context.  In the latter
2098  *	case it also disables any pending queue restart tasklets.  Note that
2099  *	if it is called in interrupt context it cannot disable the restart
2100  *	tasklets as it cannot wait; however, the tasklets will have no effect
2101  *	since the doorbells are disabled, and the driver will call this again
2102  *	later from process context, at which time the tasklets will be stopped
2103  *	if they are still running.
2104  */
2105 void
2106 t3_sge_stop(adapter_t *sc)
2107 {
2108 	int i, nqsets;
2109 
2110 	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2111 
2112 	if (sc->tq == NULL)
2113 		return;
2114 
2115 	for (nqsets = i = 0; i < (sc)->params.nports; i++)
2116 		nqsets += sc->port[i].nqsets;
2117 #ifdef notyet
2118 	/*
2119 	 *
2120 	 * XXX
2121 	 */
2122 	for (i = 0; i < nqsets; ++i) {
2123 		struct sge_qset *qs = &sc->sge.qs[i];
2124 
2125 		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2126 		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2127 	}
2128 #endif
2129 }
2130 
2131 /**
2132  *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
2133  *	@qs: the queue set that owns the Tx queue
2134  *	@reclaimable: the number of descriptors to reclaim
2135  *	@queue: the Tx queue's index within the queue set (e.g. TXQ_ETH)
2136  *
2137  *	Reclaims Tx descriptors from an SGE Tx queue, unloads their DMA
2138  *	maps, and frees the associated Tx buffers.  Descriptors that carry
2139  *	no mbuf are counted in txq_skipped.  Called with the Tx queue lock
2140  *	held.
2143  */
2144 void
2145 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2146 {
2147 	struct tx_sw_desc *txsd;
2148 	unsigned int cidx, mask;
2149 	struct sge_txq *q = &qs->txq[queue];
2150 
2151 #ifdef T3_TRACE
2152 	T3_TRACE2(sc->tb[q->cntxt_id & 7],
2153 		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2154 #endif
2155 	cidx = q->cidx;
2156 	mask = q->size - 1;
2157 	txsd = &q->sdesc[cidx];
2158 
2159 	mtx_assert(&qs->lock, MA_OWNED);
2160 	while (reclaimable--) {
2161 		prefetch(q->sdesc[(cidx + 1) & mask].m);
2162 		prefetch(q->sdesc[(cidx + 2) & mask].m);
2163 
2164 		if (txsd->m != NULL) {
2165 			if (txsd->flags & TX_SW_DESC_MAPPED) {
2166 				bus_dmamap_unload(q->entry_tag, txsd->map);
2167 				txsd->flags &= ~TX_SW_DESC_MAPPED;
2168 			}
2169 			m_freem_list(txsd->m);
2170 			txsd->m = NULL;
2171 		} else
2172 			q->txq_skipped++;
2173 
2174 		++txsd;
2175 		if (++cidx == q->size) {
2176 			cidx = 0;
2177 			txsd = q->sdesc;
2178 		}
2179 	}
2180 	q->cidx = cidx;
2181 
2182 }
2183 
2184 /**
2185  *	is_new_response - check if a response is newly written
2186  *	@r: the response descriptor
2187  *	@q: the response queue
2188  *
2189  *	Returns true if a response descriptor contains a yet unprocessed
2190  *	response.
2191  */
2192 static __inline int
2193 is_new_response(const struct rsp_desc *r,
2194     const struct sge_rspq *q)
2195 {
2196 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2197 }
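/*
 * Generation-bit sketch: process_responses() flips rspq->gen every time
 * the consumer index wraps, and the hardware stamps each response it
 * writes with the generation current at that point.  A descriptor whose
 * F_RSPD_GEN2 bit matches q->gen was therefore produced since the last
 * wrap and is still unprocessed; entries left over from the previous
 * pass carry the old generation and are ignored.
 */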
2198 
2199 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2200 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2201 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2202 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2203 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2204 
2205 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2206 #define NOMEM_INTR_DELAY 2500
2207 
2208 #ifdef TCP_OFFLOAD
2209 /**
2210  *	write_ofld_wr - write an offload work request
2211  *	@adap: the adapter
2212  *	@m: the packet to send
2213  *	@q: the Tx queue
2214  *	@pidx: index of the first Tx descriptor to write
2215  *	@gen: the generation value to use
2216  *	@ndesc: number of descriptors the packet will occupy
2217  *
2218  *	Write an offload work request to send the supplied packet.  The packet
2219  *	data already carry the work request with most fields populated.
2220  */
2221 static void
2222 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2223     unsigned int pidx, unsigned int gen, unsigned int ndesc)
2224 {
2225 	unsigned int sgl_flits, flits;
2226 	int i, idx, nsegs, wrlen;
2227 	struct work_request_hdr *from;
2228 	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2229 	struct tx_desc *d = &q->desc[pidx];
2230 	struct txq_state txqs;
2231 	struct sglist_seg *segs;
2232 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2233 	struct sglist *sgl;
2234 
2235 	from = (void *)(oh + 1);	/* Start of WR within mbuf */
2236 	wrlen = m->m_len - sizeof(*oh);
2237 
2238 	if (!(oh->flags & F_HDR_SGL)) {
2239 		write_imm(d, (caddr_t)from, wrlen, gen);
2240 
2241 		/*
2242 		 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2243 		 * t3_push_frames and freed in wr_ack.  Others, like those sent
2244 		 * down by close_conn, t3_send_reset, etc. should be freed here.
2245 		 */
2246 		if (!(oh->flags & F_HDR_DF))
2247 			m_free(m);
2248 		return;
2249 	}
2250 
2251 	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2252 
2253 	sgl = oh->sgl;
2254 	flits = wrlen / 8;
2255 	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2256 
2257 	nsegs = sgl->sg_nseg;
2258 	segs = sgl->sg_segs;
2259 	for (idx = 0, i = 0; i < nsegs; i++) {
2260 		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2261 		if (i && idx == 0)
2262 			++sgp;
2263 		sgp->len[idx] = htobe32(segs[i].ss_len);
2264 		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2265 		idx ^= 1;
2266 	}
2267 	if (idx) {
2268 		sgp->len[idx] = 0;
2269 		sgp->addr[idx] = 0;
2270 	}
2271 
2272 	sgl_flits = sgl_len(nsegs);
2273 	txqs.gen = gen;
2274 	txqs.pidx = pidx;
2275 	txqs.compl = 0;
2276 
2277 	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2278 	    from->wrh_hi, from->wrh_lo);
2279 }
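/*
 * SGL packing note for the loop above: each struct sg_ent holds two
 * (length, address) pairs, so "idx ^= 1" alternates between the two
 * slots and sgp only advances every second segment.  When the segment
 * count is odd, the unused second slot is zeroed rather than left with
 * stale contents.
 */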
2280 
2281 /**
2282  *	ofld_xmit - send a packet through an offload queue
2283  *	@adap: the adapter
2284  *	@qs: the queue set containing the offload Tx queue
2285  *	@m: the packet
2286  *
2287  *	Send an offload packet through an SGE offload queue.
2288  */
2289 static int
2290 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2291 {
2292 	int ret;
2293 	unsigned int ndesc;
2294 	unsigned int pidx, gen;
2295 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2296 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2297 
2298 	ndesc = G_HDR_NDESC(oh->flags);
2299 
2300 	TXQ_LOCK(qs);
2301 again:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
2302 	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2303 	if (__predict_false(ret)) {
2304 		if (ret == 1) {
2305 			TXQ_UNLOCK(qs);
2306 			return (EINTR);
2307 		}
2308 		goto again;
2309 	}
2310 
2311 	gen = q->gen;
2312 	q->in_use += ndesc;
2313 	pidx = q->pidx;
2314 	q->pidx += ndesc;
2315 	if (q->pidx >= q->size) {
2316 		q->pidx -= q->size;
2317 		q->gen ^= 1;
2318 	}
2319 
2320 	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2321 	check_ring_tx_db(adap, q, 1);
2322 	TXQ_UNLOCK(qs);
2323 
2324 	return (0);
2325 }
2326 
2327 /**
2328  *	restart_offloadq - restart a suspended offload queue
2329  *	@qs: the queue set containing the offload queue
2330  *
2331  *	Resumes transmission on a suspended Tx offload queue.
2332  */
2333 static void
2334 restart_offloadq(void *data, int npending)
2335 {
2336 	struct mbuf *m;
2337 	struct sge_qset *qs = data;
2338 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2339 	adapter_t *adap = qs->port->adapter;
2340 	int cleaned;
2341 
2342 	TXQ_LOCK(qs);
2343 again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2344 
2345 	while ((m = mbufq_first(&q->sendq)) != NULL) {
2346 		unsigned int gen, pidx;
2347 		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2348 		unsigned int ndesc = G_HDR_NDESC(oh->flags);
2349 
2350 		if (__predict_false(q->size - q->in_use < ndesc)) {
2351 			setbit(&qs->txq_stopped, TXQ_OFLD);
2352 			if (should_restart_tx(q) &&
2353 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2354 				goto again;
2355 			q->stops++;
2356 			break;
2357 		}
2358 
2359 		gen = q->gen;
2360 		q->in_use += ndesc;
2361 		pidx = q->pidx;
2362 		q->pidx += ndesc;
2363 		if (q->pidx >= q->size) {
2364 			q->pidx -= q->size;
2365 			q->gen ^= 1;
2366 		}
2367 
2368 		(void)mbufq_dequeue(&q->sendq);
2369 		TXQ_UNLOCK(qs);
2370 		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2371 		TXQ_LOCK(qs);
2372 	}
2373 #if USE_GTS
2374 	set_bit(TXQ_RUNNING, &q->flags);
2375 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
2376 #endif
2377 	TXQ_UNLOCK(qs);
2378 	wmb();
2379 	t3_write_reg(adap, A_SG_KDOORBELL,
2380 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2381 }
2382 
2383 /**
2384  *	t3_offload_tx - send an offload packet
2385  *	@m: the packet
2386  *
2387  *	Sends an offload packet.  The ofld_hdr at the front of the mbuf
2388  *	selects the target queue set (G_HDR_QSET) and, via the F_HDR_CTRL
2389  *	flag, whether the packet goes through the control or offload queue.
2390  */
2391 int
2392 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2393 {
2394 	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2395 	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2396 
2397 	if (oh->flags & F_HDR_CTRL) {
2398 		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
2399 		return (ctrl_xmit(sc, qs, m));
2400 	} else
2401 		return (ofld_xmit(sc, qs, m));
2402 }
2403 #endif
2404 
2405 static void
2406 restart_tx(struct sge_qset *qs)
2407 {
2408 	struct adapter *sc = qs->port->adapter;
2409 
2410 	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2411 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2412 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2413 		qs->txq[TXQ_OFLD].restarts++;
2414 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2415 	}
2416 
2417 	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2418 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2419 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2420 		qs->txq[TXQ_CTRL].restarts++;
2421 		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2422 	}
2423 }
2424 
2425 /**
2426  *	t3_sge_alloc_qset - initialize an SGE queue set
2427  *	@sc: the controller softc
2428  *	@id: the queue set id
2429  *	@nports: how many Ethernet ports will be using this queue set
2430  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
2431  *	@p: configuration parameters for this queue set
2432  *	@ntxq: number of Tx queues for the queue set
2433  *	@pi: port info for queue set
2434  *
2435  *	Allocate resources and initialize an SGE queue set.  A queue set
2436  *	comprises a response queue, two Rx free-buffer queues, and up to 3
2437  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
2438  *	queue, offload queue, and control queue.
2439  */
2440 int
2441 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2442 		  const struct qset_params *p, int ntxq, struct port_info *pi)
2443 {
2444 	struct sge_qset *q = &sc->sge.qs[id];
2445 	int i, ret = 0;
2446 
2447 	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2448 	q->port = pi;
2449 	q->adap = sc;
2450 
2451 	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2452 	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2453 		device_printf(sc->dev, "failed to allocate mbuf ring\n");
2454 		goto err;
2455 	}
2456 	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2457 	    M_NOWAIT | M_ZERO)) == NULL) {
2458 		device_printf(sc->dev, "failed to allocate ifq\n");
2459 		goto err;
2460 	}
2461 	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2462 	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2463 	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2464 	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2465 	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2466 
2467 	init_qset_cntxt(q, id);
2468 	q->idx = id;
2469 	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2470 		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2471 		    &q->fl[0].desc, &q->fl[0].sdesc,
2472 		    &q->fl[0].desc_tag, &q->fl[0].desc_map,
2473 		    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2474 		printf("error %d from alloc ring fl0\n", ret);
2475 		goto err;
2476 	}
2477 
2478 	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2479 		    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2480 		    &q->fl[1].desc, &q->fl[1].sdesc,
2481 		    &q->fl[1].desc_tag, &q->fl[1].desc_map,
2482 		    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2483 		printf("error %d from alloc ring fl1\n", ret);
2484 		goto err;
2485 	}
2486 
2487 	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2488 		    &q->rspq.phys_addr, &q->rspq.desc, NULL,
2489 		    &q->rspq.desc_tag, &q->rspq.desc_map,
2490 		    NULL, NULL)) != 0) {
2491 		printf("error %d from alloc ring rspq\n", ret);
2492 		goto err;
2493 	}
2494 
2495 	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2496 	    device_get_unit(sc->dev), irq_vec_idx);
2497 	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2498 
2499 	for (i = 0; i < ntxq; ++i) {
2500 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2501 
2502 		if ((ret = alloc_ring(sc, p->txq_size[i],
2503 			    sizeof(struct tx_desc), sz,
2504 			    &q->txq[i].phys_addr, &q->txq[i].desc,
2505 			    &q->txq[i].sdesc, &q->txq[i].desc_tag,
2506 			    &q->txq[i].desc_map,
2507 			    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2508 			printf("error %d from alloc ring tx %i\n", ret, i);
2509 			goto err;
2510 		}
2511 		mbufq_init(&q->txq[i].sendq, INT_MAX);
2512 		q->txq[i].gen = 1;
2513 		q->txq[i].size = p->txq_size[i];
2514 	}
2515 
2516 #ifdef TCP_OFFLOAD
2517 	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2518 #endif
2519 	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2520 	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2521 	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2522 
2523 	q->fl[0].gen = q->fl[1].gen = 1;
2524 	q->fl[0].size = p->fl_size;
2525 	q->fl[1].size = p->jumbo_size;
2526 
2527 	q->rspq.gen = 1;
2528 	q->rspq.cidx = 0;
2529 	q->rspq.size = p->rspq_size;
2530 
2531 	q->txq[TXQ_ETH].stop_thres = nports *
2532 	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2533 
2534 	q->fl[0].buf_size = MCLBYTES;
2535 	q->fl[0].zone = zone_pack;
2536 	q->fl[0].type = EXT_PACKET;
2537 
2538 	if (p->jumbo_buf_size ==  MJUM16BYTES) {
2539 		q->fl[1].zone = zone_jumbo16;
2540 		q->fl[1].type = EXT_JUMBO16;
2541 	} else if (p->jumbo_buf_size ==  MJUM9BYTES) {
2542 		q->fl[1].zone = zone_jumbo9;
2543 		q->fl[1].type = EXT_JUMBO9;
2544 	} else if (p->jumbo_buf_size ==  MJUMPAGESIZE) {
2545 		q->fl[1].zone = zone_jumbop;
2546 		q->fl[1].type = EXT_JUMBOP;
2547 	} else {
2548 		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2549 		ret = EDOOFUS;
2550 		goto err;
2551 	}
2552 	q->fl[1].buf_size = p->jumbo_buf_size;
2553 
2554 	/* Allocate and setup the lro_ctrl structure */
2555 	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2556 #if defined(INET6) || defined(INET)
2557 	ret = tcp_lro_init(&q->lro.ctrl);
2558 	if (ret) {
2559 		printf("error %d from tcp_lro_init\n", ret);
2560 		goto err;
2561 	}
2562 #endif
2563 	q->lro.ctrl.ifp = pi->ifp;
2564 
2565 	mtx_lock_spin(&sc->sge.reg_lock);
2566 	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2567 				   q->rspq.phys_addr, q->rspq.size,
2568 				   q->fl[0].buf_size, 1, 0);
2569 	if (ret) {
2570 		printf("error %d from t3_sge_init_rspcntxt\n", ret);
2571 		goto err_unlock;
2572 	}
2573 
2574 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2575 		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2576 					  q->fl[i].phys_addr, q->fl[i].size,
2577 					  q->fl[i].buf_size, p->cong_thres, 1,
2578 					  0);
2579 		if (ret) {
2580 			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2581 			goto err_unlock;
2582 		}
2583 	}
2584 
2585 	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2586 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2587 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2588 				 1, 0);
2589 	if (ret) {
2590 		printf("error %d from t3_sge_init_ecntxt\n", ret);
2591 		goto err_unlock;
2592 	}
2593 
2594 	if (ntxq > 1) {
2595 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2596 					 USE_GTS, SGE_CNTXT_OFLD, id,
2597 					 q->txq[TXQ_OFLD].phys_addr,
2598 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
2599 		if (ret) {
2600 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2601 			goto err_unlock;
2602 		}
2603 	}
2604 
2605 	if (ntxq > 2) {
2606 		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2607 					 SGE_CNTXT_CTRL, id,
2608 					 q->txq[TXQ_CTRL].phys_addr,
2609 					 q->txq[TXQ_CTRL].size,
2610 					 q->txq[TXQ_CTRL].token, 1, 0);
2611 		if (ret) {
2612 			printf("error %d from t3_sge_init_ecntxt\n", ret);
2613 			goto err_unlock;
2614 		}
2615 	}
2616 
2617 	mtx_unlock_spin(&sc->sge.reg_lock);
2618 	t3_update_qset_coalesce(q, p);
2619 
2620 	refill_fl(sc, &q->fl[0], q->fl[0].size);
2621 	refill_fl(sc, &q->fl[1], q->fl[1].size);
2622 	refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2623 
2624 	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2625 		     V_NEWTIMER(q->rspq.holdoff_tmr));
2626 
2627 	return (0);
2628 
2629 err_unlock:
2630 	mtx_unlock_spin(&sc->sge.reg_lock);
2631 err:
2632 	TXQ_LOCK(q);
2633 	t3_free_qset(sc, q);
2634 
2635 	return (ret);
2636 }
2637 
2638 /*
2639  * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2640  * ethernet data.  Hardware assistance with various checksums and any vlan tag
2641  * will also be taken into account here.
2642  */
2643 void
2644 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2645 {
2646 	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2647 	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2648 	struct ifnet *ifp = pi->ifp;
2649 
2650 	if (cpl->vlan_valid) {
2651 		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2652 		m->m_flags |= M_VLANTAG;
2653 	}
2654 
2655 	m->m_pkthdr.rcvif = ifp;
2656 	/*
2657 	 * adjust after conversion to mbuf chain
2658 	 */
2659 	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2660 	m->m_len -= (sizeof(*cpl) + ethpad);
2661 	m->m_data += (sizeof(*cpl) + ethpad);
2662 
2663 	if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2664 		struct ether_header *eh = mtod(m, void *);
2665 		uint16_t eh_type;
2666 
2667 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2668 			struct ether_vlan_header *evh = mtod(m, void *);
2669 
2670 			eh_type = evh->evl_proto;
2671 		} else
2672 			eh_type = eh->ether_type;
2673 
2674 		if (ifp->if_capenable & IFCAP_RXCSUM &&
2675 		    eh_type == htons(ETHERTYPE_IP)) {
2676 			m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2677 			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2678 			m->m_pkthdr.csum_data = 0xffff;
2679 		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2680 		    eh_type == htons(ETHERTYPE_IPV6)) {
2681 			m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2682 			    CSUM_PSEUDO_HDR);
2683 			m->m_pkthdr.csum_data = 0xffff;
2684 		}
2685 	}
2686 }
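/*
 * csum_data is set to 0xffff above because that is how FreeBSD reports a
 * hardware-verified checksum together with CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR: the stack treats the value as the already-folded
 * checksum and skips any further verification of the payload.
 */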
2687 
2688 /**
2689  *	get_packet - return the next ingress packet buffer from a free list
2690  *	@adap: the adapter that received the packet
2691  *	@drop_thres: # of remaining buffers before we start dropping packets
2692  *	@qs: the qset that the SGE free list holding the packet belongs to
2693  *	@mh: the mbuf header, which contains pointers to the head and tail of the mbuf chain
2694  *	@r: the response descriptor
2695  *
2696  *	Get the next packet from a free list and complete setup of the
2697  *	mbuf.  If the packet is small we make a copy and recycle the
2698  *	original buffer, otherwise we use the original buffer itself.  If a
2699  *	positive drop threshold is supplied packets are dropped and their
2700  *	buffers recycled if (a) the number of remaining buffers is under the
2701  *	threshold and the packet is too big to copy, or (b) the packet should
2702  *	be copied but there is no memory for the copy.
2703  */
2704 static int
2705 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2706     struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2707 {
2708 
2709 	unsigned int len_cq =  ntohl(r->len_cq);
2710 	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2711 	int mask, cidx = fl->cidx;
2712 	struct rx_sw_desc *sd = &fl->sdesc[cidx];
2713 	uint32_t len = G_RSPD_LEN(len_cq);
2714 	uint32_t flags = M_EXT;
2715 	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2716 	caddr_t cl;
2717 	struct mbuf *m;
2718 	int ret = 0;
2719 
2720 	mask = fl->size - 1;
2721 	prefetch(fl->sdesc[(cidx + 1) & mask].m);
2722 	prefetch(fl->sdesc[(cidx + 2) & mask].m);
2723 	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2724 	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2725 
2726 	fl->credits--;
2727 	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2728 
2729 	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2730 	    sopeop == RSPQ_SOP_EOP) {
2731 		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2732 			goto skip_recycle;
2733 		cl = mtod(m, void *);
2734 		memcpy(cl, sd->rxsd_cl, len);
2735 		recycle_rx_buf(adap, fl, fl->cidx);
2736 		m->m_pkthdr.len = m->m_len = len;
2737 		m->m_flags = 0;
2738 		mh->mh_head = mh->mh_tail = m;
2739 		ret = 1;
2740 		goto done;
2741 	} else {
2742 	skip_recycle:
2743 		bus_dmamap_unload(fl->entry_tag, sd->map);
2744 		cl = sd->rxsd_cl;
2745 		m = sd->m;
2746 
2747 		if ((sopeop == RSPQ_SOP_EOP) ||
2748 		    (sopeop == RSPQ_SOP))
2749 			flags |= M_PKTHDR;
2750 		m_init(m, M_NOWAIT, MT_DATA, flags);
2751 		if (fl->zone == zone_pack) {
2752 			/*
2753 			 * restore clobbered data pointer
2754 			 */
2755 			m->m_data = m->m_ext.ext_buf;
2756 		} else {
2757 			m_cljset(m, cl, fl->type);
2758 		}
2759 		m->m_len = len;
2760 	}
2761 	switch(sopeop) {
2762 	case RSPQ_SOP_EOP:
2763 		ret = 1;
2764 		/* FALLTHROUGH */
2765 	case RSPQ_SOP:
2766 		mh->mh_head = mh->mh_tail = m;
2767 		m->m_pkthdr.len = len;
2768 		break;
2769 	case RSPQ_EOP:
2770 		ret = 1;
2771 		/* FALLTHROUGH */
2772 	case RSPQ_NSOP_NEOP:
2773 		if (mh->mh_tail == NULL) {
2774 			log(LOG_ERR, "discarding intermediate descriptor entry\n");
2775 			m_freem(m);
2776 			m = NULL;
2777 			break;
2778 		}
2779 		mh->mh_tail->m_next = m;
2780 		mh->mh_tail = m;
2781 		mh->mh_head->m_pkthdr.len += len;
2782 		break;
2783 	}
2784 	if (cxgb_debug && m != NULL)
2785 		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2786 done:
2787 	if (++fl->cidx == fl->size)
2788 		fl->cidx = 0;
2789 
2790 	return (ret);
2791 }
2792 
2793 /**
2794  *	handle_rsp_cntrl_info - handles control information in a response
2795  *	@qs: the queue set corresponding to the response
2796  *	@flags: the response control flags
2797  *
2798  *	Handles the control information of an SGE response, such as GTS
2799  *	indications and completion credits for the queue set's Tx queues.
2800  *	HW coalesces credits; we don't do any extra SW coalescing.
2801  */
2802 static __inline void
2803 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2804 {
2805 	unsigned int credits;
2806 
2807 #if USE_GTS
2808 	if (flags & F_RSPD_TXQ0_GTS)
2809 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2810 #endif
2811 	credits = G_RSPD_TXQ0_CR(flags);
2812 	if (credits)
2813 		qs->txq[TXQ_ETH].processed += credits;
2814 
2815 	credits = G_RSPD_TXQ2_CR(flags);
2816 	if (credits)
2817 		qs->txq[TXQ_CTRL].processed += credits;
2818 
2819 # if USE_GTS
2820 	if (flags & F_RSPD_TXQ1_GTS)
2821 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2822 # endif
2823 	credits = G_RSPD_TXQ1_CR(flags);
2824 	if (credits)
2825 		qs->txq[TXQ_OFLD].processed += credits;
2826 
2827 }
2828 
2829 static void
2830 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2831     unsigned int sleeping)
2832 {
2833 	;
2834 }
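/*
 * check_ring_db() above is deliberately a no-op in this driver: the
 * "sleeping" GTS flags it is handed only matter when the USE_GTS
 * doorbell-avoidance mode is enabled, and the USE_GTS blocks in this
 * file appear to be compiled out.
 */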
2835 
2836 /**
2837  *	process_responses - process responses from an SGE response queue
2838  *	@adap: the adapter
2839  *	@qs: the queue set to which the response queue belongs
2840  *	@budget: how many responses can be processed in this round
2841  *
2842  *	Process responses from an SGE response queue up to the supplied budget.
2843  *	Responses include received packets as well as credits and other events
2844  *	for the queues that belong to the response queue's queue set.
2845  *	A negative budget is effectively unlimited.
2846  *
2847  *	Additionally choose the interrupt holdoff time for the next interrupt
2848  *	on this queue.  If the system is under memory shortage use a fairly
2849  *	long delay to help recovery.
2850  */
2851 static int
2852 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2853 {
2854 	struct sge_rspq *rspq = &qs->rspq;
2855 	struct rsp_desc *r = &rspq->desc[rspq->cidx];
2856 	int budget_left = budget;
2857 	unsigned int sleeping = 0;
2858 #if defined(INET6) || defined(INET)
2859 	int lro_enabled = qs->lro.enabled;
2860 	int skip_lro;
2861 	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2862 #endif
2863 	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2864 #ifdef DEBUG
2865 	static int last_holdoff = 0;
2866 	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2867 		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2868 		last_holdoff = rspq->holdoff_tmr;
2869 	}
2870 #endif
2871 	rspq->next_holdoff = rspq->holdoff_tmr;
2872 
2873 	while (__predict_true(budget_left && is_new_response(r, rspq))) {
2874 		int eth, eop = 0, ethpad = 0;
2875 		uint32_t flags = ntohl(r->flags);
2876 		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2877 		uint8_t opcode = r->rss_hdr.opcode;
2878 
2879 		eth = (opcode == CPL_RX_PKT);
2880 
2881 		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2882 			struct mbuf *m;
2883 
2884 			if (cxgb_debug)
2885 				printf("async notification\n");
2886 
2887 			if (mh->mh_head == NULL) {
2888 				mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2889 				m = mh->mh_head;
2890 			} else {
2891 				m = m_gethdr(M_NOWAIT, MT_DATA);
2892 			}
2893 			if (m == NULL)
2894 				goto no_mem;
2895 
2896 			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2897 			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2898 			*mtod(m, uint8_t *) = CPL_ASYNC_NOTIF;
2899 			opcode = CPL_ASYNC_NOTIF;
2900 			eop = 1;
2901 			rspq->async_notif++;
2902 			goto skip;
2903 		} else if  (flags & F_RSPD_IMM_DATA_VALID) {
2904 			struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2905 
2906 			if (m == NULL) {
2907 		no_mem:
2908 				rspq->next_holdoff = NOMEM_INTR_DELAY;
2909 				budget_left--;
2910 				break;
2911 			}
2912 			if (mh->mh_head == NULL)
2913 				mh->mh_head = m;
2914 			else
2915 				mh->mh_tail->m_next = m;
2916 			mh->mh_tail = m;
2917 
2918 			get_imm_packet(adap, r, m);
2919 			mh->mh_head->m_pkthdr.len += m->m_len;
2920 			eop = 1;
2921 			rspq->imm_data++;
2922 		} else if (r->len_cq) {
2923 			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2924 
2925 			eop = get_packet(adap, drop_thresh, qs, mh, r);
2926 			if (eop) {
2927 				if (r->rss_hdr.hash_type && !adap->timestamp) {
2928 					M_HASHTYPE_SET(mh->mh_head,
2929 					    M_HASHTYPE_OPAQUE_HASH);
2930 					mh->mh_head->m_pkthdr.flowid = rss_hash;
2931 				}
2932 			}
2933 
2934 			ethpad = 2;
2935 		} else {
2936 			rspq->pure_rsps++;
2937 		}
2938 	skip:
2939 		if (flags & RSPD_CTRL_MASK) {
2940 			sleeping |= flags & RSPD_GTS_MASK;
2941 			handle_rsp_cntrl_info(qs, flags);
2942 		}
2943 
2944 		if (!eth && eop) {
2945 			rspq->offload_pkts++;
2946 #ifdef TCP_OFFLOAD
2947 			adap->cpl_handler[opcode](qs, r, mh->mh_head);
2948 #else
2949 			m_freem(mh->mh_head);
2950 #endif
2951 			mh->mh_head = NULL;
2952 		} else if (eth && eop) {
2953 			struct mbuf *m = mh->mh_head;
2954 
2955 			t3_rx_eth(adap, m, ethpad);
2956 
2957 			/*
2958 			 * The T304 sends incoming packets on any qset.  If LRO
2959 			 * is also enabled, we could end up sending the packet up
2960 			 * lro_ctrl->ifp's input.  That is incorrect.
2961 			 *
2962 			 * The mbuf's rcvif was derived from the cpl header and
2963 			 * is accurate.  Skip LRO and just use that.
2964 			 */
2965 #if defined(INET6) || defined(INET)
2966 			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2967 
2968 			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2969 			    && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2970 			    ) {
2971 				/* successfully queued for LRO */
2972 			} else
2973 #endif
2974 			{
2975 				/*
2976 				 * LRO not enabled, packet unsuitable for LRO,
2977 				 * or unable to queue.  Pass it up right now in
2978 				 * either case.
2979 				 */
2980 				struct ifnet *ifp = m->m_pkthdr.rcvif;
2981 				(*ifp->if_input)(ifp, m);
2982 			}
2983 			mh->mh_head = NULL;
2984 
2985 		}
2986 
2987 		r++;
2988 		if (__predict_false(++rspq->cidx == rspq->size)) {
2989 			rspq->cidx = 0;
2990 			rspq->gen ^= 1;
2991 			r = rspq->desc;
2992 		}
2993 
2994 		if (++rspq->credits >= 64) {
2995 			refill_rspq(adap, rspq, rspq->credits);
2996 			rspq->credits = 0;
2997 		}
2998 		__refill_fl_lt(adap, &qs->fl[0], 32);
2999 		__refill_fl_lt(adap, &qs->fl[1], 32);
3000 		--budget_left;
3001 	}
3002 
3003 #if defined(INET6) || defined(INET)
3004 	/* Flush LRO */
3005 	tcp_lro_flush_all(lro_ctrl);
3006 #endif
3007 
3008 	if (sleeping)
3009 		check_ring_db(adap, qs, sleeping);
3010 
3011 	mb();  /* commit Tx queue processed updates */
3012 	if (__predict_false(qs->txq_stopped > 1))
3013 		restart_tx(qs);
3014 
3015 	__refill_fl_lt(adap, &qs->fl[0], 512);
3016 	__refill_fl_lt(adap, &qs->fl[1], 512);
3017 	budget -= budget_left;
3018 	return (budget);
3019 }
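/*
 * process_responses() returns the number of responses it consumed
 * (budget minus what was left over); the callers below treat that value
 * mainly as a "was any work done" indicator when deciding whether an
 * interrupt was actually handled.
 */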
3020 
3021 /*
3022  * A helper function that processes responses and issues GTS.
3023  */
3024 static __inline int
3025 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3026 {
3027 	int work;
3028 	static int last_holdoff = 0;
3029 
3030 	work = process_responses(adap, rspq_to_qset(rq), -1);
3031 
3032 	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3033 		printf("next_holdoff=%d\n", rq->next_holdoff);
3034 		last_holdoff = rq->next_holdoff;
3035 	}
3036 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3037 	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3038 
3039 	return (work);
3040 }
3041 
3042 #ifdef DEBUGNET
3043 int
3044 cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs)
3045 {
3046 
3047 	return (process_responses_gts(adap, &qs->rspq));
3048 }
3049 #endif
3050 
3051 /*
3052  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3053  * Handles data events from SGE response queues as well as error and other
3054  * async events as they all use the same interrupt pin.  We use one SGE
3055  * response queue per port in this mode and protect all response queues with
3056  * queue 0's lock.
3057  */
3058 void
3059 t3b_intr(void *data)
3060 {
3061 	uint32_t i, map;
3062 	adapter_t *adap = data;
3063 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3064 
3065 	t3_write_reg(adap, A_PL_CLI, 0);
3066 	map = t3_read_reg(adap, A_SG_DATA_INTR);
3067 
3068 	if (!map)
3069 		return;
3070 
3071 	if (__predict_false(map & F_ERRINTR)) {
3072 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3073 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3074 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3075 	}
3076 
3077 	mtx_lock(&q0->lock);
3078 	for_each_port(adap, i)
3079 	    if (map & (1 << i))
3080 			process_responses_gts(adap, &adap->sge.qs[i].rspq);
3081 	mtx_unlock(&q0->lock);
3082 }
3083 
3084 /*
3085  * The MSI interrupt handler.  This needs to handle data events from SGE
3086  * response queues as well as error and other async events as they all use
3087  * the same MSI vector.  We use one SGE response queue per port in this mode
3088  * and protect all response queues with queue 0's lock.
3089  */
3090 void
3091 t3_intr_msi(void *data)
3092 {
3093 	adapter_t *adap = data;
3094 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3095 	int i, new_packets = 0;
3096 
3097 	mtx_lock(&q0->lock);
3098 
3099 	for_each_port(adap, i)
3100 	    if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3101 		    new_packets = 1;
3102 	mtx_unlock(&q0->lock);
3103 	if (new_packets == 0) {
3104 		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3105 		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3106 		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3107 	}
3108 }
3109 
3110 void
3111 t3_intr_msix(void *data)
3112 {
3113 	struct sge_qset *qs = data;
3114 	adapter_t *adap = qs->port->adapter;
3115 	struct sge_rspq *rspq = &qs->rspq;
3116 
3117 	if (process_responses_gts(adap, rspq) == 0)
3118 		rspq->unhandled_irqs++;
3119 }
3120 
3121 #define QDUMP_SBUF_SIZE		(32 * 400)
3122 static int
3123 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3124 {
3125 	struct sge_rspq *rspq;
3126 	struct sge_qset *qs;
3127 	int i, err, dump_end, idx;
3128 	struct sbuf *sb;
3129 	struct rsp_desc *rspd;
3130 	uint32_t data[4];
3131 
3132 	rspq = arg1;
3133 	qs = rspq_to_qset(rspq);
3134 	if (rspq->rspq_dump_count == 0)
3135 		return (0);
3136 	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3137 		log(LOG_WARNING,
3138 		    "dump count is too large %d\n", rspq->rspq_dump_count);
3139 		rspq->rspq_dump_count = 0;
3140 		return (EINVAL);
3141 	}
3142 	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3143 		log(LOG_WARNING,
3144 		    "dump start of %d is greater than queue size\n",
3145 		    rspq->rspq_dump_start);
3146 		rspq->rspq_dump_start = 0;
3147 		return (EINVAL);
3148 	}
3149 	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3150 	if (err)
3151 		return (err);
3152 	err = sysctl_wire_old_buffer(req, 0);
3153 	if (err)
3154 		return (err);
3155 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3156 
3157 	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3158 	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3159 	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3160 	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3161 	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3162 
3163 	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3164 	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3165 
3166 	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3167 	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3168 		idx = i & (RSPQ_Q_SIZE-1);
3169 
3170 		rspd = &rspq->desc[idx];
3171 		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3172 		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3173 		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3174 		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3175 		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3176 		    be32toh(rspd->len_cq), rspd->intr_gen);
3177 	}
3178 
3179 	err = sbuf_finish(sb);
3180 	sbuf_delete(sb);
3181 	return (err);
3182 }
3183 
3184 static int
3185 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3186 {
3187 	struct sge_txq *txq;
3188 	struct sge_qset *qs;
3189 	int i, j, err, dump_end;
3190 	struct sbuf *sb;
3191 	struct tx_desc *txd;
3192 	uint32_t *WR, wr_hi, wr_lo, gen;
3193 	uint32_t data[4];
3194 
3195 	txq = arg1;
3196 	qs = txq_to_qset(txq, TXQ_ETH);
3197 	if (txq->txq_dump_count == 0) {
3198 		return (0);
3199 	}
3200 	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3201 		log(LOG_WARNING,
3202 		    "dump count is too large %d\n", txq->txq_dump_count);
3203 		txq->txq_dump_count = 1;
3204 		return (EINVAL);
3205 	}
3206 	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3207 		log(LOG_WARNING,
3208 		    "dump start of %d is greater than queue size\n",
3209 		    txq->txq_dump_start);
3210 		txq->txq_dump_start = 0;
3211 		return (EINVAL);
3212 	}
3213 	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3214 	if (err)
3215 		return (err);
3216 	err = sysctl_wire_old_buffer(req, 0);
3217 	if (err)
3218 		return (err);
3219 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3220 
3221 	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3222 	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3223 	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3224 	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3225 	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3226 	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3227 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3228 	    txq->txq_dump_start,
3229 	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3230 
3231 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3232 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3233 		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3234 		WR = (uint32_t *)txd->flit;
3235 		wr_hi = ntohl(WR[0]);
3236 		wr_lo = ntohl(WR[1]);
3237 		gen = G_WR_GEN(wr_lo);
3238 
3239 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3240 		    wr_hi, wr_lo, gen);
3241 		for (j = 2; j < 30; j += 4)
3242 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3243 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3244 
3245 	}
3246 	err = sbuf_finish(sb);
3247 	sbuf_delete(sb);
3248 	return (err);
3249 }
3250 
3251 static int
3252 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3253 {
3254 	struct sge_txq *txq;
3255 	struct sge_qset *qs;
3256 	int i, j, err, dump_end;
3257 	struct sbuf *sb;
3258 	struct tx_desc *txd;
3259 	uint32_t *WR, wr_hi, wr_lo, gen;
3260 
3261 	txq = arg1;
3262 	qs = txq_to_qset(txq, TXQ_CTRL);
3263 	if (txq->txq_dump_count == 0) {
3264 		return (0);
3265 	}
3266 	if (txq->txq_dump_count > 256) {
3267 		log(LOG_WARNING,
3268 		    "dump count is too large %d\n", txq->txq_dump_count);
3269 		txq->txq_dump_count = 1;
3270 		return (EINVAL);
3271 	}
3272 	if (txq->txq_dump_start > 255) {
3273 		log(LOG_WARNING,
3274 		    "dump start of %d is greater than queue size\n",
3275 		    txq->txq_dump_start);
3276 		txq->txq_dump_start = 0;
3277 		return (EINVAL);
3278 	}
3279 
3280 	err = sysctl_wire_old_buffer(req, 0);
3281 	if (err != 0)
3282 		return (err);
3283 	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3284 	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3285 	    txq->txq_dump_start,
3286 	    (txq->txq_dump_start + txq->txq_dump_count) & 255);
3287 
3288 	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3289 	for (i = txq->txq_dump_start; i < dump_end; i++) {
3290 		txd = &txq->desc[i & (255)];
3291 		WR = (uint32_t *)txd->flit;
3292 		wr_hi = ntohl(WR[0]);
3293 		wr_lo = ntohl(WR[1]);
3294 		gen = G_WR_GEN(wr_lo);
3295 
3296 		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3297 		    wr_hi, wr_lo, gen);
3298 		for (j = 2; j < 30; j += 4)
3299 			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3300 			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3301 
3302 	}
3303 	err = sbuf_finish(sb);
3304 	sbuf_delete(sb);
3305 	return (err);
3306 }
3307 
3308 static int
3309 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3310 {
3311 	adapter_t *sc = arg1;
3312 	struct qset_params *qsp = &sc->params.sge.qset[0];
3313 	int coalesce_usecs;
3314 	struct sge_qset *qs;
3315 	int i, j, err, nqsets = 0;
3316 	struct mtx *lock;
3317 
3318 	if ((sc->flags & FULL_INIT_DONE) == 0)
3319 		return (ENXIO);
3320 
3321 	coalesce_usecs = qsp->coalesce_usecs;
3322         err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3323 
3324 	if (err != 0) {
3325 		return (err);
3326 	}
3327 	if (coalesce_usecs == qsp->coalesce_usecs)
3328 		return (0);
3329 
3330 	for (i = 0; i < sc->params.nports; i++)
3331 		for (j = 0; j < sc->port[i].nqsets; j++)
3332 			nqsets++;
3333 
3334 	coalesce_usecs = max(1, coalesce_usecs);
3335 
3336 	for (i = 0; i < nqsets; i++) {
3337 		qs = &sc->sge.qs[i];
3338 		qsp = &sc->params.sge.qset[i];
3339 		qsp->coalesce_usecs = coalesce_usecs;
3340 
3341 		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3342 			    &sc->sge.qs[0].rspq.lock;
3343 
3344 		mtx_lock(lock);
3345 		t3_update_qset_coalesce(qs, qsp);
3346 		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3347 		    V_NEWTIMER(qs->rspq.holdoff_tmr));
3348 		mtx_unlock(lock);
3349 	}
3350 
3351 	return (0);
3352 }
3353 
3354 static int
3355 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3356 {
3357 	adapter_t *sc = arg1;
3358 	int rc, timestamp;
3359 
3360 	if ((sc->flags & FULL_INIT_DONE) == 0)
3361 		return (ENXIO);
3362 
3363 	timestamp = sc->timestamp;
3364 	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3365 
3366 	if (rc != 0)
3367 		return (rc);
3368 
3369 	if (timestamp != sc->timestamp) {
3370 		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3371 		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3372 		sc->timestamp = timestamp;
3373 	}
3374 
3375 	return (0);
3376 }
3377 
3378 void
3379 t3_add_attach_sysctls(adapter_t *sc)
3380 {
3381 	struct sysctl_ctx_list *ctx;
3382 	struct sysctl_oid_list *children;
3383 
3384 	ctx = device_get_sysctl_ctx(sc->dev);
3385 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3386 
3387 	/* random information */
3388 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3389 	    "firmware_version",
3390 	    CTLFLAG_RD, sc->fw_version,
3391 	    0, "firmware version");
3392 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3393 	    "hw_revision",
3394 	    CTLFLAG_RD, &sc->params.rev,
3395 	    0, "chip model");
3396 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3397 	    "port_types",
3398 	    CTLFLAG_RD, sc->port_types,
3399 	    0, "type of ports");
3400 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3401 	    "enable_debug",
3402 	    CTLFLAG_RW, &cxgb_debug,
3403 	    0, "enable verbose debugging output");
3404 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3405 	    CTLFLAG_RD, &sc->tunq_coalesce,
3406 	    "#tunneled packets freed");
3407 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3408 	    "txq_overrun",
3409 	    CTLFLAG_RD, &txq_fills,
3410 	    0, "#times txq overrun");
3411 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3412 	    "core_clock",
3413 	    CTLFLAG_RD, &sc->params.vpd.cclk,
3414 	    0, "core clock frequency (in KHz)");
3415 }
3416 
3417 
3418 static const char *rspq_name = "rspq";
3419 static const char *txq_names[] =
3420 {
3421 	"txq_eth",
3422 	"txq_ofld",
3423 	"txq_ctrl"
3424 };
3425 
3426 static int
3427 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3428 {
3429 	struct port_info *p = arg1;
3430 	uint64_t *parg;
3431 
3432 	if (!p)
3433 		return (EINVAL);
3434 
3435 	cxgb_refresh_stats(p);
3436 	parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3437 
3438 	return (sysctl_handle_64(oidp, parg, 0, req));
3439 }
3440 
3441 void
3442 t3_add_configured_sysctls(adapter_t *sc)
3443 {
3444 	struct sysctl_ctx_list *ctx;
3445 	struct sysctl_oid_list *children;
3446 	int i, j;
3447 
3448 	ctx = device_get_sysctl_ctx(sc->dev);
3449 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3450 
3451 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3452 	    "intr_coal",
3453 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3454 	    0, t3_set_coalesce_usecs,
3455 	    "I", "interrupt coalescing timer (us)");
3456 
3457 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3458 	    "pkt_timestamp",
3459 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3460 	    0, t3_pkt_timestamp,
3461 	    "I", "provide packet timestamp instead of connection hash");
3462 
3463 	for (i = 0; i < sc->params.nports; i++) {
3464 		struct port_info *pi = &sc->port[i];
3465 		struct sysctl_oid *poid;
3466 		struct sysctl_oid_list *poidlist;
3467 		struct mac_stats *mstats = &pi->mac.stats;
3468 
3469 		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3470 		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3471 		    pi->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3472 		    "port statistics");
3473 		poidlist = SYSCTL_CHILDREN(poid);
3474 		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3475 		    "nqsets", CTLFLAG_RD, &pi->nqsets,
3476 		    0, "#queue sets");
3477 
3478 		for (j = 0; j < pi->nqsets; j++) {
3479 			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3480 			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3481 					  *ctrlqpoid, *lropoid;
3482 			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3483 					       *txqpoidlist, *ctrlqpoidlist,
3484 					       *lropoidlist;
3485 			struct sge_txq *txq = &qs->txq[TXQ_ETH];
3486 
3487 			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3488 
3489 			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3490 			    qs->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3491 			    "qset statistics");
3492 			qspoidlist = SYSCTL_CHILDREN(qspoid);
3493 
3494 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3495 					CTLFLAG_RD, &qs->fl[0].empty, 0,
3496 					"freelist #0 empty");
3497 			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3498 					CTLFLAG_RD, &qs->fl[1].empty, 0,
3499 					"freelist #1 empty");
3500 
3501 			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3502 			    rspq_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3503 			    "rspq statistics");
3504 			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3505 
3506 			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3507 			    txq_names[0], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3508 			    "txq statistics");
3509 			txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3510 
3511 			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3512 			    txq_names[2], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3513 			    "ctrlq statistics");
3514 			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3515 
3516 			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3517 			    "lro_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3518 			    "LRO statistics");
3519 			lropoidlist = SYSCTL_CHILDREN(lropoid);
3520 
3521 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3522 			    CTLFLAG_RD, &qs->rspq.size,
3523 			    0, "#entries in response queue");
3524 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3525 			    CTLFLAG_RD, &qs->rspq.cidx,
3526 			    0, "consumer index");
3527 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3528 			    CTLFLAG_RD, &qs->rspq.credits,
3529 			    0, "#credits");
3530 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3531 			    CTLFLAG_RD, &qs->rspq.starved,
3532 			    0, "#times starved");
3533 			SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3534 			    CTLFLAG_RD, &qs->rspq.phys_addr,
3535 	    "physical address of the queue");
3536 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3537 			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3538 			    0, "start rspq dump entry");
3539 			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3540 			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3541 			    0, "#rspq entries to dump");
3542 			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3543 			    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3544 			    &qs->rspq, 0, t3_dump_rspq, "A",
3545 			    "dump of the response queue");
3546 
3547 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3548 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3549 			    "#tunneled packets dropped");
3550 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3551 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3552 			    0, "#tunneled packets waiting to be sent");
3553 #if 0
3554 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3555 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3556 			    0, "#tunneled packets queue producer index");
3557 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3558 			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons,
3559 			    0, "#tunneled packets queue consumer index");
3560 #endif
3561 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3562 			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3563 			    0, "#tunneled packets processed by the card");
3564 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3565 			    CTLFLAG_RD, &txq->cleaned,
3566 			    0, "#tunneled packets cleaned");
3567 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3568 			    CTLFLAG_RD, &txq->in_use,
3569 			    0, "#tunneled packet slots in use");
3570 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
3571 			    CTLFLAG_RD, &txq->txq_frees,
3572 			    "#tunneled packets freed");
3573 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3574 			    CTLFLAG_RD, &txq->txq_skipped,
3575 			    0, "#tunneled packet descriptors skipped");
3576 			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3577 			    CTLFLAG_RD, &txq->txq_coalesced,
3578 			    "#tunneled packets coalesced");
3579 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3580 			    CTLFLAG_RD, &txq->txq_enqueued,
3581 			    0, "#tunneled packets enqueued to hardware");
3582 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3583 			    CTLFLAG_RD, &qs->txq_stopped,
3584 			    0, "tx queues stopped");
3585 			SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3586 			    CTLFLAG_RD, &txq->phys_addr,
3587 			    "physical address of the queue");
3588 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3589 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3590 			    0, "txq generation");
3591 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3592 			    CTLFLAG_RD, &txq->cidx,
3593 			    0, "hardware queue cidx");
3594 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3595 			    CTLFLAG_RD, &txq->pidx,
3596 			    0, "hardware queue pidx");
3597 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3598 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3599 			    0, "txq start idx for dump");
3600 			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3601 			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3602 			    0, "txq #entries to dump");
3603 			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3604 			    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3605 			    &qs->txq[TXQ_ETH], 0, t3_dump_txq_eth, "A",
3606 			    "dump of the transmit queue");
3607 
3608 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3609 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3610 			    0, "ctrlq start idx for dump");
3611 			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3612 			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3613 			    0, "ctrlq #entries to dump");
3614 			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3615 			    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3616 			    &qs->txq[TXQ_CTRL], 0, t3_dump_txq_ctrl, "A",
3617 			    "dump of the control queue");
3618 
3619 			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
3620 			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, "#packets queued for LRO");
3621 			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3622 			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, "#LRO flushes");
3623 			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3624 			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, "#LRO bad checksums");
3625 			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3626 			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, "#LRO entries");
3627 		}
3628 
3629 		/* Now add a node for mac stats. */
3630 		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3631 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC statistics");
3632 		poidlist = SYSCTL_CHILDREN(poid);
3633 
3634 		/*
3635 		 * We (ab)use the length argument (arg2) to pass on the offset
3636 		 * of the data that we are interested in.  This is only required
3637 		 * for the quad counters that are updated from the hardware (we
3638 		 * make sure that we return the latest value).
3639 		 * sysctl_handle_macstat first updates *all* the counters from
3640 		 * the hardware, and then returns the latest value of the
3641 		 * requested counter.  Best would be to update only the
3642 		 * requested counter from hardware, but t3_mac_update_stats()
3643 		 * hides all the register details and we don't want to dive into
3644 		 * all that here.  A sketch of one such handler follows below.
3645 		 */
3646 #define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3647     CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, \
3648     offsetof(struct mac_stats, a), sysctl_handle_macstat, "QU", 0)
3649 		CXGB_SYSCTL_ADD_QUAD(tx_octets);
3650 		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3651 		CXGB_SYSCTL_ADD_QUAD(tx_frames);
3652 		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3653 		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3654 		CXGB_SYSCTL_ADD_QUAD(tx_pause);
3655 		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3656 		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3657 		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3658 		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3659 		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3660 		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3661 		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3662 		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3663 		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3664 		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3665 		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3666 		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3667 		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3668 		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3669 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3670 		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3671 		CXGB_SYSCTL_ADD_QUAD(rx_octets);
3672 		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3673 		CXGB_SYSCTL_ADD_QUAD(rx_frames);
3674 		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3675 		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3676 		CXGB_SYSCTL_ADD_QUAD(rx_pause);
3677 		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3678 		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3679 		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3680 		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3681 		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3682 		CXGB_SYSCTL_ADD_QUAD(rx_runt);
3683 		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3684 		CXGB_SYSCTL_ADD_QUAD(rx_short);
3685 		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3686 		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3687 		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3688 		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3689 		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3690 		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3691 		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3692 		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3693 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3694 		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3695 #undef CXGB_SYSCTL_ADD_QUAD
3696 
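		/*
		 * The remaining MAC counters are plain unsigned longs and are
		 * exported with a direct pointer, so sysctl simply reports the
		 * current softc value without refreshing the hardware
		 * counters first.
		 */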
3697 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3698     CTLFLAG_RD, &mstats->a, 0)
3699 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3700 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3701 		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3702 		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3703 		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3704 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3705 		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3706 		CXGB_SYSCTL_ADD_ULONG(num_toggled);
3707 		CXGB_SYSCTL_ADD_ULONG(num_resets);
3708 		CXGB_SYSCTL_ADD_ULONG(link_faults);
3709 #undef CXGB_SYSCTL_ADD_ULONG
3710 	}
3711 }
3712 
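/*
 * A minimal sketch of the offset-in-arg2 handler described in the comment
 * above, included for illustration only.  The name is hypothetical, and the
 * assumptions that pi->mac.stats is the softc copy of the counters and that
 * PORT_LOCK()/PORT_UNLOCK() provide the needed serialization may not match
 * the driver's actual sysctl_handle_macstat.
 */
static int
sysctl_handle_macstat_sketch(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	uint64_t val;

	if (pi == NULL)
		return (EINVAL);

	PORT_LOCK(pi);
	/* Refresh every counter from the MAC, as noted above ... */
	t3_mac_update_stats(&pi->mac);
	/* ... then return only the quad counter at byte offset arg2. */
	val = *(uint64_t *)((char *)&pi->mac.stats + arg2);
	PORT_UNLOCK(pi);

	return (sysctl_handle_64(oidp, &val, 0, req));
}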
3713 /**
3714  *	t3_get_desc - dump an SGE descriptor for debugging purposes
3715  *	@qs: the queue set
3716  *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3717  *	@idx: the descriptor index in the queue
3718  *	@data: where to dump the descriptor contents
3719  *
3720  *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
3721  *	size of the descriptor, or EINVAL if the queue or index is invalid.
3722  */
3723 int
3724 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3725 		unsigned char *data)
3726 {
3727 	if (qnum >= 6)
3728 		return (EINVAL);
3729 
3730 	if (qnum < 3) {
3731 		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3732 			return (EINVAL);
3733 		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3734 		return (sizeof(struct tx_desc));
3735 	}
3736 
3737 	if (qnum == 3) {
3738 		if (!qs->rspq.desc || idx >= qs->rspq.size)
3739 			return (EINVAL);
3740 		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3741 		return (sizeof(struct rsp_desc));
3742 	}
3743 
3744 	qnum -= 4;
3745 	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3746 		return (EINVAL);
3747 	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3748 	return sizeof(struct rx_desc);
3749 	return (sizeof(struct rx_desc));
3750 }
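/*
 * A hypothetical caller of t3_get_desc(), shown only to illustrate the qnum
 * mapping documented above (0..2 tx, 3 response, 4..5 free list); the
 * function name and its use of a stack buffer are assumptions, not part of
 * this driver.
 */
static int
t3_dump_one_desc_example(const struct sge_qset *qs, unsigned int qnum,
    unsigned int idx)
{
	/* Sized generously to hold any of the three descriptor types. */
	unsigned char buf[sizeof(struct tx_desc) + sizeof(struct rsp_desc) +
	    sizeof(struct rx_desc)];
	int len;

	len = t3_get_desc(qs, qnum, idx, buf);
	if (len == EINVAL)	/* no descriptor size equals EINVAL */
		return (EINVAL);

	/* 'len' bytes of raw descriptor contents are now in buf. */
	return (0);
}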