/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),

	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * When multiple locks must be acquired, the correct order is t4_list_lock
 * first, then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;
static SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

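/*
 * Example /boot/loader.conf entries (the values are illustrative, not
 * recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.holdoff_timer_idx_10G="2"
 */
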
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

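/*
 * For example, setting hw.cxgbe.interrupt_types="4" in loader.conf restricts
 * the driver to MSI-X, and "2" restricts it to MSI.
 */
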
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

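/*
 * Flags that tell update_mac_settings() which of the port's MAC settings to
 * apply.
 */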
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

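/* PCI device IDs that t4_probe() and t5_probe() recognize. */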
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},
	{0x5407, "Chelsio T520-SO"},
	{0x5408, "Chelsio T520-CX"},
	{0x5410, "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},
#ifdef notyet
	{0x5402, "Chelsio T522-CR"},
	{0x5403, "Chelsio T540-CR"},
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5409, "Chelsio T520-BT"},
	{0x540a, "Chelsio T504-BT"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540d, "Chelsio T580-CR"},
	{0x540e, "Chelsio T540-LP-CR"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5412, "Chelsio T560-CR"},
	{0x5413, "Chelsio T580-CR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() is handed an iq and must locate the fl that goes with it, so
 * the offset of the fl from the iq must be exactly the same in both rxq and
 * ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
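	/* By convention, the driver talks to the firmware via mailbox <pf#>. */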
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early.  With the memory windows set up even before
	 * the character device is created, the userland tool's register i/o
	 * and memory reads will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* The cdev was created, so cxgbetool still works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		mtx_lock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}


static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

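/* Interface capabilities that a cxgbe/cxl ifnet advertises and enables. */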
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

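	/* Pick a txq using the mbuf's flow id so that a flow stays on one queue. */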
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

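/*
 * Called when the hardware reports a fatal error: stop SGE DMA and mask all
 * interrupts before logging the event.
 */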
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

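/* BAR0 holds the device registers and BAR4 is the MSI-X BAR. */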
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

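	/*
	 * Program each window's base address.  The window size is encoded as
	 * log2(aperture in bytes) - 10, e.g. a 4KB window is encoded as 2.
	 */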
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (mtype) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

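	/*
	 * Try the interrupt types from best to worst: MSI-X, then MSI, then
	 * INTx (each type is a one-hot bit).  Note that plain MSI can only
	 * allocate a power of 2 vectors, hence the powerof2() checks below.
	 */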
	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

1815 		device_printf(sc->dev,
1816 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, "
1817 		    "rcvd=%d\n", itype, rc, iaq->nirq, navail);
1818 	}
1819 
1820 	device_printf(sc->dev,
1821 	    "failed to find a usable interrupt type.  "
1822 	    "allowed=%d, msi-x=%d, msi=%d, intx=1.\n", t4_intr_types,
1823 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1824 
1825 	return (ENXIO);
1826 }
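
/*
 * Editor's note: a worked example of the "best option" arithmetic above,
 * for a hypothetical 2-port 10G card with nrxq10g = 8 NIC queues and
 * nofldrxq10g = 2 offload queues per port, and assuming T4_EXTRA_INTR = 2
 * (one vector each for the error interrupt and the firmware event queue,
 * as set up in setup_intr_handlers() below):
 *
 *	nirq = T4_EXTRA_INTR + 2 * (8 + 2) = 22
 *
 * If pci_msix_count() reports fewer than 22 vectors, the function falls
 * through to the progressively sparser layouts that follow.
 */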
1827 
1828 #define FW_VERSION(chip) ( \
1829     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1830     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1831     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1832     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1833 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
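
/*
 * Editor's note: FW_VERSION() packs the four components into one 32-bit
 * word, one V_FW_HDR_FW_VER_* field per component, so for a hypothetical
 * 1.8.4.0 firmware:
 *
 *	uint32_t v = FW_VERSION(T4);
 *
 * round-trips as G_FW_HDR_FW_VER_MAJOR(v) == T4FW_VERSION_MAJOR (and
 * likewise for MINOR/MICRO/BUILD), which is what the version printfs in
 * prep_firmware() rely on.
 */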
1834 
1835 struct fw_info {
1836 	uint8_t chip;
1837 	char *kld_name;
1838 	char *fw_mod_name;
1839 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1840 } fw_info[] = {
1841 	{
1842 		.chip = CHELSIO_T4,
1843 		.kld_name = "t4fw_cfg",
1844 		.fw_mod_name = "t4fw",
1845 		.fw_hdr = {
1846 			.chip = FW_HDR_CHIP_T4,
1847 			.fw_ver = htobe32_const(FW_VERSION(T4)),
1848 			.intfver_nic = FW_INTFVER(T4, NIC),
1849 			.intfver_vnic = FW_INTFVER(T4, VNIC),
1850 			.intfver_ofld = FW_INTFVER(T4, OFLD),
1851 			.intfver_ri = FW_INTFVER(T4, RI),
1852 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1853 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1854 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1855 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1856 		},
1857 	}, {
1858 		.chip = CHELSIO_T5,
1859 		.kld_name = "t5fw_cfg",
1860 		.fw_mod_name = "t5fw",
1861 		.fw_hdr = {
1862 			.chip = FW_HDR_CHIP_T5,
1863 			.fw_ver = htobe32_const(FW_VERSION(T5)),
1864 			.intfver_nic = FW_INTFVER(T5, NIC),
1865 			.intfver_vnic = FW_INTFVER(T5, VNIC),
1866 			.intfver_ofld = FW_INTFVER(T5, OFLD),
1867 			.intfver_ri = FW_INTFVER(T5, RI),
1868 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1869 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1870 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1871 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1872 		},
1873 	}
1874 };
1875 
1876 static struct fw_info *
1877 find_fw_info(int chip)
1878 {
1879 	int i;
1880 
1881 	for (i = 0; i < nitems(fw_info); i++) {
1882 		if (fw_info[i].chip == chip)
1883 			return (&fw_info[i]);
1884 	}
1885 	return (NULL);
1886 }
1887 
1888 /*
1889  * Is the given firmware API compatible with the one the driver was compiled
1890  * with?
1891  */
1892 static int
1893 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1894 {
1895 
1896 	/* short circuit if it's the exact same firmware version */
1897 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1898 		return (1);
1899 
1900 	/*
1901 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1902 	 * features that are supported in the driver.
1903 	 */
1904 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1905 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1906 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1907 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1908 		return (1);
1909 #undef SAME_INTF
1910 
1911 	return (0);
1912 }
1913 
1914 /*
1915  * The firmware in the KLD is usable, but should it be installed?  This
1916  * routine prints a detailed explanation whenever it indicates that the
1917  * KLD firmware should be installed.
1918  */
1919 static int
1920 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1921 {
1922 	const char *reason;
1923 
1924 	if (!card_fw_usable) {
1925 		reason = "incompatible or unusable";
1926 		goto install;
1927 	}
1928 
1929 	if (k > c) {
1930 		reason = "older than the version bundled with this driver";
1931 		goto install;
1932 	}
1933 
1934 	if (t4_fw_install == 2 && k != c) {
1935 		reason = "different than the version bundled with this driver";
1936 		goto install;
1937 	}
1938 
1939 	return (0);
1940 
1941 install:
1942 	if (t4_fw_install == 0) {
1943 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1944 		    "but the driver is prohibited from installing a different "
1945 		    "firmware on the card.\n",
1946 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1947 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1948 
1949 		return (0);
1950 	}
1951 
1952 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1953 	    "installing firmware %u.%u.%u.%u on card.\n",
1954 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1955 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1956 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1957 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1958 
1959 	return (1);
1960 }
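
/*
 * Editor's summary of the t4_fw_install knob as implemented above and in
 * prep_firmware() below: 0 never installs the KLD firmware (it only
 * complains), 1 installs it if the card's firmware is unusable or older
 * than the KLD's, and 2 installs it whenever the two differ at all.
 */
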
1961 /*
1962  * Establish contact with the firmware and determine whether we are the
1963  * master driver, and whether we are responsible for chip initialization.
1964  */
1965 static int
1966 prep_firmware(struct adapter *sc)
1967 {
1968 	const struct firmware *fw = NULL, *default_cfg;
1969 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1970 	enum dev_state state;
1971 	struct fw_info *fw_info;
1972 	struct fw_hdr *card_fw;		/* fw on the card */
1973 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
1974 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
1975 					   against */
1976 
1977 	/* Contact firmware. */
1978 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1979 	if (rc < 0 || state == DEV_STATE_ERR) {
1980 		rc = -rc;
1981 		device_printf(sc->dev,
1982 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
1983 		return (rc);
1984 	}
1985 	pf = rc;
1986 	if (pf == sc->mbox)
1987 		sc->flags |= MASTER_PF;
1988 	else if (state == DEV_STATE_UNINIT) {
1989 		/*
1990 		 * We didn't get to be the master so we definitely won't be
1991 		 * configuring the chip.  It's a bug if someone else hasn't
1992 		 * configured it already.
1993 		 */
1994 		device_printf(sc->dev, "couldn't be master (%d), and the "
1995 		    "device is not already initialized either (%d).\n", rc, state);
1996 		return (EDOOFUS);
1997 	}
1998 
1999 	/* This is the firmware whose headers the driver was compiled against */
2000 	fw_info = find_fw_info(chip_id(sc));
2001 	if (fw_info == NULL) {
2002 		device_printf(sc->dev,
2003 		    "unable to look up firmware information for chip %d.\n",
2004 		    chip_id(sc));
2005 		return (EINVAL);
2006 	}
2007 	drv_fw = &fw_info->fw_hdr;
2008 
2009 	/*
2010 	 * The firmware KLD contains many modules.  The KLD name is also the
2011 	 * name of the module that contains the default config file.
2012 	 */
2013 	default_cfg = firmware_get(fw_info->kld_name);
2014 
2015 	/* Read the header of the firmware on the card */
2016 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2017 	rc = -t4_read_flash(sc, FLASH_FW_START,
2018 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2019 	if (rc == 0)
2020 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2021 	else {
2022 		device_printf(sc->dev,
2023 		    "Unable to read card's firmware header: %d\n", rc);
2024 		card_fw_usable = 0;
2025 	}
2026 
2027 	/* This is the firmware in the KLD */
2028 	fw = firmware_get(fw_info->fw_mod_name);
2029 	if (fw != NULL) {
2030 		kld_fw = (const void *)fw->data;
2031 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2032 	} else {
2033 		kld_fw = NULL;
2034 		kld_fw_usable = 0;
2035 	}
2036 
2037 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2038 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2039 		/*
2040 		 * Common case: the firmware on the card is an exact match and
2041 		 * the KLD is an exact match too, or the KLD is
2042 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2043 		 * here -- use cxgbetool loadfw if you want to reinstall the
2044 		 * same firmware as the one on the card.
2045 		 */
2046 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2047 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2048 	    be32toh(card_fw->fw_ver))) {
2049 
2050 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2051 		if (rc != 0) {
2052 			device_printf(sc->dev,
2053 			    "failed to install firmware: %d\n", rc);
2054 			goto done;
2055 		}
2056 
2057 		/* Installed successfully, update the cached header too. */
2058 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2059 		card_fw_usable = 1;
2060 		need_fw_reset = 0;	/* already reset as part of load_fw */
2061 	}
2062 
2063 	if (!card_fw_usable) {
2064 		uint32_t d, c, k;
2065 
2066 		d = ntohl(drv_fw->fw_ver);
2067 		c = ntohl(card_fw->fw_ver);
2068 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2069 
2070 		device_printf(sc->dev, "Cannot find a usable firmware: "
2071 		    "fw_install %d, chip state %d, "
2072 		    "driver compiled with %u.%u.%u.%u, "
2073 		    "card has %u.%u.%u.%u, KLD has %u.%u.%u.%u\n",
2074 		    t4_fw_install, state,
2075 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2076 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2077 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2078 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2079 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2080 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2081 		rc = EINVAL;
2082 		goto done;
2083 	}
2084 
2085 	/* We're using whatever's on the card and it's known to be good. */
2086 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2087 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2088 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2089 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2090 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2091 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2092 	t4_get_tp_version(sc, &sc->params.tp_vers);
2093 
2094 	/* Reset device */
2095 	if (need_fw_reset &&
2096 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2097 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2098 		if (rc != ETIMEDOUT && rc != EIO)
2099 			t4_fw_bye(sc, sc->mbox);
2100 		goto done;
2101 	}
2102 	sc->flags |= FW_OK;
2103 
2104 	rc = get_params__pre_init(sc);
2105 	if (rc != 0)
2106 		goto done; /* error message displayed already */
2107 
2108 	/* Partition adapter resources as specified in the config file. */
2109 	if (state == DEV_STATE_UNINIT) {
2110 
2111 		KASSERT(sc->flags & MASTER_PF,
2112 		    ("%s: trying to change chip settings when not master.",
2113 		    __func__));
2114 
2115 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2116 		if (rc != 0)
2117 			goto done;	/* error message displayed already */
2118 
2119 		t4_tweak_chip_settings(sc);
2120 
2121 		/* get basic stuff going */
2122 		rc = -t4_fw_initialize(sc, sc->mbox);
2123 		if (rc != 0) {
2124 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2125 			goto done;
2126 		}
2127 	} else {
2128 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2129 		sc->cfcsum = 0;
2130 	}
2131 
2132 done:
2133 	free(card_fw, M_CXGBE);
2134 	if (fw != NULL)
2135 		firmware_put(fw, FIRMWARE_UNLOAD);
2136 	if (default_cfg != NULL)
2137 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2138 
2139 	return (rc);
2140 }
2141 
2142 #define FW_PARAM_DEV(param) \
2143 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2144 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2145 #define FW_PARAM_PFVF(param) \
2146 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2147 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2148 
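/*
 * Editor's note: a minimal sketch of how these macros are used.  Each
 * 32-bit param encodes a mnemonic and a parameter index; the firmware
 * answers through the matching val[] slot.  For example (as in
 * get_params__pre_init() below):
 *
 *	param[0] = FW_PARAM_DEV(PORTVEC);
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
 *	...on success val[0] holds the bitmap of physical ports.
 */
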
2149 /*
2150  * Partition chip resources for use between various PFs, VFs, etc.
2151  */
2152 static int
2153 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2154     const char *name_prefix)
2155 {
2156 	const struct firmware *cfg = NULL;
2157 	int rc = 0;
2158 	struct fw_caps_config_cmd caps;
2159 	uint32_t mtype, moff, finicsum, cfcsum;
2160 
2161 	/*
2162 	 * Figure out what configuration file to use.  Pick the default config
2163 	 * file for the card if the user hasn't specified one explicitly.
2164 	 */
2165 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2166 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2167 		/* Card specific overrides go here. */
2168 		if (pci_get_device(sc->dev) == 0x440a)
2169 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2170 		if (is_fpga(sc))
2171 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2172 	}
2173 
2174 	/*
2175 	 * We need to load another module if the profile is anything except
2176 	 * "default" or "flash".
2177 	 */
2178 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2179 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2180 		char s[32];
2181 
2182 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2183 		cfg = firmware_get(s);
2184 		if (cfg == NULL) {
2185 			if (default_cfg != NULL) {
2186 				device_printf(sc->dev,
2187 				    "unable to load module \"%s\" for "
2188 				    "configuration profile \"%s\", will use "
2189 				    "the default config file instead.\n",
2190 				    s, sc->cfg_file);
2191 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2192 				    "%s", DEFAULT_CF);
2193 			} else {
2194 				device_printf(sc->dev,
2195 				    "unable to load module \"%s\" for "
2196 				    "configuration profile \"%s\", will use "
2197 				    "the config file on the card's flash "
2198 				    "instead.\n", s, sc->cfg_file);
2199 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2200 				    "%s", FLASH_CF);
2201 			}
2202 		}
2203 	}
2204 
2205 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2206 	    default_cfg == NULL) {
2207 		device_printf(sc->dev,
2208 		    "default config file not available, will use the config "
2209 		    "file on the card's flash instead.\n");
2210 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2211 	}
2212 
2213 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2214 		u_int cflen, i, n;
2215 		const uint32_t *cfdata;
2216 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2217 
2218 		KASSERT(cfg != NULL || default_cfg != NULL,
2219 		    ("%s: no config to upload", __func__));
2220 
2221 		/*
2222 		 * Ask the firmware where it wants us to upload the config file.
2223 		 */
2224 		param = FW_PARAM_DEV(CF);
2225 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2226 		if (rc != 0) {
2227 			/* No support for config file?  Shouldn't happen. */
2228 			device_printf(sc->dev,
2229 			    "failed to query config file location: %d.\n", rc);
2230 			goto done;
2231 		}
2232 		mtype = G_FW_PARAMS_PARAM_Y(val);
2233 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2234 
2235 		/*
2236 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2237 		 * useless stuffing/comments at the end of the config file so
2238 		 * it's ok to simply throw away the last remaining bytes when
2239 		 * the config file is not an exact multiple of 4.  This also
2240 		 * helps with the validate_mt_off_len check.
2241 		 */
2242 		if (cfg != NULL) {
2243 			cflen = cfg->datasize & ~3;
2244 			cfdata = cfg->data;
2245 		} else {
2246 			cflen = default_cfg->datasize & ~3;
2247 			cfdata = default_cfg->data;
2248 		}
2249 
2250 		if (cflen > FLASH_CFG_MAX_SIZE) {
2251 			device_printf(sc->dev,
2252 			    "config file too long (%d, max allowed is %d).  "
2253 			    "Will try to use the config on the card, if any.\n",
2254 			    cflen, FLASH_CFG_MAX_SIZE);
2255 			goto use_config_on_flash;
2256 		}
2257 
2258 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2259 		if (rc != 0) {
2260 			device_printf(sc->dev,
2261 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2262 			    "Will try to use the config on the card, if any.\n",
2263 			    __func__, mtype, moff, cflen, rc);
2264 			goto use_config_on_flash;
2265 		}
2266 
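		/*
		 * Copy the config file into card memory through memory
		 * window 2, at most (mw_aperture - off) bytes per window
		 * placement.
		 */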
2267 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2268 		while (cflen) {
2269 			off = position_memwin(sc, 2, addr);
2270 			n = min(cflen, mw_aperture - off);
2271 			for (i = 0; i < n; i += 4)
2272 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2273 			cflen -= n;
2274 			addr += n;
2275 		}
2276 	} else {
2277 use_config_on_flash:
2278 		mtype = FW_MEMTYPE_CF_FLASH;
2279 		moff = t4_flash_cfg_addr(sc);
2280 	}
2281 
2282 	bzero(&caps, sizeof(caps));
2283 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2284 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2285 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2286 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2287 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2288 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2289 	if (rc != 0) {
2290 		device_printf(sc->dev,
2291 		    "failed to pre-process config file: %d "
2292 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2293 		goto done;
2294 	}
2295 
2296 	finicsum = be32toh(caps.finicsum);
2297 	cfcsum = be32toh(caps.cfcsum);
2298 	if (finicsum != cfcsum) {
2299 		device_printf(sc->dev,
2300 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2301 		    finicsum, cfcsum);
2302 	}
2303 	sc->cfcsum = cfcsum;
2304 
2305 #define LIMIT_CAPS(x) do { \
2306 	caps.x &= htobe16(t4_##x##_allowed); \
2307 	sc->x = be16toh(caps.x); \
2308 } while (0)
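
/*
 * Editor's note: LIMIT_CAPS(toecaps), for example, expands to
 *
 *	caps.toecaps &= htobe16(t4_toecaps_allowed);
 *	sc->toecaps = be16toh(caps.toecaps);
 *
 * i.e. the tunable masks the (still big-endian) capability word from the
 * mailbox reply, and the host-order result is cached in the softc.
 */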
2309 
2310 	/*
2311 	 * Let the firmware know what features will (not) be used so it can tune
2312 	 * things accordingly.
2313 	 */
2314 	LIMIT_CAPS(linkcaps);
2315 	LIMIT_CAPS(niccaps);
2316 	LIMIT_CAPS(toecaps);
2317 	LIMIT_CAPS(rdmacaps);
2318 	LIMIT_CAPS(iscsicaps);
2319 	LIMIT_CAPS(fcoecaps);
2320 #undef LIMIT_CAPS
2321 
2322 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2323 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2324 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2325 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2326 	if (rc != 0) {
2327 		device_printf(sc->dev,
2328 		    "failed to process config file: %d.\n", rc);
2329 	}
2330 done:
2331 	if (cfg != NULL)
2332 		firmware_put(cfg, FIRMWARE_UNLOAD);
2333 	return (rc);
2334 }
2335 
2336 /*
2337  * Retrieve parameters that are needed (or nice to have) very early.
2338  */
2339 static int
2340 get_params__pre_init(struct adapter *sc)
2341 {
2342 	int rc;
2343 	uint32_t param[2], val[2];
2344 	struct fw_devlog_cmd cmd;
2345 	struct devlog_params *dlog = &sc->params.devlog;
2346 
2347 	param[0] = FW_PARAM_DEV(PORTVEC);
2348 	param[1] = FW_PARAM_DEV(CCLK);
2349 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2350 	if (rc != 0) {
2351 		device_printf(sc->dev,
2352 		    "failed to query parameters (pre_init): %d.\n", rc);
2353 		return (rc);
2354 	}
2355 
2356 	sc->params.portvec = val[0];
2357 	sc->params.nports = bitcount32(val[0]);
2358 	sc->params.vpd.cclk = val[1];
2359 
2360 	/* Read device log parameters. */
2361 	bzero(&cmd, sizeof(cmd));
2362 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2363 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2364 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2365 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2366 	if (rc != 0) {
2367 		device_printf(sc->dev,
2368 		    "failed to get devlog parameters: %d.\n", rc);
2369 		bzero(dlog, sizeof (*dlog));
2370 		rc = 0;	/* devlog isn't critical for device operation */
2371 	} else {
2372 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2373 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
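		/* MEMADDR16 is in 16-byte units, hence << 4 to get bytes. */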
2374 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2375 		dlog->size = be32toh(cmd.memsize_devlog);
2376 	}
2377 
2378 	return (rc);
2379 }
2380 
2381 /*
2382  * Retrieve various parameters that are of interest to the driver.  The device
2383  * has been initialized by the firmware at this point.
2384  */
2385 static int
2386 get_params__post_init(struct adapter *sc)
2387 {
2388 	int rc;
2389 	uint32_t param[7], val[7];
2390 	struct fw_caps_config_cmd caps;
2391 
2392 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2393 	param[1] = FW_PARAM_PFVF(EQ_START);
2394 	param[2] = FW_PARAM_PFVF(FILTER_START);
2395 	param[3] = FW_PARAM_PFVF(FILTER_END);
2396 	param[4] = FW_PARAM_PFVF(L2T_START);
2397 	param[5] = FW_PARAM_PFVF(L2T_END);
2398 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2399 	if (rc != 0) {
2400 		device_printf(sc->dev,
2401 		    "failed to query parameters (post_init): %d.\n", rc);
2402 		return (rc);
2403 	}
2404 
2405 	sc->sge.iq_start = val[0];
2406 	sc->sge.eq_start = val[1];
2407 	sc->tids.ftid_base = val[2];
2408 	sc->tids.nftids = val[3] - val[2] + 1;
2409 	sc->vres.l2t.start = val[4];
2410 	sc->vres.l2t.size = val[5] - val[4] + 1;
2411 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2412 	    ("%s: L2 table size (%u) larger than expected (%u)",
2413 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2414 
2415 	/* Get capabilities. */
2416 	bzero(&caps, sizeof(caps));
2417 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2418 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2419 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2420 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2421 	if (rc != 0) {
2422 		device_printf(sc->dev,
2423 		    "failed to get card capabilities: %d.\n", rc);
2424 		return (rc);
2425 	}
2426 
2427 	if (caps.toecaps) {
2428 		/* query offload-related parameters */
2429 		param[0] = FW_PARAM_DEV(NTID);
2430 		param[1] = FW_PARAM_PFVF(SERVER_START);
2431 		param[2] = FW_PARAM_PFVF(SERVER_END);
2432 		param[3] = FW_PARAM_PFVF(TDDP_START);
2433 		param[4] = FW_PARAM_PFVF(TDDP_END);
2434 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2435 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2436 		if (rc != 0) {
2437 			device_printf(sc->dev,
2438 			    "failed to query TOE parameters: %d.\n", rc);
2439 			return (rc);
2440 		}
2441 		sc->tids.ntids = val[0];
2442 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2443 		sc->tids.stid_base = val[1];
2444 		sc->tids.nstids = val[2] - val[1] + 1;
2445 		sc->vres.ddp.start = val[3];
2446 		sc->vres.ddp.size = val[4] - val[3] + 1;
2447 		sc->params.ofldq_wr_cred = val[5];
2448 		sc->params.offload = 1;
2449 	}
2450 	if (caps.rdmacaps) {
2451 		param[0] = FW_PARAM_PFVF(STAG_START);
2452 		param[1] = FW_PARAM_PFVF(STAG_END);
2453 		param[2] = FW_PARAM_PFVF(RQ_START);
2454 		param[3] = FW_PARAM_PFVF(RQ_END);
2455 		param[4] = FW_PARAM_PFVF(PBL_START);
2456 		param[5] = FW_PARAM_PFVF(PBL_END);
2457 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2458 		if (rc != 0) {
2459 			device_printf(sc->dev,
2460 			    "failed to query RDMA parameters(1): %d.\n", rc);
2461 			return (rc);
2462 		}
2463 		sc->vres.stag.start = val[0];
2464 		sc->vres.stag.size = val[1] - val[0] + 1;
2465 		sc->vres.rq.start = val[2];
2466 		sc->vres.rq.size = val[3] - val[2] + 1;
2467 		sc->vres.pbl.start = val[4];
2468 		sc->vres.pbl.size = val[5] - val[4] + 1;
2469 
2470 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2471 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2472 		param[2] = FW_PARAM_PFVF(CQ_START);
2473 		param[3] = FW_PARAM_PFVF(CQ_END);
2474 		param[4] = FW_PARAM_PFVF(OCQ_START);
2475 		param[5] = FW_PARAM_PFVF(OCQ_END);
2476 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2477 		if (rc != 0) {
2478 			device_printf(sc->dev,
2479 			    "failed to query RDMA parameters(2): %d.\n", rc);
2480 			return (rc);
2481 		}
2482 		sc->vres.qp.start = val[0];
2483 		sc->vres.qp.size = val[1] - val[0] + 1;
2484 		sc->vres.cq.start = val[2];
2485 		sc->vres.cq.size = val[3] - val[2] + 1;
2486 		sc->vres.ocq.start = val[4];
2487 		sc->vres.ocq.size = val[5] - val[4] + 1;
2488 	}
2489 	if (caps.iscsicaps) {
2490 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2491 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2492 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2493 		if (rc != 0) {
2494 			device_printf(sc->dev,
2495 			    "failed to query iSCSI parameters: %d.\n", rc);
2496 			return (rc);
2497 		}
2498 		sc->vres.iscsi.start = val[0];
2499 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2500 	}
2501 
2502 	/*
2503 	 * We've got the params we wanted to query via the firmware.  Now grab
2504 	 * some others directly from the chip.
2505 	 */
2506 	rc = t4_read_chip_settings(sc);
2507 
2508 	return (rc);
2509 }
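
/*
 * Editor's note: the firmware reports each resource range above as an
 * inclusive [start, end] pair, hence all of the "end - start + 1" size
 * calculations in get_params__post_init().
 */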
2510 
2511 static int
2512 set_params__post_init(struct adapter *sc)
2513 {
2514 	uint32_t param, val;
2515 
2516 	/* ask for encapsulated CPLs */
2517 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2518 	val = 1;
2519 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2520 
2521 	return (0);
2522 }
2523 
2524 #undef FW_PARAM_PFVF
2525 #undef FW_PARAM_DEV
2526 
2527 static void
2528 t4_set_desc(struct adapter *sc)
2529 {
2530 	char buf[128];
2531 	struct adapter_params *p = &sc->params;
2532 
2533 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2534 	    p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2535 	    p->vpd.ec);
2536 
2537 	device_set_desc_copy(sc->dev, buf);
2538 }
2539 
2540 static void
2541 build_medialist(struct port_info *pi)
2542 {
2543 	struct ifmedia *media = &pi->media;
2544 	int data, m;
2545 
2546 	PORT_LOCK(pi);
2547 
2548 	ifmedia_removeall(media);
2549 
2550 	m = IFM_ETHER | IFM_FDX;
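	/* Encode the port type (high byte) and module type (low byte). */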
2551 	data = (pi->port_type << 8) | pi->mod_type;
2552 
2553 	switch (pi->port_type) {
2554 	case FW_PORT_TYPE_BT_XFI:
2555 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2556 		break;
2557 
2558 	case FW_PORT_TYPE_BT_XAUI:
2559 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2560 		/* fall through */
2561 
2562 	case FW_PORT_TYPE_BT_SGMII:
2563 		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2564 		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2565 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2566 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2567 		break;
2568 
2569 	case FW_PORT_TYPE_CX4:
2570 		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2571 		ifmedia_set(media, m | IFM_10G_CX4);
2572 		break;
2573 
2574 	case FW_PORT_TYPE_SFP:
2575 	case FW_PORT_TYPE_FIBER_XFI:
2576 	case FW_PORT_TYPE_FIBER_XAUI:
2577 		switch (pi->mod_type) {
2578 
2579 		case FW_PORT_MOD_TYPE_LR:
2580 			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2581 			ifmedia_set(media, m | IFM_10G_LR);
2582 			break;
2583 
2584 		case FW_PORT_MOD_TYPE_SR:
2585 			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2586 			ifmedia_set(media, m | IFM_10G_SR);
2587 			break;
2588 
2589 		case FW_PORT_MOD_TYPE_LRM:
2590 			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2591 			ifmedia_set(media, m | IFM_10G_LRM);
2592 			break;
2593 
2594 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2595 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2596 			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2597 			ifmedia_set(media, m | IFM_10G_TWINAX);
2598 			break;
2599 
2600 		case FW_PORT_MOD_TYPE_NONE:
2601 			m &= ~IFM_FDX;
2602 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2603 			ifmedia_set(media, m | IFM_NONE);
2604 			break;
2605 
2606 		case FW_PORT_MOD_TYPE_NA:
2607 		case FW_PORT_MOD_TYPE_ER:
2608 		default:
2609 			device_printf(pi->dev,
2610 			    "unknown port_type (%d), mod_type (%d)\n",
2611 			    pi->port_type, pi->mod_type);
2612 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2613 			ifmedia_set(media, m | IFM_UNKNOWN);
2614 			break;
2615 		}
2616 		break;
2617 
2618 	case FW_PORT_TYPE_QSFP:
2619 		switch (pi->mod_type) {
2620 
2621 		case FW_PORT_MOD_TYPE_LR:
2622 			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2623 			ifmedia_set(media, m | IFM_40G_LR4);
2624 			break;
2625 
2626 		case FW_PORT_MOD_TYPE_SR:
2627 			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2628 			ifmedia_set(media, m | IFM_40G_SR4);
2629 			break;
2630 
2631 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2632 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2633 			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2634 			ifmedia_set(media, m | IFM_40G_CR4);
2635 			break;
2636 
2637 		case FW_PORT_MOD_TYPE_NONE:
2638 			m &= ~IFM_FDX;
2639 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2640 			ifmedia_set(media, m | IFM_NONE);
2641 			break;
2642 
2643 		default:
2644 			device_printf(pi->dev,
2645 			    "unknown port_type (%d), mod_type (%d)\n",
2646 			    pi->port_type, pi->mod_type);
2647 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2648 			ifmedia_set(media, m | IFM_UNKNOWN);
2649 			break;
2650 		}
2651 		break;
2652 
2653 	default:
2654 		device_printf(pi->dev,
2655 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2656 		    pi->mod_type);
2657 		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2658 		ifmedia_set(media, m | IFM_UNKNOWN);
2659 		break;
2660 	}
2661 
2662 	PORT_UNLOCK(pi);
2663 }
2664 
2665 #define FW_MAC_EXACT_CHUNK	7
2666 
2667 /*
2668  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2669  * indicates which parameters should be programmed (the rest are left alone).
2670  */
2671 static int
2672 update_mac_settings(struct port_info *pi, int flags)
2673 {
2674 	int rc;
2675 	struct ifnet *ifp = pi->ifp;
2676 	struct adapter *sc = pi->adapter;
2677 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2678 
2679 	ASSERT_SYNCHRONIZED_OP(sc);
2680 	KASSERT(flags, ("%s: not told what to update.", __func__));
2681 
2682 	if (flags & XGMAC_MTU)
2683 		mtu = ifp->if_mtu;
2684 
2685 	if (flags & XGMAC_PROMISC)
2686 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2687 
2688 	if (flags & XGMAC_ALLMULTI)
2689 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2690 
2691 	if (flags & XGMAC_VLANEX)
2692 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2693 
2694 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2695 	    vlanex, false);
2696 	if (rc) {
2697 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2698 		return (rc);
2699 	}
2700 
2701 	if (flags & XGMAC_UCADDR) {
2702 		uint8_t ucaddr[ETHER_ADDR_LEN];
2703 
2704 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2705 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2706 		    ucaddr, true, true);
2707 		if (rc < 0) {
2708 			rc = -rc;
2709 			if_printf(ifp, "change_mac failed: %d\n", rc);
2710 			return (rc);
2711 		} else {
2712 			pi->xact_addr_filt = rc;
2713 			rc = 0;
2714 		}
2715 	}
2716 
2717 	if (flags & XGMAC_MCADDRS) {
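		/*
		 * Program the exact-match filters in chunks of
		 * FW_MAC_EXACT_CHUNK; "del" asks the firmware to flush the
		 * previously programmed set on the first chunk only
		 * (editor's reading of t4_alloc_mac_filt()).
		 */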
2718 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2719 		int del = 1;
2720 		uint64_t hash = 0;
2721 		struct ifmultiaddr *ifma;
2722 		int i = 0, j;
2723 
2724 		if_maddr_rlock(ifp);
2725 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2726 			if (ifma->ifma_addr->sa_family != AF_LINK)
2727 				continue;
2728 			mcaddr[i++] =
2729 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2730 
2731 			if (i == FW_MAC_EXACT_CHUNK) {
2732 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2733 				    del, i, mcaddr, NULL, &hash, 0);
2734 				if (rc < 0) {
2735 					rc = -rc;
2736 					for (j = 0; j < i; j++) {
2737 						if_printf(ifp,
2738 						    "failed to add mc address"
2739 						    " %02x:%02x:%02x:"
2740 						    "%02x:%02x:%02x rc=%d\n",
2741 						    mcaddr[j][0], mcaddr[j][1],
2742 						    mcaddr[j][2], mcaddr[j][3],
2743 						    mcaddr[j][4], mcaddr[j][5],
2744 						    rc);
2745 					}
2746 					goto mcfail;
2747 				}
2748 				del = 0;
2749 				i = 0;
2750 			}
2751 		}
2752 		if (i > 0) {
2753 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2754 			    del, i, mcaddr, NULL, &hash, 0);
2755 			if (rc < 0) {
2756 				rc = -rc;
2757 				for (j = 0; j < i; j++) {
2758 					if_printf(ifp,
2759 					    "failed to add mc address"
2760 					    " %02x:%02x:%02x:"
2761 					    "%02x:%02x:%02x rc=%d\n",
2762 					    mcaddr[j][0], mcaddr[j][1],
2763 					    mcaddr[j][2], mcaddr[j][3],
2764 					    mcaddr[j][4], mcaddr[j][5],
2765 					    rc);
2766 				}
2767 				goto mcfail;
2768 			}
2769 		}
2770 
2771 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2772 		if (rc != 0)
2773 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2774 mcfail:
2775 		if_maddr_runlock(ifp);
2776 	}
2777 
2778 	return (rc);
2779 }
2780 
2781 int
2782 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2783     char *wmesg)
2784 {
2785 	int rc, pri;
2786 
2787 #ifdef WITNESS
2788 	/* the caller thinks it's ok to sleep, but is it really? */
2789 	if (flags & SLEEP_OK)
2790 		pause("t4slptst", 1);
2791 #endif
2792 
2793 	if (flags & INTR_OK)
2794 		pri = PCATCH;
2795 	else
2796 		pri = 0;
2797 
2798 	ADAPTER_LOCK(sc);
2799 	for (;;) {
2800 
2801 		if (pi && IS_DOOMED(pi)) {
2802 			rc = ENXIO;
2803 			goto done;
2804 		}
2805 
2806 		if (!IS_BUSY(sc)) {
2807 			rc = 0;
2808 			break;
2809 		}
2810 
2811 		if (!(flags & SLEEP_OK)) {
2812 			rc = EBUSY;
2813 			goto done;
2814 		}
2815 
2816 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2817 			rc = EINTR;
2818 			goto done;
2819 		}
2820 	}
2821 
2822 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2823 	SET_BUSY(sc);
2824 #ifdef INVARIANTS
2825 	sc->last_op = wmesg;
2826 	sc->last_op_thr = curthread;
2827 #endif
2828 
2829 done:
2830 	if (!(flags & HOLD_LOCK) || rc)
2831 		ADAPTER_UNLOCK(sc);
2832 
2833 	return (rc);
2834 }
2835 
2836 void
2837 end_synchronized_op(struct adapter *sc, int flags)
2838 {
2839 
2840 	if (flags & LOCK_HELD)
2841 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2842 	else
2843 		ADAPTER_LOCK(sc);
2844 
2845 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2846 	CLR_BUSY(sc);
2847 	wakeup(&sc->flags);
2848 	ADAPTER_UNLOCK(sc);
2849 }
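
/*
 * Editor's note: a hypothetical caller pairs the two helpers above roughly
 * as follows (sketch only; "t4init" is an illustrative wmesg):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init");
 *	if (rc != 0)
 *		return (rc);	(EINTR, ENXIO, or EBUSY)
 *	rc = cxgbe_init_synchronized(pi);
 *	end_synchronized_op(sc, 0);
 */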
2850 
2851 static int
2852 cxgbe_init_synchronized(struct port_info *pi)
2853 {
2854 	struct adapter *sc = pi->adapter;
2855 	struct ifnet *ifp = pi->ifp;
2856 	int rc = 0;
2857 
2858 	ASSERT_SYNCHRONIZED_OP(sc);
2859 
2860 	if (isset(&sc->open_device_map, pi->port_id)) {
2861 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2862 		    ("mismatch between open_device_map and if_drv_flags"));
2863 		return (0);	/* already running */
2864 	}
2865 
2866 	if (!(sc->flags & FULL_INIT_DONE) &&
2867 	    ((rc = adapter_full_init(sc)) != 0))
2868 		return (rc);	/* error message displayed already */
2869 
2870 	if (!(pi->flags & PORT_INIT_DONE) &&
2871 	    ((rc = port_full_init(pi)) != 0))
2872 		return (rc); /* error message displayed already */
2873 
2874 	rc = update_mac_settings(pi, XGMAC_ALL);
2875 	if (rc)
2876 		goto done;	/* error message displayed already */
2877 
2878 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2879 	if (rc != 0) {
2880 		if_printf(ifp, "start_link failed: %d\n", rc);
2881 		goto done;
2882 	}
2883 
2884 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2885 	if (rc != 0) {
2886 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2887 		goto done;
2888 	}
2889 
2890 	/* all ok */
2891 	setbit(&sc->open_device_map, pi->port_id);
2892 	PORT_LOCK(pi);
2893 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2894 	PORT_UNLOCK(pi);
2895 
2896 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2897 done:
2898 	if (rc != 0)
2899 		cxgbe_uninit_synchronized(pi);
2900 
2901 	return (rc);
2902 }
2903 
2904 /*
2905  * Idempotent.
2906  */
2907 static int
2908 cxgbe_uninit_synchronized(struct port_info *pi)
2909 {
2910 	struct adapter *sc = pi->adapter;
2911 	struct ifnet *ifp = pi->ifp;
2912 	int rc;
2913 
2914 	ASSERT_SYNCHRONIZED_OP(sc);
2915 
2916 	/*
2917 	 * Disable the VI so that all its data in either direction is discarded
2918 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2919 	 * tick) intact as the TP can deliver negative advice or data that it's
2920 	 * holding in its RAM (for an offloaded connection) even after the VI is
2921 	 * disabled.
2922 	 */
2923 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2924 	if (rc) {
2925 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2926 		return (rc);
2927 	}
2928 
2929 	clrbit(&sc->open_device_map, pi->port_id);
2930 	PORT_LOCK(pi);
2931 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2932 	PORT_UNLOCK(pi);
2933 
2934 	pi->link_cfg.link_ok = 0;
2935 	pi->link_cfg.speed = 0;
2936 	pi->linkdnrc = -1;
2937 	t4_os_link_changed(sc, pi->port_id, 0, -1);
2938 
2939 	return (0);
2940 }
2941 
2942 /*
2943  * It is ok for this function to fail midway and return right away.  t4_detach
2944  * will walk the entire sc->irq list and clean up whatever is valid.
2945  */
2946 static int
2947 setup_intr_handlers(struct adapter *sc)
2948 {
2949 	int rc, rid, p, q;
2950 	char s[8];
2951 	struct irq *irq;
2952 	struct port_info *pi;
2953 	struct sge_rxq *rxq;
2954 #ifdef TCP_OFFLOAD
2955 	struct sge_ofld_rxq *ofld_rxq;
2956 #endif
2957 
2958 	/*
2959 	 * Setup interrupts.
2960 	 */
2961 	irq = &sc->irq[0];
2962 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
2963 	if (sc->intr_count == 1) {
2964 		KASSERT(!(sc->flags & INTR_DIRECT),
2965 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
2966 
2967 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2968 		if (rc != 0)
2969 			return (rc);
2970 	} else {
2971 		/* Multiple interrupts. */
2972 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2973 		    ("%s: too few intr.", __func__));
2974 
2975 		/* The first one is always error intr */
2976 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
2977 		if (rc != 0)
2978 			return (rc);
2979 		irq++;
2980 		rid++;
2981 
2982 		/* The second one is always the firmware event queue */
2983 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2984 		    "evt");
2985 		if (rc != 0)
2986 			return (rc);
2987 		irq++;
2988 		rid++;
2989 
2990 		/*
2991 		 * Note that if INTR_DIRECT is not set then exactly one of the
2992 		 * NIC rx queues or the TOE rx queues will be taking direct
2993 		 * interrupts.
2994 		 *
2995 		 * There is no need to check for is_offload(sc) as nofldrxq
2996 		 * will be 0 if offload is disabled.
2997 		 */
2998 		for_each_port(sc, p) {
2999 			pi = sc->port[p];
3000 
3001 #ifdef TCP_OFFLOAD
3002 			/*
3003 			 * Skip over the NIC queues if they aren't taking direct
3004 			 * interrupts.
3005 			 */
3006 			if (!(sc->flags & INTR_DIRECT) &&
3007 			    pi->nofldrxq > pi->nrxq)
3008 				goto ofld_queues;
3009 #endif
3010 			rxq = &sc->sge.rxq[pi->first_rxq];
3011 			for (q = 0; q < pi->nrxq; q++, rxq++) {
3012 				snprintf(s, sizeof(s), "%d.%d", p, q);
3013 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3014 				    s);
3015 				if (rc != 0)
3016 					return (rc);
3017 				irq++;
3018 				rid++;
3019 			}
3020 
3021 #ifdef TCP_OFFLOAD
3022 			/*
3023 			 * Skip over the offload queues if they aren't taking
3024 			 * direct interrupts.
3025 			 */
3026 			if (!(sc->flags & INTR_DIRECT))
3027 				continue;
3028 ofld_queues:
3029 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3030 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3031 				snprintf(s, sizeof(s), "%d,%d", p, q);
3032 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3033 				    ofld_rxq, s);
3034 				if (rc != 0)
3035 					return (rc);
3036 				irq++;
3037 				rid++;
3038 			}
3039 #endif
3040 		}
3041 	}
3042 
3043 	return (0);
3044 }
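
/*
 * Editor's note: with multiple vectors the resulting layout is, in order:
 * vector 0 for adapter-wide errors, vector 1 for the firmware event queue,
 * and then one vector per directly-interrupting rx queue of each port,
 * described as "p.q" for NIC queues and "p,q" for offload queues.
 */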
3045 
3046 static int
3047 adapter_full_init(struct adapter *sc)
3048 {
3049 	int rc, i;
3050 
3051 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3052 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3053 	    ("%s: FULL_INIT_DONE already", __func__));
3054 
3055 	/*
3056 	 * Queues that belong to the adapter (not to any particular port).
3057 	 */
3058 	rc = t4_setup_adapter_queues(sc);
3059 	if (rc != 0)
3060 		goto done;
3061 
3062 	for (i = 0; i < nitems(sc->tq); i++) {
3063 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3064 		    taskqueue_thread_enqueue, &sc->tq[i]);
3065 		if (sc->tq[i] == NULL) {
3066 			device_printf(sc->dev,
3067 			    "failed to allocate task queue %d\n", i);
3068 			rc = ENOMEM;
3069 			goto done;
3070 		}
3071 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3072 		    device_get_nameunit(sc->dev), i);
3073 	}
3074 
3075 	t4_intr_enable(sc);
3076 	sc->flags |= FULL_INIT_DONE;
3077 done:
3078 	if (rc != 0)
3079 		adapter_full_uninit(sc);
3080 
3081 	return (rc);
3082 }
3083 
3084 static int
3085 adapter_full_uninit(struct adapter *sc)
3086 {
3087 	int i;
3088 
3089 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3090 
3091 	t4_teardown_adapter_queues(sc);
3092 
3093 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3094 		taskqueue_free(sc->tq[i]);
3095 		sc->tq[i] = NULL;
3096 	}
3097 
3098 	sc->flags &= ~FULL_INIT_DONE;
3099 
3100 	return (0);
3101 }
3102 
3103 static int
3104 port_full_init(struct port_info *pi)
3105 {
3106 	struct adapter *sc = pi->adapter;
3107 	struct ifnet *ifp = pi->ifp;
3108 	uint16_t *rss;
3109 	struct sge_rxq *rxq;
3110 	int rc, i;
3111 
3112 	ASSERT_SYNCHRONIZED_OP(sc);
3113 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3114 	    ("%s: PORT_INIT_DONE already", __func__));
3115 
3116 	sysctl_ctx_init(&pi->ctx);
3117 	pi->flags |= PORT_SYSCTL_CTX;
3118 
3119 	/*
3120 	 * Allocate tx/rx/fl queues for this port.
3121 	 */
3122 	rc = t4_setup_port_queues(pi);
3123 	if (rc != 0)
3124 		goto done;	/* error message displayed already */
3125 
3126 	/*
3127 	 * Setup RSS for this port.
3128 	 */
3129 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3130 	    M_ZERO | M_WAITOK);
3131 	for_each_rxq(pi, i, rxq) {
3132 		rss[i] = rxq->iq.abs_id;
3133 	}
3134 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3135 	    pi->rss_size, rss, pi->nrxq);
3136 	free(rss, M_CXGBE);
3137 	if (rc != 0) {
3138 		if_printf(ifp, "rss_config failed: %d\n", rc);
3139 		goto done;
3140 	}
3141 
3142 	pi->flags |= PORT_INIT_DONE;
3143 done:
3144 	if (rc != 0)
3145 		port_full_uninit(pi);
3146 
3147 	return (rc);
3148 }
3149 
3150 /*
3151  * Idempotent.
3152  */
3153 static int
3154 port_full_uninit(struct port_info *pi)
3155 {
3156 	struct adapter *sc = pi->adapter;
3157 	int i;
3158 	struct sge_rxq *rxq;
3159 	struct sge_txq *txq;
3160 #ifdef TCP_OFFLOAD
3161 	struct sge_ofld_rxq *ofld_rxq;
3162 	struct sge_wrq *ofld_txq;
3163 #endif
3164 
3165 	if (pi->flags & PORT_INIT_DONE) {
3166 
3167 		/* Need to quiesce queues.  XXX: ctrl queues? */
3168 
3169 		for_each_txq(pi, i, txq) {
3170 			quiesce_eq(sc, &txq->eq);
3171 		}
3172 
3173 #ifdef TCP_OFFLOAD
3174 		for_each_ofld_txq(pi, i, ofld_txq) {
3175 			quiesce_eq(sc, &ofld_txq->eq);
3176 		}
3177 #endif
3178 
3179 		for_each_rxq(pi, i, rxq) {
3180 			quiesce_iq(sc, &rxq->iq);
3181 			quiesce_fl(sc, &rxq->fl);
3182 		}
3183 
3184 #ifdef TCP_OFFLOAD
3185 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3186 			quiesce_iq(sc, &ofld_rxq->iq);
3187 			quiesce_fl(sc, &ofld_rxq->fl);
3188 		}
3189 #endif
3190 	}
3191 
3192 	t4_teardown_port_queues(pi);
3193 	pi->flags &= ~PORT_INIT_DONE;
3194 
3195 	return (0);
3196 }
3197 
3198 static void
3199 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3200 {
3201 	EQ_LOCK(eq);
3202 	eq->flags |= EQ_DOOMED;
3203 
3204 	/*
3205 	 * Wait for the response to a credit flush if one's
3206 	 * pending.
3207 	 */
3208 	while (eq->flags & EQ_CRFLUSHED)
3209 		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3210 	EQ_UNLOCK(eq);
3211 
3212 	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3213 	pause("callout", 10);		/* Still iffy */
3214 
3215 	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3216 }
3217 
3218 static void
3219 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3220 {
3221 	(void) sc;	/* unused */
3222 
3223 	/* Synchronize with the interrupt handler */
3224 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3225 		pause("iqfree", 1);
3226 }
3227 
3228 static void
3229 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3230 {
3231 	mtx_lock(&sc->sfl_lock);
3232 	FL_LOCK(fl);
3233 	fl->flags |= FL_DOOMED;
3234 	FL_UNLOCK(fl);
3235 	mtx_unlock(&sc->sfl_lock);
3236 
3237 	callout_drain(&sc->sfl_callout);
3238 	KASSERT((fl->flags & FL_STARVING) == 0,
3239 	    ("%s: still starving", __func__));
3240 }
3241 
3242 static int
3243 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3244     driver_intr_t *handler, void *arg, char *name)
3245 {
3246 	int rc;
3247 
3248 	irq->rid = rid;
3249 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3250 	    RF_SHAREABLE | RF_ACTIVE);
3251 	if (irq->res == NULL) {
3252 		device_printf(sc->dev,
3253 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3254 		return (ENOMEM);
3255 	}
3256 
3257 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3258 	    NULL, handler, arg, &irq->tag);
3259 	if (rc != 0) {
3260 		device_printf(sc->dev,
3261 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3262 		    rid, name, rc);
3263 	} else if (name)
3264 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3265 
3266 	return (rc);
3267 }
3268 
3269 static int
3270 t4_free_irq(struct adapter *sc, struct irq *irq)
3271 {
3272 	if (irq->tag)
3273 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3274 	if (irq->res)
3275 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3276 
3277 	bzero(irq, sizeof(*irq));
3278 
3279 	return (0);
3280 }
3281 
3282 static void
3283 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3284     unsigned int end)
3285 {
3286 	uint32_t *p = (uint32_t *)(buf + start);
3287 
3288 	for ( ; start <= end; start += sizeof(uint32_t))
3289 		*p++ = t4_read_reg(sc, start);
3290 }
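
/*
 * Editor's note: each register range below is an inclusive [start, end]
 * pair, so a range such as 0x1008-0x1108 dumps (0x1108 - 0x1008) / 4 + 1 =
 * 65 registers into buf + start through buf + end.
 */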
3291 
3292 static void
3293 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3294 {
3295 	int i, n;
3296 	const unsigned int *reg_ranges;
3297 	static const unsigned int t4_reg_ranges[] = {
3298 		0x1008, 0x1108,
3299 		0x1180, 0x11b4,
3300 		0x11fc, 0x123c,
3301 		0x1300, 0x173c,
3302 		0x1800, 0x18fc,
3303 		0x3000, 0x30d8,
3304 		0x30e0, 0x5924,
3305 		0x5960, 0x59d4,
3306 		0x5a00, 0x5af8,
3307 		0x6000, 0x6098,
3308 		0x6100, 0x6150,
3309 		0x6200, 0x6208,
3310 		0x6240, 0x6248,
3311 		0x6280, 0x6338,
3312 		0x6370, 0x638c,
3313 		0x6400, 0x643c,
3314 		0x6500, 0x6524,
3315 		0x6a00, 0x6a38,
3316 		0x6a60, 0x6a78,
3317 		0x6b00, 0x6b84,
3318 		0x6bf0, 0x6c84,
3319 		0x6cf0, 0x6d84,
3320 		0x6df0, 0x6e84,
3321 		0x6ef0, 0x6f84,
3322 		0x6ff0, 0x7084,
3323 		0x70f0, 0x7184,
3324 		0x71f0, 0x7284,
3325 		0x72f0, 0x7384,
3326 		0x73f0, 0x7450,
3327 		0x7500, 0x7530,
3328 		0x7600, 0x761c,
3329 		0x7680, 0x76cc,
3330 		0x7700, 0x7798,
3331 		0x77c0, 0x77fc,
3332 		0x7900, 0x79fc,
3333 		0x7b00, 0x7c38,
3334 		0x7d00, 0x7efc,
3335 		0x8dc0, 0x8e1c,
3336 		0x8e30, 0x8e78,
3337 		0x8ea0, 0x8f6c,
3338 		0x8fc0, 0x9074,
3339 		0x90fc, 0x90fc,
3340 		0x9400, 0x9458,
3341 		0x9600, 0x96bc,
3342 		0x9800, 0x9808,
3343 		0x9820, 0x983c,
3344 		0x9850, 0x9864,
3345 		0x9c00, 0x9c6c,
3346 		0x9c80, 0x9cec,
3347 		0x9d00, 0x9d6c,
3348 		0x9d80, 0x9dec,
3349 		0x9e00, 0x9e6c,
3350 		0x9e80, 0x9eec,
3351 		0x9f00, 0x9f6c,
3352 		0x9f80, 0x9fec,
3353 		0xd004, 0xd03c,
3354 		0xdfc0, 0xdfe0,
3355 		0xe000, 0xea7c,
3356 		0xf000, 0x11190,
3357 		0x19040, 0x1906c,
3358 		0x19078, 0x19080,
3359 		0x1908c, 0x19124,
3360 		0x19150, 0x191b0,
3361 		0x191d0, 0x191e8,
3362 		0x19238, 0x1924c,
3363 		0x193f8, 0x19474,
3364 		0x19490, 0x194f8,
3365 		0x19800, 0x19f30,
3366 		0x1a000, 0x1a06c,
3367 		0x1a0b0, 0x1a120,
3368 		0x1a128, 0x1a138,
3369 		0x1a190, 0x1a1c4,
3370 		0x1a1fc, 0x1a1fc,
3371 		0x1e040, 0x1e04c,
3372 		0x1e284, 0x1e28c,
3373 		0x1e2c0, 0x1e2c0,
3374 		0x1e2e0, 0x1e2e0,
3375 		0x1e300, 0x1e384,
3376 		0x1e3c0, 0x1e3c8,
3377 		0x1e440, 0x1e44c,
3378 		0x1e684, 0x1e68c,
3379 		0x1e6c0, 0x1e6c0,
3380 		0x1e6e0, 0x1e6e0,
3381 		0x1e700, 0x1e784,
3382 		0x1e7c0, 0x1e7c8,
3383 		0x1e840, 0x1e84c,
3384 		0x1ea84, 0x1ea8c,
3385 		0x1eac0, 0x1eac0,
3386 		0x1eae0, 0x1eae0,
3387 		0x1eb00, 0x1eb84,
3388 		0x1ebc0, 0x1ebc8,
3389 		0x1ec40, 0x1ec4c,
3390 		0x1ee84, 0x1ee8c,
3391 		0x1eec0, 0x1eec0,
3392 		0x1eee0, 0x1eee0,
3393 		0x1ef00, 0x1ef84,
3394 		0x1efc0, 0x1efc8,
3395 		0x1f040, 0x1f04c,
3396 		0x1f284, 0x1f28c,
3397 		0x1f2c0, 0x1f2c0,
3398 		0x1f2e0, 0x1f2e0,
3399 		0x1f300, 0x1f384,
3400 		0x1f3c0, 0x1f3c8,
3401 		0x1f440, 0x1f44c,
3402 		0x1f684, 0x1f68c,
3403 		0x1f6c0, 0x1f6c0,
3404 		0x1f6e0, 0x1f6e0,
3405 		0x1f700, 0x1f784,
3406 		0x1f7c0, 0x1f7c8,
3407 		0x1f840, 0x1f84c,
3408 		0x1fa84, 0x1fa8c,
3409 		0x1fac0, 0x1fac0,
3410 		0x1fae0, 0x1fae0,
3411 		0x1fb00, 0x1fb84,
3412 		0x1fbc0, 0x1fbc8,
3413 		0x1fc40, 0x1fc4c,
3414 		0x1fe84, 0x1fe8c,
3415 		0x1fec0, 0x1fec0,
3416 		0x1fee0, 0x1fee0,
3417 		0x1ff00, 0x1ff84,
3418 		0x1ffc0, 0x1ffc8,
3419 		0x20000, 0x2002c,
3420 		0x20100, 0x2013c,
3421 		0x20190, 0x201c8,
3422 		0x20200, 0x20318,
3423 		0x20400, 0x20528,
3424 		0x20540, 0x20614,
3425 		0x21000, 0x21040,
3426 		0x2104c, 0x21060,
3427 		0x210c0, 0x210ec,
3428 		0x21200, 0x21268,
3429 		0x21270, 0x21284,
3430 		0x212fc, 0x21388,
3431 		0x21400, 0x21404,
3432 		0x21500, 0x21518,
3433 		0x2152c, 0x2153c,
3434 		0x21550, 0x21554,
3435 		0x21600, 0x21600,
3436 		0x21608, 0x21628,
3437 		0x21630, 0x2163c,
3438 		0x21700, 0x2171c,
3439 		0x21780, 0x2178c,
3440 		0x21800, 0x21c38,
3441 		0x21c80, 0x21d7c,
3442 		0x21e00, 0x21e04,
3443 		0x22000, 0x2202c,
3444 		0x22100, 0x2213c,
3445 		0x22190, 0x221c8,
3446 		0x22200, 0x22318,
3447 		0x22400, 0x22528,
3448 		0x22540, 0x22614,
3449 		0x23000, 0x23040,
3450 		0x2304c, 0x23060,
3451 		0x230c0, 0x230ec,
3452 		0x23200, 0x23268,
3453 		0x23270, 0x23284,
3454 		0x232fc, 0x23388,
3455 		0x23400, 0x23404,
3456 		0x23500, 0x23518,
3457 		0x2352c, 0x2353c,
3458 		0x23550, 0x23554,
3459 		0x23600, 0x23600,
3460 		0x23608, 0x23628,
3461 		0x23630, 0x2363c,
3462 		0x23700, 0x2371c,
3463 		0x23780, 0x2378c,
3464 		0x23800, 0x23c38,
3465 		0x23c80, 0x23d7c,
3466 		0x23e00, 0x23e04,
3467 		0x24000, 0x2402c,
3468 		0x24100, 0x2413c,
3469 		0x24190, 0x241c8,
3470 		0x24200, 0x24318,
3471 		0x24400, 0x24528,
3472 		0x24540, 0x24614,
3473 		0x25000, 0x25040,
3474 		0x2504c, 0x25060,
3475 		0x250c0, 0x250ec,
3476 		0x25200, 0x25268,
3477 		0x25270, 0x25284,
3478 		0x252fc, 0x25388,
3479 		0x25400, 0x25404,
3480 		0x25500, 0x25518,
3481 		0x2552c, 0x2553c,
3482 		0x25550, 0x25554,
3483 		0x25600, 0x25600,
3484 		0x25608, 0x25628,
3485 		0x25630, 0x2563c,
3486 		0x25700, 0x2571c,
3487 		0x25780, 0x2578c,
3488 		0x25800, 0x25c38,
3489 		0x25c80, 0x25d7c,
3490 		0x25e00, 0x25e04,
3491 		0x26000, 0x2602c,
3492 		0x26100, 0x2613c,
3493 		0x26190, 0x261c8,
3494 		0x26200, 0x26318,
3495 		0x26400, 0x26528,
3496 		0x26540, 0x26614,
3497 		0x27000, 0x27040,
3498 		0x2704c, 0x27060,
3499 		0x270c0, 0x270ec,
3500 		0x27200, 0x27268,
3501 		0x27270, 0x27284,
3502 		0x272fc, 0x27388,
3503 		0x27400, 0x27404,
3504 		0x27500, 0x27518,
3505 		0x2752c, 0x2753c,
3506 		0x27550, 0x27554,
3507 		0x27600, 0x27600,
3508 		0x27608, 0x27628,
3509 		0x27630, 0x2763c,
3510 		0x27700, 0x2771c,
3511 		0x27780, 0x2778c,
3512 		0x27800, 0x27c38,
3513 		0x27c80, 0x27d7c,
3514 		0x27e00, 0x27e04
3515 	};
3516 	static const unsigned int t5_reg_ranges[] = {
3517 		0x1008, 0x1148,
3518 		0x1180, 0x11b4,
3519 		0x11fc, 0x123c,
3520 		0x1280, 0x173c,
3521 		0x1800, 0x18fc,
3522 		0x3000, 0x3028,
3523 		0x3060, 0x30d8,
3524 		0x30e0, 0x30fc,
3525 		0x3140, 0x357c,
3526 		0x35a8, 0x35cc,
3527 		0x35ec, 0x35ec,
3528 		0x3600, 0x5624,
3529 		0x56cc, 0x575c,
3530 		0x580c, 0x5814,
3531 		0x5890, 0x58bc,
3532 		0x5940, 0x59dc,
3533 		0x59fc, 0x5a18,
3534 		0x5a60, 0x5a9c,
3535 		0x5b94, 0x5bfc,
3536 		0x6000, 0x6040,
3537 		0x6058, 0x614c,
3538 		0x7700, 0x7798,
3539 		0x77c0, 0x78fc,
3540 		0x7b00, 0x7c54,
3541 		0x7d00, 0x7efc,
3542 		0x8dc0, 0x8de0,
3543 		0x8df8, 0x8e84,
3544 		0x8ea0, 0x8f84,
3545 		0x8fc0, 0x90f8,
3546 		0x9400, 0x9470,
3547 		0x9600, 0x96f4,
3548 		0x9800, 0x9808,
3549 		0x9820, 0x983c,
3550 		0x9850, 0x9864,
3551 		0x9c00, 0x9c6c,
3552 		0x9c80, 0x9cec,
3553 		0x9d00, 0x9d6c,
3554 		0x9d80, 0x9dec,
3555 		0x9e00, 0x9e6c,
3556 		0x9e80, 0x9eec,
3557 		0x9f00, 0x9f6c,
3558 		0x9f80, 0xa020,
3559 		0xd004, 0xd03c,
3560 		0xdfc0, 0xdfe0,
3561 		0xe000, 0x11088,
3562 		0x1109c, 0x1117c,
3563 		0x11190, 0x11204,
3564 		0x19040, 0x1906c,
3565 		0x19078, 0x19080,
3566 		0x1908c, 0x19124,
3567 		0x19150, 0x191b0,
3568 		0x191d0, 0x191e8,
3569 		0x19238, 0x19290,
3570 		0x193f8, 0x19474,
3571 		0x19490, 0x194cc,
3572 		0x194f0, 0x194f8,
3573 		0x19c00, 0x19c60,
3574 		0x19c94, 0x19e10,
3575 		0x19e50, 0x19f34,
3576 		0x19f40, 0x19f50,
3577 		0x19f90, 0x19fe4,
3578 		0x1a000, 0x1a06c,
3579 		0x1a0b0, 0x1a120,
3580 		0x1a128, 0x1a138,
3581 		0x1a190, 0x1a1c4,
3582 		0x1a1fc, 0x1a1fc,
3583 		0x1e008, 0x1e00c,
3584 		0x1e040, 0x1e04c,
3585 		0x1e284, 0x1e290,
3586 		0x1e2c0, 0x1e2c0,
3587 		0x1e2e0, 0x1e2e0,
3588 		0x1e300, 0x1e384,
3589 		0x1e3c0, 0x1e3c8,
3590 		0x1e408, 0x1e40c,
3591 		0x1e440, 0x1e44c,
3592 		0x1e684, 0x1e690,
3593 		0x1e6c0, 0x1e6c0,
3594 		0x1e6e0, 0x1e6e0,
3595 		0x1e700, 0x1e784,
3596 		0x1e7c0, 0x1e7c8,
3597 		0x1e808, 0x1e80c,
3598 		0x1e840, 0x1e84c,
3599 		0x1ea84, 0x1ea90,
3600 		0x1eac0, 0x1eac0,
3601 		0x1eae0, 0x1eae0,
3602 		0x1eb00, 0x1eb84,
3603 		0x1ebc0, 0x1ebc8,
3604 		0x1ec08, 0x1ec0c,
3605 		0x1ec40, 0x1ec4c,
3606 		0x1ee84, 0x1ee90,
3607 		0x1eec0, 0x1eec0,
3608 		0x1eee0, 0x1eee0,
3609 		0x1ef00, 0x1ef84,
3610 		0x1efc0, 0x1efc8,
3611 		0x1f008, 0x1f00c,
3612 		0x1f040, 0x1f04c,
3613 		0x1f284, 0x1f290,
3614 		0x1f2c0, 0x1f2c0,
3615 		0x1f2e0, 0x1f2e0,
3616 		0x1f300, 0x1f384,
3617 		0x1f3c0, 0x1f3c8,
3618 		0x1f408, 0x1f40c,
3619 		0x1f440, 0x1f44c,
3620 		0x1f684, 0x1f690,
3621 		0x1f6c0, 0x1f6c0,
3622 		0x1f6e0, 0x1f6e0,
3623 		0x1f700, 0x1f784,
3624 		0x1f7c0, 0x1f7c8,
3625 		0x1f808, 0x1f80c,
3626 		0x1f840, 0x1f84c,
3627 		0x1fa84, 0x1fa90,
3628 		0x1fac0, 0x1fac0,
3629 		0x1fae0, 0x1fae0,
3630 		0x1fb00, 0x1fb84,
3631 		0x1fbc0, 0x1fbc8,
3632 		0x1fc08, 0x1fc0c,
3633 		0x1fc40, 0x1fc4c,
3634 		0x1fe84, 0x1fe90,
3635 		0x1fec0, 0x1fec0,
3636 		0x1fee0, 0x1fee0,
3637 		0x1ff00, 0x1ff84,
3638 		0x1ffc0, 0x1ffc8,
3639 		0x30000, 0x30030,
3640 		0x30100, 0x30144,
3641 		0x30190, 0x301d0,
3642 		0x30200, 0x30318,
3643 		0x30400, 0x3052c,
3644 		0x30540, 0x3061c,
3645 		0x30800, 0x30834,
3646 		0x308c0, 0x30908,
3647 		0x30910, 0x309ac,
3648 		0x30a00, 0x30a2c,
3649 		0x30a44, 0x30a50,
3650 		0x30a74, 0x30c24,
3651 		0x30d00, 0x30d00,
3652 		0x30d08, 0x30d14,
3653 		0x30d1c, 0x30d20,
3654 		0x30d3c, 0x30d50,
3655 		0x31200, 0x3120c,
3656 		0x31220, 0x31220,
3657 		0x31240, 0x31240,
3658 		0x31600, 0x3160c,
3659 		0x31a00, 0x31a1c,
3660 		0x31e00, 0x31e20,
3661 		0x31e38, 0x31e3c,
3662 		0x31e80, 0x31e80,
3663 		0x31e88, 0x31ea8,
3664 		0x31eb0, 0x31eb4,
3665 		0x31ec8, 0x31ed4,
3666 		0x31fb8, 0x32004,
3667 		0x32200, 0x32200,
3668 		0x32208, 0x32240,
3669 		0x32248, 0x32280,
3670 		0x32288, 0x322c0,
3671 		0x322c8, 0x322fc,
3672 		0x32600, 0x32630,
3673 		0x32a00, 0x32abc,
3674 		0x32b00, 0x32b70,
3675 		0x33000, 0x33048,
3676 		0x33060, 0x3309c,
3677 		0x330f0, 0x33148,
3678 		0x33160, 0x3319c,
3679 		0x331f0, 0x332e4,
3680 		0x332f8, 0x333e4,
3681 		0x333f8, 0x33448,
3682 		0x33460, 0x3349c,
3683 		0x334f0, 0x33548,
3684 		0x33560, 0x3359c,
3685 		0x335f0, 0x336e4,
3686 		0x336f8, 0x337e4,
3687 		0x337f8, 0x337fc,
3688 		0x33814, 0x33814,
3689 		0x3382c, 0x3382c,
3690 		0x33880, 0x3388c,
3691 		0x338e8, 0x338ec,
3692 		0x33900, 0x33948,
3693 		0x33960, 0x3399c,
3694 		0x339f0, 0x33ae4,
3695 		0x33af8, 0x33b10,
3696 		0x33b28, 0x33b28,
3697 		0x33b3c, 0x33b50,
3698 		0x33bf0, 0x33c10,
3699 		0x33c28, 0x33c28,
3700 		0x33c3c, 0x33c50,
3701 		0x33cf0, 0x33cfc,
3702 		0x34000, 0x34030,
3703 		0x34100, 0x34144,
3704 		0x34190, 0x341d0,
3705 		0x34200, 0x34318,
3706 		0x34400, 0x3452c,
3707 		0x34540, 0x3461c,
3708 		0x34800, 0x34834,
3709 		0x348c0, 0x34908,
3710 		0x34910, 0x349ac,
3711 		0x34a00, 0x34a2c,
3712 		0x34a44, 0x34a50,
3713 		0x34a74, 0x34c24,
3714 		0x34d00, 0x34d00,
3715 		0x34d08, 0x34d14,
3716 		0x34d1c, 0x34d20,
3717 		0x34d3c, 0x34d50,
3718 		0x35200, 0x3520c,
3719 		0x35220, 0x35220,
3720 		0x35240, 0x35240,
3721 		0x35600, 0x3560c,
3722 		0x35a00, 0x35a1c,
3723 		0x35e00, 0x35e20,
3724 		0x35e38, 0x35e3c,
3725 		0x35e80, 0x35e80,
3726 		0x35e88, 0x35ea8,
3727 		0x35eb0, 0x35eb4,
3728 		0x35ec8, 0x35ed4,
3729 		0x35fb8, 0x36004,
3730 		0x36200, 0x36200,
3731 		0x36208, 0x36240,
3732 		0x36248, 0x36280,
3733 		0x36288, 0x362c0,
3734 		0x362c8, 0x362fc,
3735 		0x36600, 0x36630,
3736 		0x36a00, 0x36abc,
3737 		0x36b00, 0x36b70,
3738 		0x37000, 0x37048,
3739 		0x37060, 0x3709c,
3740 		0x370f0, 0x37148,
3741 		0x37160, 0x3719c,
3742 		0x371f0, 0x372e4,
3743 		0x372f8, 0x373e4,
3744 		0x373f8, 0x37448,
3745 		0x37460, 0x3749c,
3746 		0x374f0, 0x37548,
3747 		0x37560, 0x3759c,
3748 		0x375f0, 0x376e4,
3749 		0x376f8, 0x377e4,
3750 		0x377f8, 0x377fc,
3751 		0x37814, 0x37814,
3752 		0x3782c, 0x3782c,
3753 		0x37880, 0x3788c,
3754 		0x378e8, 0x378ec,
3755 		0x37900, 0x37948,
3756 		0x37960, 0x3799c,
3757 		0x379f0, 0x37ae4,
3758 		0x37af8, 0x37b10,
3759 		0x37b28, 0x37b28,
3760 		0x37b3c, 0x37b50,
3761 		0x37bf0, 0x37c10,
3762 		0x37c28, 0x37c28,
3763 		0x37c3c, 0x37c50,
3764 		0x37cf0, 0x37cfc,
3765 		0x38000, 0x38030,
3766 		0x38100, 0x38144,
3767 		0x38190, 0x381d0,
3768 		0x38200, 0x38318,
3769 		0x38400, 0x3852c,
3770 		0x38540, 0x3861c,
3771 		0x38800, 0x38834,
3772 		0x388c0, 0x38908,
3773 		0x38910, 0x389ac,
3774 		0x38a00, 0x38a2c,
3775 		0x38a44, 0x38a50,
3776 		0x38a74, 0x38c24,
3777 		0x38d00, 0x38d00,
3778 		0x38d08, 0x38d14,
3779 		0x38d1c, 0x38d20,
3780 		0x38d3c, 0x38d50,
3781 		0x39200, 0x3920c,
3782 		0x39220, 0x39220,
3783 		0x39240, 0x39240,
3784 		0x39600, 0x3960c,
3785 		0x39a00, 0x39a1c,
3786 		0x39e00, 0x39e20,
3787 		0x39e38, 0x39e3c,
3788 		0x39e80, 0x39e80,
3789 		0x39e88, 0x39ea8,
3790 		0x39eb0, 0x39eb4,
3791 		0x39ec8, 0x39ed4,
3792 		0x39fb8, 0x3a004,
3793 		0x3a200, 0x3a200,
3794 		0x3a208, 0x3a240,
3795 		0x3a248, 0x3a280,
3796 		0x3a288, 0x3a2c0,
3797 		0x3a2c8, 0x3a2fc,
3798 		0x3a600, 0x3a630,
3799 		0x3aa00, 0x3aabc,
3800 		0x3ab00, 0x3ab70,
3801 		0x3b000, 0x3b048,
3802 		0x3b060, 0x3b09c,
3803 		0x3b0f0, 0x3b148,
3804 		0x3b160, 0x3b19c,
3805 		0x3b1f0, 0x3b2e4,
3806 		0x3b2f8, 0x3b3e4,
3807 		0x3b3f8, 0x3b448,
3808 		0x3b460, 0x3b49c,
3809 		0x3b4f0, 0x3b548,
3810 		0x3b560, 0x3b59c,
3811 		0x3b5f0, 0x3b6e4,
3812 		0x3b6f8, 0x3b7e4,
3813 		0x3b7f8, 0x3b7fc,
3814 		0x3b814, 0x3b814,
3815 		0x3b82c, 0x3b82c,
3816 		0x3b880, 0x3b88c,
3817 		0x3b8e8, 0x3b8ec,
3818 		0x3b900, 0x3b948,
3819 		0x3b960, 0x3b99c,
3820 		0x3b9f0, 0x3bae4,
3821 		0x3baf8, 0x3bb10,
3822 		0x3bb28, 0x3bb28,
3823 		0x3bb3c, 0x3bb50,
3824 		0x3bbf0, 0x3bc10,
3825 		0x3bc28, 0x3bc28,
3826 		0x3bc3c, 0x3bc50,
3827 		0x3bcf0, 0x3bcfc,
3828 		0x3c000, 0x3c030,
3829 		0x3c100, 0x3c144,
3830 		0x3c190, 0x3c1d0,
3831 		0x3c200, 0x3c318,
3832 		0x3c400, 0x3c52c,
3833 		0x3c540, 0x3c61c,
3834 		0x3c800, 0x3c834,
3835 		0x3c8c0, 0x3c908,
3836 		0x3c910, 0x3c9ac,
3837 		0x3ca00, 0x3ca2c,
3838 		0x3ca44, 0x3ca50,
3839 		0x3ca74, 0x3cc24,
3840 		0x3cd00, 0x3cd00,
3841 		0x3cd08, 0x3cd14,
3842 		0x3cd1c, 0x3cd20,
3843 		0x3cd3c, 0x3cd50,
3844 		0x3d200, 0x3d20c,
3845 		0x3d220, 0x3d220,
3846 		0x3d240, 0x3d240,
3847 		0x3d600, 0x3d60c,
3848 		0x3da00, 0x3da1c,
3849 		0x3de00, 0x3de20,
3850 		0x3de38, 0x3de3c,
3851 		0x3de80, 0x3de80,
3852 		0x3de88, 0x3dea8,
3853 		0x3deb0, 0x3deb4,
3854 		0x3dec8, 0x3ded4,
3855 		0x3dfb8, 0x3e004,
3856 		0x3e200, 0x3e200,
3857 		0x3e208, 0x3e240,
3858 		0x3e248, 0x3e280,
3859 		0x3e288, 0x3e2c0,
3860 		0x3e2c8, 0x3e2fc,
3861 		0x3e600, 0x3e630,
3862 		0x3ea00, 0x3eabc,
3863 		0x3eb00, 0x3eb70,
3864 		0x3f000, 0x3f048,
3865 		0x3f060, 0x3f09c,
3866 		0x3f0f0, 0x3f148,
3867 		0x3f160, 0x3f19c,
3868 		0x3f1f0, 0x3f2e4,
3869 		0x3f2f8, 0x3f3e4,
3870 		0x3f3f8, 0x3f448,
3871 		0x3f460, 0x3f49c,
3872 		0x3f4f0, 0x3f548,
3873 		0x3f560, 0x3f59c,
3874 		0x3f5f0, 0x3f6e4,
3875 		0x3f6f8, 0x3f7e4,
3876 		0x3f7f8, 0x3f7fc,
3877 		0x3f814, 0x3f814,
3878 		0x3f82c, 0x3f82c,
3879 		0x3f880, 0x3f88c,
3880 		0x3f8e8, 0x3f8ec,
3881 		0x3f900, 0x3f948,
3882 		0x3f960, 0x3f99c,
3883 		0x3f9f0, 0x3fae4,
3884 		0x3faf8, 0x3fb10,
3885 		0x3fb28, 0x3fb28,
3886 		0x3fb3c, 0x3fb50,
3887 		0x3fbf0, 0x3fc10,
3888 		0x3fc28, 0x3fc28,
3889 		0x3fc3c, 0x3fc50,
3890 		0x3fcf0, 0x3fcfc,
3891 		0x40000, 0x4000c,
3892 		0x40040, 0x40068,
3893 		0x4007c, 0x40144,
3894 		0x40180, 0x4018c,
3895 		0x40200, 0x40298,
3896 		0x402ac, 0x4033c,
3897 		0x403f8, 0x403fc,
3898 		0x41304, 0x413c4,
3899 		0x41400, 0x4141c,
3900 		0x41480, 0x414d0,
3901 		0x44000, 0x44078,
3902 		0x440c0, 0x44278,
3903 		0x442c0, 0x44478,
3904 		0x444c0, 0x44678,
3905 		0x446c0, 0x44878,
3906 		0x448c0, 0x449fc,
3907 		0x45000, 0x45068,
3908 		0x45080, 0x45084,
3909 		0x450a0, 0x450b0,
3910 		0x45200, 0x45268,
3911 		0x45280, 0x45284,
3912 		0x452a0, 0x452b0,
3913 		0x460c0, 0x460e4,
3914 		0x47000, 0x4708c,
3915 		0x47200, 0x47250,
3916 		0x47400, 0x47420,
3917 		0x47600, 0x47618,
3918 		0x47800, 0x47814,
3919 		0x48000, 0x4800c,
3920 		0x48040, 0x48068,
3921 		0x4807c, 0x48144,
3922 		0x48180, 0x4818c,
3923 		0x48200, 0x48298,
3924 		0x482ac, 0x4833c,
3925 		0x483f8, 0x483fc,
3926 		0x49304, 0x493c4,
3927 		0x49400, 0x4941c,
3928 		0x49480, 0x494d0,
3929 		0x4c000, 0x4c078,
3930 		0x4c0c0, 0x4c278,
3931 		0x4c2c0, 0x4c478,
3932 		0x4c4c0, 0x4c678,
3933 		0x4c6c0, 0x4c878,
3934 		0x4c8c0, 0x4c9fc,
3935 		0x4d000, 0x4d068,
3936 		0x4d080, 0x4d084,
3937 		0x4d0a0, 0x4d0b0,
3938 		0x4d200, 0x4d268,
3939 		0x4d280, 0x4d284,
3940 		0x4d2a0, 0x4d2b0,
3941 		0x4e0c0, 0x4e0e4,
3942 		0x4f000, 0x4f08c,
3943 		0x4f200, 0x4f250,
3944 		0x4f400, 0x4f420,
3945 		0x4f600, 0x4f618,
3946 		0x4f800, 0x4f814,
3947 		0x50000, 0x500cc,
3948 		0x50400, 0x50400,
3949 		0x50800, 0x508cc,
3950 		0x50c00, 0x50c00,
3951 		0x51000, 0x5101c,
3952 		0x51300, 0x51308,
3953 	};
3954 
3955 	if (is_t4(sc)) {
3956 		reg_ranges = &t4_reg_ranges[0];
3957 		n = nitems(t4_reg_ranges);
3958 	} else {
3959 		reg_ranges = &t5_reg_ranges[0];
3960 		n = nitems(t5_reg_ranges);
3961 	}
3962 
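	/*
	 * Each pair of entries in the reg_ranges table is an inclusive
	 * [start, end] register range; dump them into the caller's buffer
	 * back to back.
	 */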
3963 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
3964 	for (i = 0; i < n; i += 2)
3965 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3966 }
3967 
3968 static void
3969 cxgbe_tick(void *arg)
3970 {
3971 	struct port_info *pi = arg;
3972 	struct ifnet *ifp = pi->ifp;
3973 	struct sge_txq *txq;
3974 	int i, drops;
3975 	struct port_stats *s = &pi->stats;
3976 
3977 	PORT_LOCK(pi);
3978 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3979 		PORT_UNLOCK(pi);
3980 		return;	/* without scheduling another callout */
3981 	}
3982 
3983 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
3984 
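	/*
	 * Pause frames aren't payload traffic, so they're backed out of the
	 * frame and octet counts.  A pause frame is a minimum-sized (64 byte)
	 * Ethernet frame, hence the "* 64" below; it is also sent to a
	 * multicast address, which is why the mcast counts are adjusted too.
	 */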
3985 	ifp->if_opackets = s->tx_frames - s->tx_pause;
3986 	ifp->if_ipackets = s->rx_frames - s->rx_pause;
3987 	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
3988 	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
3989 	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
3990 	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
3991 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3992 	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3993 	    s->rx_trunc3;
3994 
3995 	drops = s->tx_drop;
3996 	for_each_txq(pi, i, txq)
3997 		drops += txq->br->br_drops;
3998 	ifp->if_snd.ifq_drops = drops;
3999 
4000 	ifp->if_oerrors = s->tx_error_frames;
4001 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4002 	    s->rx_fcs_err + s->rx_len_err;
4003 
4004 	callout_schedule(&pi->tick, hz);
4005 	PORT_UNLOCK(pi);
4006 }
4007 
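/*
 * A vlan(4) interface was configured on top of one of our ports.  Record the
 * parent ifnet as the vlan's cookie so it can be found later.  (This is
 * presumably wired up as a vlan_config EVENTHANDLER elsewhere in this file;
 * the check against arg skips events for other drivers' ifnets.)
 */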
4008 static void
4009 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4010 {
4011 	struct ifnet *vlan;
4012 
4013 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4014 		return;
4015 
4016 	vlan = VLAN_DEVAT(ifp, vid);
4017 	VLAN_SETCOOKIE(vlan, ifp);
4018 }
4019 
4020 static int
4021 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4022 {
4023 
4024 #ifdef INVARIANTS
4025 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4026 	    __func__, rss->opcode, iq, m);
4027 #else
4028 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4029 	    __func__, rss->opcode, iq, m);
4030 	m_freem(m);
4031 #endif
4032 	return (EDOOFUS);
4033 }
4034 
4035 int
4036 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4037 {
4038 	uintptr_t *loc, new;
4039 
4040 	if (opcode >= nitems(sc->cpl_handler))
4041 		return (EINVAL);
4042 
4043 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4044 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4045 	atomic_store_rel_ptr(loc, new);
4046 
4047 	return (0);
4048 }
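
/*
 * A hypothetical caller (an offload module, say) would install its handler
 * once, e.g.:
 *
 *	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
 *
 * A NULL handler reinstalls cpl_not_handled for that opcode.  The release
 * store lets the dispatch path read the table without taking a lock.
 */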
4049 
4050 static int
4051 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4052 {
4053 
4054 #ifdef INVARIANTS
4055 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4056 #else
4057 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4058 	    __func__, iq, ctrl);
4059 #endif
4060 	return (EDOOFUS);
4061 }
4062 
4063 int
4064 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4065 {
4066 	uintptr_t *loc, new;
4067 
4068 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4069 	loc = (uintptr_t *) &sc->an_handler;
4070 	atomic_store_rel_ptr(loc, new);
4071 
4072 	return (0);
4073 }
4074 
4075 static int
4076 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4077 {
4078 	const struct cpl_fw6_msg *cpl =
4079 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4080 
4081 #ifdef INVARIANTS
4082 	panic("%s: fw_msg type %d", __func__, cpl->type);
4083 #else
4084 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4085 #endif
4086 	return (EDOOFUS);
4087 }
4088 
4089 int
4090 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4091 {
4092 	uintptr_t *loc, new;
4093 
4094 	if (type >= nitems(sc->fw_msg_handler))
4095 		return (EINVAL);
4096 
4097 	/*
4098 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4099 	 * handler dispatch table.  Reject any attempt to install a handler for
4100 	 * this subtype.
4101 	 */
4102 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4103 		return (EINVAL);
4104 
4105 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4106 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4107 	atomic_store_rel_ptr(loc, new);
4108 
4109 	return (0);
4110 }
4111 
4112 static int
4113 t4_sysctls(struct adapter *sc)
4114 {
4115 	struct sysctl_ctx_list *ctx;
4116 	struct sysctl_oid *oid;
4117 	struct sysctl_oid_list *children, *c0;
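	/*
	 * %b-style descriptions for the capability bitfields below: the
	 * leading \20 selects hex output and each \<bit> prefix names that
	 * bit for display by sysctl_bitfield().
	 */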
4118 	static char *caps[] = {
4119 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4120 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4121 		"\20\1TOE",				/* caps[2] toecaps */
4122 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4123 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4124 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4125 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4126 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4127 	};
4128 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4129 
4130 	ctx = device_get_sysctl_ctx(sc->dev);
4131 
4132 	/*
4133 	 * dev.t4nex.X.
4134 	 */
4135 	oid = device_get_sysctl_tree(sc->dev);
4136 	c0 = children = SYSCTL_CHILDREN(oid);
4137 
4138 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4139 	    sc->params.nports, "# of ports");
4140 
4141 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4142 	    NULL, chip_rev(sc), "chip hardware revision");
4143 
4144 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4145 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4146 
4147 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4148 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4149 
4150 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4151 	    sc->cfcsum, "config file checksum");
4152 
4153 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4154 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4155 	    sysctl_bitfield, "A", "available doorbells");
4156 
4157 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4158 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4159 	    sysctl_bitfield, "A", "available link capabilities");
4160 
4161 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4162 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4163 	    sysctl_bitfield, "A", "available NIC capabilities");
4164 
4165 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4166 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4167 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4168 
4169 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4170 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4171 	    sysctl_bitfield, "A", "available RDMA capabilities");
4172 
4173 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4174 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4175 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4176 
4177 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4178 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4179 	    sysctl_bitfield, "A", "available FCoE capabilities");
4180 
4181 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4182 	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4183 
4184 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4185 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4186 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4187 	    "interrupt holdoff timer values (us)");
4188 
4189 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4190 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4191 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4192 	    "interrupt holdoff packet counter values");
4193 
4194 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4195 	    NULL, sc->tids.nftids, "number of filters");
4196 
4197 #ifdef SBUF_DRAIN
4198 	/*
4199 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4200 	 */
4201 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4202 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4203 	    "logs and miscellaneous information");
4204 	children = SYSCTL_CHILDREN(oid);
4205 
4206 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4207 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4208 	    sysctl_cctrl, "A", "congestion control");
4209 
4210 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4211 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4212 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4213 
4214 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4215 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4216 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4217 
4218 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4219 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4220 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4221 
4222 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4223 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4224 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4225 
4226 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4227 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4228 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4229 
4230 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4231 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4232 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4233 
4234 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4235 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4236 	    sysctl_cim_la, "A", "CIM logic analyzer");
4237 
4238 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4239 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4240 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4241 
4242 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4243 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4244 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4245 
4246 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4247 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4248 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4249 
4250 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4251 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4252 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4253 
4254 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4255 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4256 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4257 
4258 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4259 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4260 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4261 
4262 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4263 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4264 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4265 
4266 	if (is_t5(sc)) {
4267 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4268 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4269 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4270 
4271 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4272 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4273 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4274 	}
4275 
4276 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4277 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4278 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4279 
4280 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4281 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4282 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4283 
4284 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4285 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4286 	    sysctl_cpl_stats, "A", "CPL statistics");
4287 
4288 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4289 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4290 	    sysctl_ddp_stats, "A", "DDP statistics");
4291 
4292 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4293 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4294 	    sysctl_devlog, "A", "firmware's device log");
4295 
4296 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4297 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4298 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4299 
4300 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4301 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4302 	    sysctl_hw_sched, "A", "hardware scheduler");
4303 
4304 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4305 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4306 	    sysctl_l2t, "A", "hardware L2 table");
4307 
4308 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4309 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4310 	    sysctl_lb_stats, "A", "loopback statistics");
4311 
4312 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4313 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4314 	    sysctl_meminfo, "A", "memory regions");
4315 
4316 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4317 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4318 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4319 
4320 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4321 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4322 	    sysctl_path_mtus, "A", "path MTUs");
4323 
4324 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4325 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4326 	    sysctl_pm_stats, "A", "PM statistics");
4327 
4328 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4329 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4330 	    sysctl_rdma_stats, "A", "RDMA statistics");
4331 
4332 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4333 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4334 	    sysctl_tcp_stats, "A", "TCP statistics");
4335 
4336 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4337 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4338 	    sysctl_tids, "A", "TID information");
4339 
4340 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4341 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342 	    sysctl_tp_err_stats, "A", "TP error statistics");
4343 
4344 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4345 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346 	    sysctl_tp_la, "A", "TP logic analyzer");
4347 
4348 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4349 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4350 	    sysctl_tx_rate, "A", "Tx rate");
4351 
4352 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4353 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4355 
4356 	if (is_t5(sc)) {
4357 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4358 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4359 		    sysctl_wcwr_stats, "A", "write combined work requests");
4360 	}
4361 #endif
4362 
4363 #ifdef TCP_OFFLOAD
4364 	if (is_offload(sc)) {
4365 		/*
4366 		 * dev.t4nex.X.toe.
4367 		 */
4368 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4369 		    NULL, "TOE parameters");
4370 		children = SYSCTL_CHILDREN(oid);
4371 
4372 		sc->tt.sndbuf = 256 * 1024;
4373 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4374 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4375 
4376 		sc->tt.ddp = 0;
4377 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4378 		    &sc->tt.ddp, 0, "DDP allowed");
4379 
4380 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4381 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4382 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4383 
4384 		sc->tt.ddp_thres =
4385 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4386 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4387 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4388 
4389 		sc->tt.rx_coalesce = 1;
4390 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4391 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4392 	}
4393 #endif
4394 
4396 	return (0);
4397 }
4398 
4399 static int
4400 cxgbe_sysctls(struct port_info *pi)
4401 {
4402 	struct sysctl_ctx_list *ctx;
4403 	struct sysctl_oid *oid;
4404 	struct sysctl_oid_list *children;
4405 
4406 	ctx = device_get_sysctl_ctx(pi->dev);
4407 
4408 	/*
4409 	 * dev.cxgbe.X.
4410 	 */
4411 	oid = device_get_sysctl_tree(pi->dev);
4412 	children = SYSCTL_CHILDREN(oid);
4413 
4414 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "linkdnrc", CTLFLAG_RD,
4415 	    &pi->linkdnrc, 0, "reason why link is down");
4416 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4417 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4418 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4419 		    "PHY temperature (in Celsius)");
4420 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4421 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4422 		    "PHY firmware version");
4423 	}
4424 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4425 	    &pi->nrxq, 0, "# of rx queues");
4426 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4427 	    &pi->ntxq, 0, "# of tx queues");
4428 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4429 	    &pi->first_rxq, 0, "index of first rx queue");
4430 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4431 	    &pi->first_txq, 0, "index of first tx queue");
4432 
4433 #ifdef TCP_OFFLOAD
4434 	if (is_offload(pi->adapter)) {
4435 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4436 		    &pi->nofldrxq, 0,
4437 		    "# of rx queues for offloaded TCP connections");
4438 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4439 		    &pi->nofldtxq, 0,
4440 		    "# of tx queues for offloaded TCP connections");
4441 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4442 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4443 		    "index of first TOE rx queue");
4444 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4445 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4446 		    "index of first TOE tx queue");
4447 	}
4448 #endif
4449 
4450 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4451 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4452 	    "holdoff timer index");
4453 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4454 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4455 	    "holdoff packet counter index");
4456 
4457 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4458 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4459 	    "rx queue size");
4460 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4461 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4462 	    "tx queue size");
4463 
4464 	/*
4465 	 * dev.cxgbe.X.stats.
4466 	 */
4467 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4468 	    NULL, "port statistics");
4469 	children = SYSCTL_CHILDREN(oid);
4470 
4471 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4472 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4473 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4474 	    sysctl_handle_t4_reg64, "QU", desc)
4475 
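	/*
	 * Each stat below is read from its hardware counter on every sysctl
	 * access (see sysctl_handle_t4_reg64), so these values are always
	 * current, unlike the port_stats-derived values further down.
	 */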
4476 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4477 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4478 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4479 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4480 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4481 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4482 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4483 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4484 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4485 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4486 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4487 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4488 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4489 	    "# of tx frames in this range",
4490 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4491 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4492 	    "# of tx frames in this range",
4493 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4494 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4495 	    "# of tx frames in this range",
4496 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4497 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4498 	    "# of tx frames in this range",
4499 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4500 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4501 	    "# of tx frames in this range",
4502 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4503 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4504 	    "# of tx frames in this range",
4505 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4506 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4507 	    "# of tx frames in this range",
4508 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4509 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4510 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4511 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4512 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4513 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4514 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4515 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4516 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4517 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4518 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4519 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4520 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4521 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4522 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4523 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4524 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4525 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4526 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4527 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4528 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4529 
4530 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4531 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4532 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4533 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4534 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4535 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4536 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4537 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4538 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4539 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4540 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4541 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4542 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4543 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4544 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4545 	    "# of frames received with bad FCS",
4546 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4547 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4548 	    "# of frames received with length error",
4549 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4550 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4551 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4552 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4553 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4554 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4555 	    "# of rx frames in this range",
4556 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4557 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4558 	    "# of rx frames in this range",
4559 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4560 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4561 	    "# of rx frames in this range",
4562 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4563 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4564 	    "# of rx frames in this range",
4565 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4566 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4567 	    "# of rx frames in this range",
4568 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4569 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4570 	    "# of rx frames in this range",
4571 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4572 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4573 	    "# of rx frames in this range",
4574 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4575 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4576 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4577 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4578 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4579 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4580 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4581 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4582 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4583 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4584 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4585 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4586 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4587 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4588 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4589 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4590 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4591 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4592 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4593 
4594 #undef SYSCTL_ADD_T4_REG64
4595 
4596 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4597 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4598 	    &pi->stats.name, desc)
4599 
4600 	/* We get these from port_stats and they may be stale by up to 1s */
4601 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4602 	    "# drops due to buffer-group 0 overflows");
4603 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4604 	    "# drops due to buffer-group 1 overflows");
4605 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4606 	    "# drops due to buffer-group 2 overflows");
4607 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4608 	    "# drops due to buffer-group 3 overflows");
4609 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4610 	    "# of buffer-group 0 truncated packets");
4611 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4612 	    "# of buffer-group 1 truncated packets");
4613 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4614 	    "# of buffer-group 2 truncated packets");
4615 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4616 	    "# of buffer-group 3 truncated packets");
4617 
4618 #undef SYSCTL_ADD_T4_PORTSTAT
4619 
4620 	return (0);
4621 }
4622 
4623 static int
4624 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4625 {
4626 	int rc, *i;
4627 	struct sbuf sb;
4628 
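	/* arg1 points at an array of ints and arg2 is its size in bytes. */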
4629 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4630 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4631 		sbuf_printf(&sb, "%d ", *i);
4632 	sbuf_trim(&sb);
4633 	sbuf_finish(&sb);
4634 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4635 	sbuf_delete(&sb);
4636 	return (rc);
4637 }
4638 
4639 static int
4640 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4641 {
4642 	int rc;
4643 	struct sbuf *sb;
4644 
4645 	rc = sysctl_wire_old_buffer(req, 0);
4646 	if (rc != 0)
4647 		return (rc);
4648 
4649 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4650 	if (sb == NULL)
4651 		return (ENOMEM);
4652 
4653 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4654 	rc = sbuf_finish(sb);
4655 	sbuf_delete(sb);
4656 
4657 	return (rc);
4658 }
4659 
4660 static int
4661 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4662 {
4663 	struct port_info *pi = arg1;
4664 	int op = arg2;
4665 	struct adapter *sc = pi->adapter;
4666 	u_int v;
4667 	int rc;
4668 
4669 	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4670 	if (rc)
4671 		return (rc);
4672 	/* XXX: magic numbers */
4673 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4674 	    &v);
4675 	end_synchronized_op(sc, 0);
4676 	if (rc)
4677 		return (rc);
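	/* The temperature (op 0) appears to be reported in the upper byte. */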
4678 	if (op == 0)
4679 		v /= 256;
4680 
4681 	rc = sysctl_handle_int(oidp, &v, 0, req);
4682 	return (rc);
4683 }
4684 
4685 static int
4686 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4687 {
4688 	struct port_info *pi = arg1;
4689 	struct adapter *sc = pi->adapter;
4690 	int idx, rc, i;
4691 	struct sge_rxq *rxq;
4692 #ifdef TCP_OFFLOAD
4693 	struct sge_ofld_rxq *ofld_rxq;
4694 #endif
4695 	uint8_t v;
4696 
4697 	idx = pi->tmr_idx;
4698 
4699 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4700 	if (rc != 0 || req->newptr == NULL)
4701 		return (rc);
4702 
4703 	if (idx < 0 || idx >= SGE_NTIMERS)
4704 		return (EINVAL);
4705 
4706 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4707 	    "t4tmr");
4708 	if (rc)
4709 		return (rc);
4710 
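	/*
	 * Update every rx queue's holdoff parameters in place, using a
	 * release store where the platform provides one so the interrupt
	 * path picks up a consistent value without extra locking.
	 */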
4711 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4712 	for_each_rxq(pi, i, rxq) {
4713 #ifdef atomic_store_rel_8
4714 		atomic_store_rel_8(&rxq->iq.intr_params, v);
4715 #else
4716 		rxq->iq.intr_params = v;
4717 #endif
4718 	}
4719 #ifdef TCP_OFFLOAD
4720 	for_each_ofld_rxq(pi, i, ofld_rxq) {
4721 #ifdef atomic_store_rel_8
4722 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4723 #else
4724 		ofld_rxq->iq.intr_params = v;
4725 #endif
4726 	}
4727 #endif
4728 	pi->tmr_idx = idx;
4729 
4730 	end_synchronized_op(sc, LOCK_HELD);
4731 	return (0);
4732 }
4733 
4734 static int
4735 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4736 {
4737 	struct port_info *pi = arg1;
4738 	struct adapter *sc = pi->adapter;
4739 	int idx, rc;
4740 
4741 	idx = pi->pktc_idx;
4742 
4743 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4744 	if (rc != 0 || req->newptr == NULL)
4745 		return (rc);
4746 
4747 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4748 		return (EINVAL);
4749 
4750 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4751 	    "t4pktc");
4752 	if (rc)
4753 		return (rc);
4754 
4755 	if (pi->flags & PORT_INIT_DONE)
4756 		rc = EBUSY; /* cannot be changed once the queues are created */
4757 	else
4758 		pi->pktc_idx = idx;
4759 
4760 	end_synchronized_op(sc, LOCK_HELD);
4761 	return (rc);
4762 }
4763 
4764 static int
4765 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4766 {
4767 	struct port_info *pi = arg1;
4768 	struct adapter *sc = pi->adapter;
4769 	int qsize, rc;
4770 
4771 	qsize = pi->qsize_rxq;
4772 
4773 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4774 	if (rc != 0 || req->newptr == NULL)
4775 		return (rc);
4776 
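	/* The rx queue size must be at least 128 and a multiple of 8. */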
4777 	if (qsize < 128 || (qsize & 7))
4778 		return (EINVAL);
4779 
4780 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4781 	    "t4rxqs");
4782 	if (rc)
4783 		return (rc);
4784 
4785 	if (pi->flags & PORT_INIT_DONE)
4786 		rc = EBUSY; /* cannot be changed once the queues are created */
4787 	else
4788 		pi->qsize_rxq = qsize;
4789 
4790 	end_synchronized_op(sc, LOCK_HELD);
4791 	return (rc);
4792 }
4793 
4794 static int
4795 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4796 {
4797 	struct port_info *pi = arg1;
4798 	struct adapter *sc = pi->adapter;
4799 	int qsize, rc;
4800 
4801 	qsize = pi->qsize_txq;
4802 
4803 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4804 	if (rc != 0 || req->newptr == NULL)
4805 		return (rc);
4806 
4807 	/* the bufring size must be a power of 2 */
4808 	if (qsize < 128 || !powerof2(qsize))
4809 		return (EINVAL);
4810 
4811 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4812 	    "t4txqs");
4813 	if (rc)
4814 		return (rc);
4815 
4816 	if (pi->flags & PORT_INIT_DONE)
4817 		rc = EBUSY; /* cannot be changed once the queues are created */
4818 	else
4819 		pi->qsize_txq = qsize;
4820 
4821 	end_synchronized_op(sc, LOCK_HELD);
4822 	return (rc);
4823 }
4824 
4825 static int
4826 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4827 {
4828 	struct adapter *sc = arg1;
4829 	int reg = arg2;
4830 	uint64_t val;
4831 
4832 	val = t4_read_reg64(sc, reg);
4833 
4834 	return (sysctl_handle_64(oidp, &val, 0, req));
4835 }
4836 
4837 #ifdef SBUF_DRAIN
4838 static int
4839 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4840 {
4841 	struct adapter *sc = arg1;
4842 	struct sbuf *sb;
4843 	int rc, i;
4844 	uint16_t incr[NMTUS][NCCTRL_WIN];
4845 	static const char *dec_fac[] = {
4846 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4847 		"0.9375"
4848 	};
4849 
4850 	rc = sysctl_wire_old_buffer(req, 0);
4851 	if (rc != 0)
4852 		return (rc);
4853 
4854 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4855 	if (sb == NULL)
4856 		return (ENOMEM);
4857 
4858 	t4_read_cong_tbl(sc, incr);
4859 
4860 	for (i = 0; i < NCCTRL_WIN; ++i) {
4861 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4862 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4863 		    incr[5][i], incr[6][i], incr[7][i]);
4864 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4865 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4866 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4867 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4868 	}
4869 
4870 	rc = sbuf_finish(sb);
4871 	sbuf_delete(sb);
4872 
4873 	return (rc);
4874 }
4875 
4876 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4877 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4878 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4879 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4880 };
4881 
4882 static int
4883 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4884 {
4885 	struct adapter *sc = arg1;
4886 	struct sbuf *sb;
4887 	int rc, i, n, qid = arg2;
4888 	uint32_t *buf, *p;
4889 	char *qtype;
4890 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4891 
4892 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4893 	    ("%s: bad qid %d\n", __func__, qid));
4894 
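	/*
	 * arg2 encodes both the queue type and its index: values below
	 * CIM_NUM_IBQ select an inbound queue, the rest select an outbound
	 * queue offset by CIM_NUM_IBQ (see the registrations in t4_sysctls).
	 */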
4895 	if (qid < CIM_NUM_IBQ) {
4896 		/* inbound queue */
4897 		qtype = "IBQ";
4898 		n = 4 * CIM_IBQ_SIZE;
4899 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4900 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4901 	} else {
4902 		/* outbound queue */
4903 		qtype = "OBQ";
4904 		qid -= CIM_NUM_IBQ;
4905 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4906 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4907 		rc = t4_read_cim_obq(sc, qid, buf, n);
4908 	}
4909 
4910 	if (rc < 0) {
4911 		rc = -rc;
4912 		goto done;
4913 	}
4914 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
4915 
4916 	rc = sysctl_wire_old_buffer(req, 0);
4917 	if (rc != 0)
4918 		goto done;
4919 
4920 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4921 	if (sb == NULL) {
4922 		rc = ENOMEM;
4923 		goto done;
4924 	}
4925 
4926 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
4927 	for (i = 0, p = buf; i < n; i += 16, p += 4)
4928 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4929 		    p[2], p[3]);
4930 
4931 	rc = sbuf_finish(sb);
4932 	sbuf_delete(sb);
4933 done:
4934 	free(buf, M_CXGBE);
4935 	return (rc);
4936 }
4937 
4938 static int
4939 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4940 {
4941 	struct adapter *sc = arg1;
4942 	u_int cfg;
4943 	struct sbuf *sb;
4944 	uint32_t *buf, *p;
4945 	int rc;
4946 
4947 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4948 	if (rc != 0)
4949 		return (rc);
4950 
4951 	rc = sysctl_wire_old_buffer(req, 0);
4952 	if (rc != 0)
4953 		return (rc);
4954 
4955 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4956 	if (sb == NULL)
4957 		return (ENOMEM);
4958 
4959 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4960 	    M_ZERO | M_WAITOK);
4961 
4962 	rc = -t4_cim_read_la(sc, buf, NULL);
4963 	if (rc != 0)
4964 		goto done;
4965 
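	/*
	 * Each LA entry is 8 words.  In PC-only capture mode
	 * (F_UPDBGLACAPTPCONLY) an entry appears to pack three samples,
	 * which is why that branch below prints three lines per entry.
	 */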
4966 	sbuf_printf(sb, "Status   Data      PC%s",
4967 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
4968 	    "     LS0Stat  LS0Addr             LS0Data");
4969 
4970 	KASSERT((sc->params.cim_la_size & 7) == 0,
4971 	    ("%s: p will walk off the end of buf", __func__));
4972 
4973 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4974 		if (cfg & F_UPDBGLACAPTPCONLY) {
4975 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
4976 			    p[6], p[7]);
4977 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
4978 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4979 			    p[4] & 0xff, p[5] >> 8);
4980 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
4981 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4982 			    p[1] & 0xf, p[2] >> 4);
4983 		} else {
4984 			sbuf_printf(sb,
4985 			    "\n  %02x   %x%07x %x%07x %08x %08x "
4986 			    "%08x%08x%08x%08x",
4987 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4988 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4989 			    p[6], p[7]);
4990 		}
4991 	}
4992 
4993 	rc = sbuf_finish(sb);
4994 	sbuf_delete(sb);
4995 done:
4996 	free(buf, M_CXGBE);
4997 	return (rc);
4998 }
4999 
5000 static int
5001 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5002 {
5003 	struct adapter *sc = arg1;
5004 	u_int i;
5005 	struct sbuf *sb;
5006 	uint32_t *buf, *p;
5007 	int rc;
5008 
5009 	rc = sysctl_wire_old_buffer(req, 0);
5010 	if (rc != 0)
5011 		return (rc);
5012 
5013 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5014 	if (sb == NULL)
5015 		return (ENOMEM);
5016 
5017 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5018 	    M_ZERO | M_WAITOK);
5019 
5020 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5021 	p = buf;
5022 
5023 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5024 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5025 		    p[1], p[0]);
5026 	}
5027 
5028 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5029 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5030 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5031 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5032 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5033 		    (p[1] >> 2) | ((p[2] & 3) << 30),
5034 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5035 		    p[0] & 1);
5036 	}
5037 
5038 	rc = sbuf_finish(sb);
5039 	sbuf_delete(sb);
5040 	free(buf, M_CXGBE);
5041 	return (rc);
5042 }
5043 
5044 static int
5045 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5046 {
5047 	struct adapter *sc = arg1;
5048 	u_int i;
5049 	struct sbuf *sb;
5050 	uint32_t *buf, *p;
5051 	int rc;
5052 
5053 	rc = sysctl_wire_old_buffer(req, 0);
5054 	if (rc != 0)
5055 		return (rc);
5056 
5057 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5058 	if (sb == NULL)
5059 		return (ENOMEM);
5060 
5061 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5062 	    M_ZERO | M_WAITOK);
5063 
5064 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5065 	p = buf;
5066 
5067 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5068 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5069 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5070 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5071 		    p[4], p[3], p[2], p[1], p[0]);
5072 	}
5073 
5074 	sbuf_printf(sb, "\n\nCntl ID               Data");
5075 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5076 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5077 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5078 	}
5079 
5080 	rc = sbuf_finish(sb);
5081 	sbuf_delete(sb);
5082 	free(buf, M_CXGBE);
5083 	return (rc);
5084 }
5085 
5086 static int
5087 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5088 {
5089 	struct adapter *sc = arg1;
5090 	struct sbuf *sb;
5091 	int rc, i;
5092 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5093 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5094 	uint16_t thres[CIM_NUM_IBQ];
5095 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5096 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5097 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5098 
5099 	if (is_t4(sc)) {
5100 		cim_num_obq = CIM_NUM_OBQ;
5101 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5102 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5103 	} else {
5104 		cim_num_obq = CIM_NUM_OBQ_T5;
5105 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5106 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5107 	}
5108 	nq = CIM_NUM_IBQ + cim_num_obq;
5109 
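	/*
	 * Read four words of state per queue starting at ibq_rdaddr, and two
	 * words of write pointer per outbound queue starting at obq_rdaddr.
	 * T5 moved these into shadow registers, hence the different
	 * addresses selected above.
	 */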
5110 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5111 	if (rc == 0)
5112 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5113 	if (rc != 0)
5114 		return (rc);
5115 
5116 	t4_read_cimq_cfg(sc, base, size, thres);
5117 
5118 	rc = sysctl_wire_old_buffer(req, 0);
5119 	if (rc != 0)
5120 		return (rc);
5121 
5122 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5123 	if (sb == NULL)
5124 		return (ENOMEM);
5125 
5126 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5127 
5128 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5129 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5130 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5131 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5132 		    G_QUEREMFLITS(p[2]) * 16);
5133 	for ( ; i < nq; i++, p += 4, wr += 2)
5134 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5135 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5136 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5137 		    G_QUEREMFLITS(p[2]) * 16);
5138 
5139 	rc = sbuf_finish(sb);
5140 	sbuf_delete(sb);
5141 
5142 	return (rc);
5143 }
5144 
5145 static int
5146 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5147 {
5148 	struct adapter *sc = arg1;
5149 	struct sbuf *sb;
5150 	int rc;
5151 	struct tp_cpl_stats stats;
5152 
5153 	rc = sysctl_wire_old_buffer(req, 0);
5154 	if (rc != 0)
5155 		return (rc);
5156 
5157 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5158 	if (sb == NULL)
5159 		return (ENOMEM);
5160 
5161 	t4_tp_get_cpl_stats(sc, &stats);
5162 
5163 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5164 	    "channel 3\n");
5165 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5166 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5167 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5168 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5169 
5170 	rc = sbuf_finish(sb);
5171 	sbuf_delete(sb);
5172 
5173 	return (rc);
5174 }
5175 
5176 static int
5177 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5178 {
5179 	struct adapter *sc = arg1;
5180 	struct sbuf *sb;
5181 	int rc;
5182 	struct tp_usm_stats stats;
5183 
5184 	rc = sysctl_wire_old_buffer(req, 0);
5185 	if (rc != 0)
5186 		return (rc);
5187 
5188 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5189 	if (sb == NULL)
5190 		return (ENOMEM);
5191 
5192 	t4_get_usm_stats(sc, &stats);
5193 
5194 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5195 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5196 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5197 
5198 	rc = sbuf_finish(sb);
5199 	sbuf_delete(sb);
5200 
5201 	return (rc);
5202 }
5203 
5204 const char *devlog_level_strings[] = {
5205 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5206 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5207 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5208 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5209 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5210 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5211 };
5212 
5213 const char *devlog_facility_strings[] = {
5214 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5215 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5216 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5217 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5218 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5219 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5220 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5221 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5222 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5223 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5224 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5225 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5226 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5227 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5228 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5229 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5230 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5231 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5232 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5233 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5234 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5235 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5236 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5237 };
5238 
5239 static int
5240 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5241 {
5242 	struct adapter *sc = arg1;
5243 	struct devlog_params *dparams = &sc->params.devlog;
5244 	struct fw_devlog_e *buf, *e;
5245 	int i, j, rc, nentries, first = 0;
5246 	struct sbuf *sb;
5247 	uint64_t ftstamp = UINT64_MAX;
5248 
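	/*
	 * Fall back to a default devlog location (32KB at 0x84000 in EDC0)
	 * if the firmware didn't report one.
	 */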
5249 	if (dparams->start == 0) {
5250 		dparams->memtype = 0;
5251 		dparams->start = 0x84000;
5252 		dparams->size = 32768;
5253 	}
5254 
5255 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5256 
5257 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5258 	if (buf == NULL)
5259 		return (ENOMEM);
5260 
5261 	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
5262 	    (void *)buf);
5263 	if (rc != 0)
5264 		goto done;
5265 
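	/*
	 * The log is a ring buffer; the valid entry with the lowest
	 * timestamp is the oldest and is where the dump should begin.
	 */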
5266 	for (i = 0; i < nentries; i++) {
5267 		e = &buf[i];
5268 
5269 		if (e->timestamp == 0)
5270 			break;	/* end */
5271 
5272 		e->timestamp = be64toh(e->timestamp);
5273 		e->seqno = be32toh(e->seqno);
5274 		for (j = 0; j < 8; j++)
5275 			e->params[j] = be32toh(e->params[j]);
5276 
5277 		if (e->timestamp < ftstamp) {
5278 			ftstamp = e->timestamp;
5279 			first = i;
5280 		}
5281 	}
5282 
5283 	if (buf[first].timestamp == 0)
5284 		goto done;	/* nothing in the log */
5285 
5286 	rc = sysctl_wire_old_buffer(req, 0);
5287 	if (rc != 0)
5288 		goto done;
5289 
5290 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5291 	if (sb == NULL) {
5292 		rc = ENOMEM;
5293 		goto done;
5294 	}
5295 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5296 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5297 
5298 	i = first;
5299 	do {
5300 		e = &buf[i];
5301 		if (e->timestamp == 0)
5302 			break;	/* end */
5303 
5304 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5305 		    e->seqno, e->timestamp,
5306 		    (e->level < nitems(devlog_level_strings) ?
5307 			devlog_level_strings[e->level] : "UNKNOWN"),
5308 		    (e->facility < nitems(devlog_facility_strings) ?
5309 			devlog_facility_strings[e->facility] : "UNKNOWN"));
5310 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5311 		    e->params[2], e->params[3], e->params[4],
5312 		    e->params[5], e->params[6], e->params[7]);
5313 
5314 		if (++i == nentries)
5315 			i = 0;
5316 	} while (i != first);
5317 
5318 	rc = sbuf_finish(sb);
5319 	sbuf_delete(sb);
5320 done:
5321 	free(buf, M_CXGBE);
5322 	return (rc);
5323 }
5324 
5325 static int
5326 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5327 {
5328 	struct adapter *sc = arg1;
5329 	struct sbuf *sb;
5330 	int rc;
5331 	struct tp_fcoe_stats stats[4];
5332 
5333 	rc = sysctl_wire_old_buffer(req, 0);
5334 	if (rc != 0)
5335 		return (rc);
5336 
5337 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5338 	if (sb == NULL)
5339 		return (ENOMEM);
5340 
5341 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5342 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5343 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5344 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5345 
5346 	sbuf_printf(sb, "                   channel 0        channel 1        "
5347 	    "channel 2        channel 3\n");
5348 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5349 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5350 	    stats[3].octetsDDP);
5351 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5352 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5353 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5354 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5355 	    stats[3].framesDrop);
5356 
5357 	rc = sbuf_finish(sb);
5358 	sbuf_delete(sb);
5359 
5360 	return (rc);
5361 }
5362 
5363 static int
5364 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5365 {
5366 	struct adapter *sc = arg1;
5367 	struct sbuf *sb;
5368 	int rc, i;
5369 	unsigned int map, kbps, ipg, mode;
5370 	unsigned int pace_tab[NTX_SCHED];
5371 
5372 	rc = sysctl_wire_old_buffer(req, 0);
5373 	if (rc != 0)
5374 		return (rc);
5375 
5376 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5377 	if (sb == NULL)
5378 		return (ENOMEM);
5379 
5380 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5381 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5382 	t4_read_pace_tbl(sc, pace_tab);
5383 
5384 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5385 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5386 
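	/*
	 * Each scheduler gets two bits of A_TP_TX_MOD_QUEUE_REQ_MAP for its
	 * channel; a zero rate, IPG, or pace value means that particular
	 * knob is disabled.
	 */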
5387 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5388 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5389 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5390 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5391 		if (kbps)
5392 			sbuf_printf(sb, "%9u     ", kbps);
5393 		else
5394 			sbuf_printf(sb, " disabled     ");
5395 
5396 		if (ipg)
5397 			sbuf_printf(sb, "%13u        ", ipg);
5398 		else
5399 			sbuf_printf(sb, "     disabled        ");
5400 
5401 		if (pace_tab[i])
5402 			sbuf_printf(sb, "%10u", pace_tab[i]);
5403 		else
5404 			sbuf_printf(sb, "  disabled");
5405 	}
5406 
5407 	rc = sbuf_finish(sb);
5408 	sbuf_delete(sb);
5409 
5410 	return (rc);
5411 }
5412 
5413 static int
5414 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5415 {
5416 	struct adapter *sc = arg1;
5417 	struct sbuf *sb;
5418 	int rc, i, j;
5419 	uint64_t *p0, *p1;
5420 	struct lb_port_stats s[2];
5421 	static const char *stat_name[] = {
5422 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5423 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5424 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5425 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5426 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5427 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5428 		"BG2FramesTrunc:", "BG3FramesTrunc:"
5429 	};
5430 
5431 	rc = sysctl_wire_old_buffer(req, 0);
5432 	if (rc != 0)
5433 		return (rc);
5434 
5435 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5436 	if (sb == NULL)
5437 		return (ENOMEM);
5438 
5439 	memset(s, 0, sizeof(s));
5440 
5441 	for (i = 0; i < 4; i += 2) {
5442 		t4_get_lb_stats(sc, i, &s[0]);
5443 		t4_get_lb_stats(sc, i + 1, &s[1]);
5444 
5445 		p0 = &s[0].octets;
5446 		p1 = &s[1].octets;
5447 		sbuf_printf(sb, "%s                       Loopback %u"
5448 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5449 
5450 		for (j = 0; j < nitems(stat_name); j++)
5451 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5452 				   *p0++, *p1++);
5453 	}
5454 
5455 	rc = sbuf_finish(sb);
5456 	sbuf_delete(sb);
5457 
5458 	return (rc);
5459 }
5460 
5461 struct mem_desc {
5462 	unsigned int base;
5463 	unsigned int limit;
5464 	unsigned int idx;
5465 };
5466 
5467 static int
5468 mem_desc_cmp(const void *a, const void *b)
5469 {
5470 	const struct mem_desc *d1 = a, *d2 = b;
5471 	/* Compare explicitly; subtracting bases can overflow an int. */
5472 	return (d1->base < d2->base ? -1 : d1->base > d2->base);
}
5473 
5474 static void
5475 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5476     unsigned int to)
5477 {
5478 	unsigned int size;
5479 
5480 	size = to - from + 1;
5481 	if (size == 0)
5482 		return;
5483 
5484 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5485 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5486 }
5487 
5488 static int
5489 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5490 {
5491 	struct adapter *sc = arg1;
5492 	struct sbuf *sb;
5493 	int rc, i, n;
5494 	uint32_t lo, hi, used, alloc;
5495 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5496 	static const char *region[] = {
5497 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5498 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5499 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5500 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5501 		"RQUDP region:", "PBL region:", "TXPBL region:",
5502 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5503 		"On-chip queues:"
5504 	};
5505 	struct mem_desc avail[4];
5506 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5507 	struct mem_desc *md = mem;
5508 
5509 	rc = sysctl_wire_old_buffer(req, 0);
5510 	if (rc != 0)
5511 		return (rc);
5512 
5513 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5514 	if (sb == NULL)
5515 		return (ENOMEM);
5516 
5517 	for (i = 0; i < nitems(mem); i++) {
5518 		mem[i].limit = 0;
5519 		mem[i].idx = i;
5520 	}
5521 
5522 	/* Find and sort the populated memory ranges */
5523 	i = 0;
5524 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5525 	if (lo & F_EDRAM0_ENABLE) {
5526 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5527 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5528 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5529 		avail[i].idx = 0;
5530 		i++;
5531 	}
5532 	if (lo & F_EDRAM1_ENABLE) {
5533 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5534 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5535 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5536 		avail[i].idx = 1;
5537 		i++;
5538 	}
5539 	if (lo & F_EXT_MEM_ENABLE) {
5540 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5541 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5542 		avail[i].limit = avail[i].base +
5543 		    (G_EXT_MEM_SIZE(hi) << 20);
5544 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5545 		i++;
5546 	}
5547 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5548 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5549 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5550 		avail[i].limit = avail[i].base +
5551 		    (G_EXT_MEM1_SIZE(hi) << 20);
5552 		avail[i].idx = 4;
5553 		i++;
5554 	}
5555 	if (!i) {                                  /* no memory available */
5556 		sbuf_delete(sb);
		return (0);
	}
5557 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5558 
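	/*
	 * Regions with no explicit size; their limits are filled in later
	 * from the base of whatever region follows (see the display loop).
	 */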
5559 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5560 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5561 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5562 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5563 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5564 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5565 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5566 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5567 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5568 
5569 	/* the next few have explicit upper bounds */
5570 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5571 	md->limit = md->base - 1 +
5572 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5573 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5574 	md++;
5575 
5576 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5577 	md->limit = md->base - 1 +
5578 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5579 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5580 	md++;
5581 
5582 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5583 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5584 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5585 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5586 	} else {
5587 		md->base = 0;
5588 		md->idx = nitems(region);  /* hide it */
5589 	}
5590 	md++;
5591 
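/* Regions whose lower and upper limits are both read from registers. */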
5592 #define ulp_region(reg) \
5593 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5594 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5595 
5596 	ulp_region(RX_ISCSI);
5597 	ulp_region(RX_TDDP);
5598 	ulp_region(TX_TPT);
5599 	ulp_region(RX_STAG);
5600 	ulp_region(RX_RQ);
5601 	ulp_region(RX_RQUDP);
5602 	ulp_region(RX_PBL);
5603 	ulp_region(TX_PBL);
5604 #undef ulp_region
5605 
5606 	md->base = 0;
5607 	md->idx = nitems(region);
5608 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5609 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5610 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5611 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5612 	}
5613 	md++;
5614 
5615 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5616 	md->limit = md->base + sc->tids.ntids - 1;
5617 	md++;
5618 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5619 	md->limit = md->base + sc->tids.ntids - 1;
5620 	md++;
5621 
5622 	md->base = sc->vres.ocq.start;
5623 	if (sc->vres.ocq.size)
5624 		md->limit = md->base + sc->vres.ocq.size - 1;
5625 	else
5626 		md->idx = nitems(region);  /* hide it */
5627 	md++;
5628 
5629 	/* add any address-space holes; there can be up to 3 */
5630 	for (n = 0; n < i - 1; n++)
5631 		if (avail[n].limit < avail[n + 1].base)
5632 			(md++)->base = avail[n].limit;
5633 	if (avail[n].limit)
5634 		(md++)->base = avail[n].limit;
5635 
5636 	n = md - mem;
5637 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5638 
5639 	for (lo = 0; lo < i; lo++)
5640 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5641 				avail[lo].limit - 1);
5642 
5643 	sbuf_printf(sb, "\n");
5644 	for (i = 0; i < n; i++) {
5645 		if (mem[i].idx >= nitems(region))
5646 			continue;                        /* skip holes */
5647 		if (!mem[i].limit)
5648 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5649 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5650 				mem[i].limit);
5651 	}
5652 
5653 	sbuf_printf(sb, "\n");
5654 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5655 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5656 	mem_region_show(sb, "uP RAM:", lo, hi);
5657 
5658 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5659 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5660 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5661 
5662 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5663 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5664 		   G_PMRXMAXPAGE(lo),
5665 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5666 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5667 
5668 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5669 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5670 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5671 		   G_PMTXMAXPAGE(lo),
5672 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5673 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5674 	sbuf_printf(sb, "%u p-structs\n",
5675 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5676 
5677 	for (i = 0; i < 4; i++) {
5678 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5679 		if (is_t4(sc)) {
5680 			used = G_USED(lo);
5681 			alloc = G_ALLOC(lo);
5682 		} else {
5683 			used = G_T5_USED(lo);
5684 			alloc = G_T5_ALLOC(lo);
5685 		}
5686 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5687 			   i, used, alloc);
5688 	}
5689 	for (i = 0; i < 4; i++) {
5690 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5691 		if (is_t4(sc)) {
5692 			used = G_USED(lo);
5693 			alloc = G_ALLOC(lo);
5694 		} else {
5695 			used = G_T5_USED(lo);
5696 			alloc = G_T5_ALLOC(lo);
5697 		}
5698 		sbuf_printf(sb,
5699 			   "\nLoopback %d using %u pages out of %u allocated",
5700 			   i, used, alloc);
5701 	}
5702 
5703 	rc = sbuf_finish(sb);
5704 	sbuf_delete(sb);
5705 
5706 	return (rc);
5707 }
5708 
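/*
 * Recover a value/mask pair from the TCAM's x/y encoding: the care-mask is
 * x | y and the matched value is y.  The low 48 bits of y, in network byte
 * order, are the Ethernet address.  Entries with x & y != 0 are unused and
 * are skipped by the caller.
 */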
5709 static inline void
5710 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5711 {
5712 	*mask = x | y;
5713 	y = htobe64(y);
5714 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5715 }
5716 
5717 static int
5718 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5719 {
5720 	struct adapter *sc = arg1;
5721 	struct sbuf *sb;
5722 	int rc, i, n;
5723 
5724 	rc = sysctl_wire_old_buffer(req, 0);
5725 	if (rc != 0)
5726 		return (rc);
5727 
5728 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5729 	if (sb == NULL)
5730 		return (ENOMEM);
5731 
5732 	sbuf_printf(sb,
5733 	    "Idx  Ethernet address     Mask     Vld Ports PF"
5734 	    "  VF              Replication             P0 P1 P2 P3  ML");
5735 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5736 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5737 	for (i = 0; i < n; i++) {
5738 		uint64_t tcamx, tcamy, mask;
5739 		uint32_t cls_lo, cls_hi;
5740 		uint8_t addr[ETHER_ADDR_LEN];
5741 
5742 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5743 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5744 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5745 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5746 
5747 		if (tcamx & tcamy)
5748 			continue;
5749 
5750 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5751 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5752 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5753 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5754 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5755 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5756 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5757 
5758 		if (cls_lo & F_REPLICATE) {
5759 			struct fw_ldst_cmd ldst_cmd;
5760 
5761 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5762 			ldst_cmd.op_to_addrspace =
5763 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5764 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5765 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5766 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5767 			ldst_cmd.u.mps.fid_ctl =
5768 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5769 				V_FW_LDST_CMD_CTL(i));
5770 
5771 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5772 			    "t4mps");
5773 			if (rc)
5774 				break;
5775 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5776 			    sizeof(ldst_cmd), &ldst_cmd);
5777 			end_synchronized_op(sc, 0);
5778 
5779 			if (rc != 0) {
5780 				sbuf_printf(sb,
5781 				    " ------------ error %3u ------------", rc);
5782 				rc = 0;
5783 			} else {
5784 				sbuf_printf(sb, " %08x %08x %08x %08x",
5785 				    be32toh(ldst_cmd.u.mps.rplc127_96),
5786 				    be32toh(ldst_cmd.u.mps.rplc95_64),
5787 				    be32toh(ldst_cmd.u.mps.rplc63_32),
5788 				    be32toh(ldst_cmd.u.mps.rplc31_0));
5789 			}
5790 		} else
5791 			sbuf_printf(sb, "%36s", "");
5792 
5793 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5794 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5795 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5796 	}
5797 
5798 	if (rc)
5799 		(void) sbuf_finish(sb);
5800 	else
5801 		rc = sbuf_finish(sb);
5802 	sbuf_delete(sb);
5803 
5804 	return (rc);
5805 }
5806 
5807 static int
5808 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5809 {
5810 	struct adapter *sc = arg1;
5811 	struct sbuf *sb;
5812 	int rc;
5813 	uint16_t mtus[NMTUS];
5814 
5815 	rc = sysctl_wire_old_buffer(req, 0);
5816 	if (rc != 0)
5817 		return (rc);
5818 
5819 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5820 	if (sb == NULL)
5821 		return (ENOMEM);
5822 
5823 	t4_read_mtu_tbl(sc, mtus, NULL);
5824 
5825 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5826 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5827 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5828 	    mtus[14], mtus[15]);
5829 
5830 	rc = sbuf_finish(sb);
5831 	sbuf_delete(sb);
5832 
5833 	return (rc);
5834 }
5835 
5836 static int
5837 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5838 {
5839 	struct adapter *sc = arg1;
5840 	struct sbuf *sb;
5841 	int rc, i;
5842 	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5843 	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5844 	static const char *pm_stats[] = {
5845 		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5846 	};
5847 
5848 	rc = sysctl_wire_old_buffer(req, 0);
5849 	if (rc != 0)
5850 		return (rc);
5851 
5852 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5853 	if (sb == NULL)
5854 		return (ENOMEM);
5855 
5856 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5857 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5858 
5859 	sbuf_printf(sb, "                Tx count            Tx cycles    "
5860 	    "Rx count            Rx cycles");
5861 	for (i = 0; i < PM_NSTATS; i++)
5862 		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5863 		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5864 
5865 	rc = sbuf_finish(sb);
5866 	sbuf_delete(sb);
5867 
5868 	return (rc);
5869 }
5870 
5871 static int
5872 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5873 {
5874 	struct adapter *sc = arg1;
5875 	struct sbuf *sb;
5876 	int rc;
5877 	struct tp_rdma_stats stats;
5878 
5879 	rc = sysctl_wire_old_buffer(req, 0);
5880 	if (rc != 0)
5881 		return (rc);
5882 
5883 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5884 	if (sb == NULL)
5885 		return (ENOMEM);
5886 
5887 	t4_tp_get_rdma_stats(sc, &stats);
5888 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5889 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5890 
5891 	rc = sbuf_finish(sb);
5892 	sbuf_delete(sb);
5893 
5894 	return (rc);
5895 }
5896 
5897 static int
5898 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5899 {
5900 	struct adapter *sc = arg1;
5901 	struct sbuf *sb;
5902 	int rc;
5903 	struct tp_tcp_stats v4, v6;
5904 
5905 	rc = sysctl_wire_old_buffer(req, 0);
5906 	if (rc != 0)
5907 		return (rc);
5908 
5909 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5910 	if (sb == NULL)
5911 		return (ENOMEM);
5912 
5913 	t4_tp_get_tcp_stats(sc, &v4, &v6);
5914 	sbuf_printf(sb,
5915 	    "                                IP                 IPv6\n");
5916 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
5917 	    v4.tcpOutRsts, v6.tcpOutRsts);
5918 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
5919 	    v4.tcpInSegs, v6.tcpInSegs);
5920 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
5921 	    v4.tcpOutSegs, v6.tcpOutSegs);
5922 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
5923 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
5924 
5925 	rc = sbuf_finish(sb);
5926 	sbuf_delete(sb);
5927 
5928 	return (rc);
5929 }
5930 
5931 static int
5932 sysctl_tids(SYSCTL_HANDLER_ARGS)
5933 {
5934 	struct adapter *sc = arg1;
5935 	struct sbuf *sb;
5936 	int rc;
5937 	struct tid_info *t = &sc->tids;
5938 
5939 	rc = sysctl_wire_old_buffer(req, 0);
5940 	if (rc != 0)
5941 		return (rc);
5942 
5943 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5944 	if (sb == NULL)
5945 		return (ENOMEM);
5946 
5947 	if (t->natids) {
5948 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
5949 		    t->atids_in_use);
5950 	}
5951 
5952 	if (t->ntids) {
5953 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5954 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
5955 
5956 			if (b) {
5957 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
5958 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5959 				    t->ntids - 1);
5960 			} else {
5961 				sbuf_printf(sb, "TID range: %u-%u",
5962 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5963 				    t->ntids - 1);
5964 			}
5965 		} else
5966 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
5967 		sbuf_printf(sb, ", in use: %u\n",
5968 		    atomic_load_acq_int(&t->tids_in_use));
5969 	}
5970 
5971 	if (t->nstids) {
5972 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
5973 		    t->stid_base + t->nstids - 1, t->stids_in_use);
5974 	}
5975 
5976 	if (t->nftids) {
5977 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
5978 		    t->ftid_base + t->nftids - 1);
5979 	}
5980 
5981 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
5982 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
5983 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
5984 
5985 	rc = sbuf_finish(sb);
5986 	sbuf_delete(sb);
5987 
5988 	return (rc);
5989 }
5990 
5991 static int
5992 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
5993 {
5994 	struct adapter *sc = arg1;
5995 	struct sbuf *sb;
5996 	int rc;
5997 	struct tp_err_stats stats;
5998 
5999 	rc = sysctl_wire_old_buffer(req, 0);
6000 	if (rc != 0)
6001 		return (rc);
6002 
6003 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6004 	if (sb == NULL)
6005 		return (ENOMEM);
6006 
6007 	t4_tp_get_err_stats(sc, &stats);
6008 
6009 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6010 		      "channel 3\n");
6011 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6012 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6013 	    stats.macInErrs[3]);
6014 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6015 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6016 	    stats.hdrInErrs[3]);
6017 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6018 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6019 	    stats.tcpInErrs[3]);
6020 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6021 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6022 	    stats.tcp6InErrs[3]);
6023 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6024 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6025 	    stats.tnlCongDrops[3]);
6026 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6027 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6028 	    stats.tnlTxDrops[3]);
6029 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6030 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6031 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6032 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6033 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6034 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6035 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6036 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6037 
6038 	rc = sbuf_finish(sb);
6039 	sbuf_delete(sb);
6040 
6041 	return (rc);
6042 }
6043 
6044 struct field_desc {
6045 	const char *name;
6046 	u_int start;
6047 	u_int width;
6048 };
6049 
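/*
 * Decode and print the named bit-fields of a 64-bit value.  The field_desc
 * array is terminated by an entry with a NULL name; output wraps at roughly
 * 79 columns.
 */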
6050 static void
6051 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6052 {
6053 	char buf[32];
6054 	int line_size = 0;
6055 
6056 	while (f->name) {
6057 		uint64_t mask = (1ULL << f->width) - 1;
6058 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6059 		    ((uintmax_t)v >> f->start) & mask);
6060 
6061 		if (line_size + len >= 79) {
6062 			line_size = 8;
6063 			sbuf_printf(sb, "\n        ");
6064 		}
6065 		sbuf_printf(sb, "%s ", buf);
6066 		line_size += len + 1;
6067 		f++;
6068 	}
6069 	sbuf_printf(sb, "\n");
6070 }
6071 
6072 static struct field_desc tp_la0[] = {
6073 	{ "RcfOpCodeOut", 60, 4 },
6074 	{ "State", 56, 4 },
6075 	{ "WcfState", 52, 4 },
6076 	{ "RcfOpcSrcOut", 50, 2 },
6077 	{ "CRxError", 49, 1 },
6078 	{ "ERxError", 48, 1 },
6079 	{ "SanityFailed", 47, 1 },
6080 	{ "SpuriousMsg", 46, 1 },
6081 	{ "FlushInputMsg", 45, 1 },
6082 	{ "FlushInputCpl", 44, 1 },
6083 	{ "RssUpBit", 43, 1 },
6084 	{ "RssFilterHit", 42, 1 },
6085 	{ "Tid", 32, 10 },
6086 	{ "InitTcb", 31, 1 },
6087 	{ "LineNumber", 24, 7 },
6088 	{ "Emsg", 23, 1 },
6089 	{ "EdataOut", 22, 1 },
6090 	{ "Cmsg", 21, 1 },
6091 	{ "CdataOut", 20, 1 },
6092 	{ "EreadPdu", 19, 1 },
6093 	{ "CreadPdu", 18, 1 },
6094 	{ "TunnelPkt", 17, 1 },
6095 	{ "RcfPeerFin", 16, 1 },
6096 	{ "RcfReasonOut", 12, 4 },
6097 	{ "TxCchannel", 10, 2 },
6098 	{ "RcfTxChannel", 8, 2 },
6099 	{ "RxEchannel", 6, 2 },
6100 	{ "RcfRxChannel", 5, 1 },
6101 	{ "RcfDataOutSrdy", 4, 1 },
6102 	{ "RxDvld", 3, 1 },
6103 	{ "RxOoDvld", 2, 1 },
6104 	{ "RxCongestion", 1, 1 },
6105 	{ "TxCongestion", 0, 1 },
6106 	{ NULL }
6107 };
6108 
6109 static struct field_desc tp_la1[] = {
6110 	{ "CplCmdIn", 56, 8 },
6111 	{ "CplCmdOut", 48, 8 },
6112 	{ "ESynOut", 47, 1 },
6113 	{ "EAckOut", 46, 1 },
6114 	{ "EFinOut", 45, 1 },
6115 	{ "ERstOut", 44, 1 },
6116 	{ "SynIn", 43, 1 },
6117 	{ "AckIn", 42, 1 },
6118 	{ "FinIn", 41, 1 },
6119 	{ "RstIn", 40, 1 },
6120 	{ "DataIn", 39, 1 },
6121 	{ "DataInVld", 38, 1 },
6122 	{ "PadIn", 37, 1 },
6123 	{ "RxBufEmpty", 36, 1 },
6124 	{ "RxDdp", 35, 1 },
6125 	{ "RxFbCongestion", 34, 1 },
6126 	{ "TxFbCongestion", 33, 1 },
6127 	{ "TxPktSumSrdy", 32, 1 },
6128 	{ "RcfUlpType", 28, 4 },
6129 	{ "Eread", 27, 1 },
6130 	{ "Ebypass", 26, 1 },
6131 	{ "Esave", 25, 1 },
6132 	{ "Static0", 24, 1 },
6133 	{ "Cread", 23, 1 },
6134 	{ "Cbypass", 22, 1 },
6135 	{ "Csave", 21, 1 },
6136 	{ "CPktOut", 20, 1 },
6137 	{ "RxPagePoolFull", 18, 2 },
6138 	{ "RxLpbkPkt", 17, 1 },
6139 	{ "TxLpbkPkt", 16, 1 },
6140 	{ "RxVfValid", 15, 1 },
6141 	{ "SynLearned", 14, 1 },
6142 	{ "SetDelEntry", 13, 1 },
6143 	{ "SetInvEntry", 12, 1 },
6144 	{ "CpcmdDvld", 11, 1 },
6145 	{ "CpcmdSave", 10, 1 },
6146 	{ "RxPstructsFull", 8, 2 },
6147 	{ "EpcmdDvld", 7, 1 },
6148 	{ "EpcmdFlush", 6, 1 },
6149 	{ "EpcmdTrimPrefix", 5, 1 },
6150 	{ "EpcmdTrimPostfix", 4, 1 },
6151 	{ "ERssIp4Pkt", 3, 1 },
6152 	{ "ERssIp6Pkt", 2, 1 },
6153 	{ "ERssTcpUdpPkt", 1, 1 },
6154 	{ "ERssFceFipPkt", 0, 1 },
6155 	{ NULL }
6156 };
6157 
6158 static struct field_desc tp_la2[] = {
6159 	{ "CplCmdIn", 56, 8 },
6160 	{ "MpsVfVld", 55, 1 },
6161 	{ "MpsPf", 52, 3 },
6162 	{ "MpsVf", 44, 8 },
6163 	{ "SynIn", 43, 1 },
6164 	{ "AckIn", 42, 1 },
6165 	{ "FinIn", 41, 1 },
6166 	{ "RstIn", 40, 1 },
6167 	{ "DataIn", 39, 1 },
6168 	{ "DataInVld", 38, 1 },
6169 	{ "PadIn", 37, 1 },
6170 	{ "RxBufEmpty", 36, 1 },
6171 	{ "RxDdp", 35, 1 },
6172 	{ "RxFbCongestion", 34, 1 },
6173 	{ "TxFbCongestion", 33, 1 },
6174 	{ "TxPktSumSrdy", 32, 1 },
6175 	{ "RcfUlpType", 28, 4 },
6176 	{ "Eread", 27, 1 },
6177 	{ "Ebypass", 26, 1 },
6178 	{ "Esave", 25, 1 },
6179 	{ "Static0", 24, 1 },
6180 	{ "Cread", 23, 1 },
6181 	{ "Cbypass", 22, 1 },
6182 	{ "Csave", 21, 1 },
6183 	{ "CPktOut", 20, 1 },
6184 	{ "RxPagePoolFull", 18, 2 },
6185 	{ "RxLpbkPkt", 17, 1 },
6186 	{ "TxLpbkPkt", 16, 1 },
6187 	{ "RxVfValid", 15, 1 },
6188 	{ "SynLearned", 14, 1 },
6189 	{ "SetDelEntry", 13, 1 },
6190 	{ "SetInvEntry", 12, 1 },
6191 	{ "CpcmdDvld", 11, 1 },
6192 	{ "CpcmdSave", 10, 1 },
6193 	{ "RxPstructsFull", 8, 2 },
6194 	{ "EpcmdDvld", 7, 1 },
6195 	{ "EpcmdFlush", 6, 1 },
6196 	{ "EpcmdTrimPrefix", 5, 1 },
6197 	{ "EpcmdTrimPostfix", 4, 1 },
6198 	{ "ERssIp4Pkt", 3, 1 },
6199 	{ "ERssIp6Pkt", 2, 1 },
6200 	{ "ERssTcpUdpPkt", 1, 1 },
6201 	{ "ERssFceFipPkt", 0, 1 },
6202 	{ NULL }
6203 };
6204 
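/*
 * The TP logic analyzer captures 64-bit entries.  Depending on DBGLAMODE
 * they are decoded one at a time (tp_la_show) or in pairs (tp_la_show2 for
 * mode 2, tp_la_show3 for mode 3); in mode 3, bit 17 of the first word of a
 * pair selects the decode of the second.
 */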
6205 static void
6206 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6207 {
6208 
6209 	field_desc_show(sb, *p, tp_la0);
6210 }
6211 
6212 static void
6213 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6214 {
6215 
6216 	if (idx)
6217 		sbuf_printf(sb, "\n");
6218 	field_desc_show(sb, p[0], tp_la0);
6219 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6220 		field_desc_show(sb, p[1], tp_la0);
6221 }
6222 
6223 static void
6224 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6225 {
6226 
6227 	if (idx)
6228 		sbuf_printf(sb, "\n");
6229 	field_desc_show(sb, p[0], tp_la0);
6230 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6231 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6232 }
6233 
6234 static int
6235 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6236 {
6237 	struct adapter *sc = arg1;
6238 	struct sbuf *sb;
6239 	uint64_t *buf, *p;
6240 	int rc;
6241 	u_int i, inc;
6242 	void (*show_func)(struct sbuf *, uint64_t *, int);
6243 
6244 	rc = sysctl_wire_old_buffer(req, 0);
6245 	if (rc != 0)
6246 		return (rc);
6247 
6248 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6249 	if (sb == NULL)
6250 		return (ENOMEM);
6251 
6252 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6253 
6254 	t4_tp_read_la(sc, buf, NULL);
6255 	p = buf;
6256 
6257 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6258 	case 2:
6259 		inc = 2;
6260 		show_func = tp_la_show2;
6261 		break;
6262 	case 3:
6263 		inc = 2;
6264 		show_func = tp_la_show3;
6265 		break;
6266 	default:
6267 		inc = 1;
6268 		show_func = tp_la_show;
6269 	}
6270 
6271 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6272 		(*show_func)(sb, p, i);
6273 
6274 	rc = sbuf_finish(sb);
6275 	sbuf_delete(sb);
6276 	free(buf, M_CXGBE);
6277 	return (rc);
6278 }
6279 
6280 static int
6281 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6282 {
6283 	struct adapter *sc = arg1;
6284 	struct sbuf *sb;
6285 	int rc;
6286 	u64 nrate[NCHAN], orate[NCHAN];
6287 
6288 	rc = sysctl_wire_old_buffer(req, 0);
6289 	if (rc != 0)
6290 		return (rc);
6291 
6292 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6293 	if (sb == NULL)
6294 		return (ENOMEM);
6295 
6296 	t4_get_chan_txrate(sc, nrate, orate);
6297 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6298 		 "channel 3\n");
6299 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6300 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6301 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6302 	    orate[0], orate[1], orate[2], orate[3]);
6303 
6304 	rc = sbuf_finish(sb);
6305 	sbuf_delete(sb);
6306 
6307 	return (rc);
6308 }
6309 
6310 static int
6311 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6312 {
6313 	struct adapter *sc = arg1;
6314 	struct sbuf *sb;
6315 	uint32_t *buf, *p;
6316 	int rc, i;
6317 
6318 	rc = sysctl_wire_old_buffer(req, 0);
6319 	if (rc != 0)
6320 		return (rc);
6321 
6322 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6323 	if (sb == NULL)
6324 		return (ENOMEM);
6325 
6326 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6327 	    M_ZERO | M_WAITOK);
6328 
6329 	t4_ulprx_read_la(sc, buf);
6330 	p = buf;
6331 
6332 	sbuf_printf(sb, "      Pcmd        Type   Message"
6333 	    "                Data");
6334 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6335 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6336 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6337 	}
6338 
6339 	rc = sbuf_finish(sb);
6340 	sbuf_delete(sb);
6341 	free(buf, M_CXGBE);
6342 	return (rc);
6343 }
6344 
6345 static int
6346 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6347 {
6348 	struct adapter *sc = arg1;
6349 	struct sbuf *sb;
6350 	int rc, v;
6351 
6352 	rc = sysctl_wire_old_buffer(req, 0);
6353 	if (rc != 0)
6354 		return (rc);
6355 
6356 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6357 	if (sb == NULL)
6358 		return (ENOMEM);
6359 
6360 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6361 	if (G_STATSOURCE_T5(v) == 7) {
6362 		if (G_STATMODE(v) == 0) {
6363 			sbuf_printf(sb, "total %d, incomplete %d",
6364 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6365 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6366 		} else if (G_STATMODE(v) == 1) {
6367 			sbuf_printf(sb, "total %d, data overflow %d",
6368 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6369 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6370 		}
6371 	}
6372 	rc = sbuf_finish(sb);
6373 	sbuf_delete(sb);
6374 
6375 	return (rc);
6376 }
6377 #endif
6378 
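/*
 * Restart transmission on a tx queue: resume with the held-over mbuf if
 * there is one, else dequeue from the buf_ring.  Runs with the txq lock
 * held.
 */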
6379 static inline void
6380 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6381 {
6382 	struct buf_ring *br;
6383 	struct mbuf *m;
6384 
6385 	TXQ_LOCK_ASSERT_OWNED(txq);
6386 
6387 	br = txq->br;
6388 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6389 	if (m)
6390 		t4_eth_tx(ifp, txq, m);
6391 }
6392 
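/*
 * Callout handler for a stalled tx queue.  If the eq lock is unavailable,
 * or the queue is stalled and can't resume yet, try again on the next tick
 * (unless the queue is doomed); otherwise hand off to the queue's tx task.
 */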
6393 void
6394 t4_tx_callout(void *arg)
6395 {
6396 	struct sge_eq *eq = arg;
6397 	struct adapter *sc;
6398 
6399 	if (EQ_TRYLOCK(eq) == 0)
6400 		goto reschedule;
6401 
6402 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6403 		EQ_UNLOCK(eq);
6404 reschedule:
6405 		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6406 			callout_schedule(&eq->tx_callout, 1);
6407 		return;
6408 	}
6409 
6410 	EQ_LOCK_ASSERT_OWNED(eq);
6411 
6412 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6413 
6414 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6415 			struct sge_txq *txq = arg;
6416 			struct port_info *pi = txq->ifp->if_softc;
6417 
6418 			sc = pi->adapter;
6419 		} else {
6420 			struct sge_wrq *wrq = arg;
6421 
6422 			sc = wrq->adapter;
6423 		}
6424 
6425 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6426 	}
6427 
6428 	EQ_UNLOCK(eq);
6429 }
6430 
6431 void
6432 t4_tx_task(void *arg, int count)
6433 {
6434 	struct sge_eq *eq = arg;
6435 
6436 	EQ_LOCK(eq);
6437 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6438 		struct sge_txq *txq = arg;
6439 		txq_start(txq->ifp, txq);
6440 	} else {
6441 		struct sge_wrq *wrq = arg;
6442 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6443 	}
6444 	EQ_UNLOCK(eq);
6445 }
6446 
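/*
 * The next few functions translate between the hardware's compressed filter
 * tuple (the TP_VLAN_PRI_MAP bits, "fconf") and the driver's T4_FILTER_*
 * mode flags.  The address/port 4-tuple and IP version bits are always
 * available and have no fconf equivalent.
 */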
6447 static uint32_t
6448 fconf_to_mode(uint32_t fconf)
6449 {
6450 	uint32_t mode;
6451 
6452 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6453 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6454 
6455 	if (fconf & F_FRAGMENTATION)
6456 		mode |= T4_FILTER_IP_FRAGMENT;
6457 
6458 	if (fconf & F_MPSHITTYPE)
6459 		mode |= T4_FILTER_MPS_HIT_TYPE;
6460 
6461 	if (fconf & F_MACMATCH)
6462 		mode |= T4_FILTER_MAC_IDX;
6463 
6464 	if (fconf & F_ETHERTYPE)
6465 		mode |= T4_FILTER_ETH_TYPE;
6466 
6467 	if (fconf & F_PROTOCOL)
6468 		mode |= T4_FILTER_IP_PROTO;
6469 
6470 	if (fconf & F_TOS)
6471 		mode |= T4_FILTER_IP_TOS;
6472 
6473 	if (fconf & F_VLAN)
6474 		mode |= T4_FILTER_VLAN;
6475 
6476 	if (fconf & F_VNIC_ID)
6477 		mode |= T4_FILTER_VNIC;
6478 
6479 	if (fconf & F_PORT)
6480 		mode |= T4_FILTER_PORT;
6481 
6482 	if (fconf & F_FCOE)
6483 		mode |= T4_FILTER_FCoE;
6484 
6485 	return (mode);
6486 }
6487 
6488 static uint32_t
6489 mode_to_fconf(uint32_t mode)
6490 {
6491 	uint32_t fconf = 0;
6492 
6493 	if (mode & T4_FILTER_IP_FRAGMENT)
6494 		fconf |= F_FRAGMENTATION;
6495 
6496 	if (mode & T4_FILTER_MPS_HIT_TYPE)
6497 		fconf |= F_MPSHITTYPE;
6498 
6499 	if (mode & T4_FILTER_MAC_IDX)
6500 		fconf |= F_MACMATCH;
6501 
6502 	if (mode & T4_FILTER_ETH_TYPE)
6503 		fconf |= F_ETHERTYPE;
6504 
6505 	if (mode & T4_FILTER_IP_PROTO)
6506 		fconf |= F_PROTOCOL;
6507 
6508 	if (mode & T4_FILTER_IP_TOS)
6509 		fconf |= F_TOS;
6510 
6511 	if (mode & T4_FILTER_VLAN)
6512 		fconf |= F_VLAN;
6513 
6514 	if (mode & T4_FILTER_VNIC)
6515 		fconf |= F_VNIC_ID;
6516 
6517 	if (mode & T4_FILTER_PORT)
6518 		fconf |= F_PORT;
6519 
6520 	if (mode & T4_FILTER_FCoE)
6521 		fconf |= F_FCOE;
6522 
6523 	return (fconf);
6524 }
6525 
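/*
 * Compute the fconf bits that a filter specification actually uses, so that
 * set_filter() can check it against the global filter mode.
 */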
6526 static uint32_t
6527 fspec_to_fconf(struct t4_filter_specification *fs)
6528 {
6529 	uint32_t fconf = 0;
6530 
6531 	if (fs->val.frag || fs->mask.frag)
6532 		fconf |= F_FRAGMENTATION;
6533 
6534 	if (fs->val.matchtype || fs->mask.matchtype)
6535 		fconf |= F_MPSHITTYPE;
6536 
6537 	if (fs->val.macidx || fs->mask.macidx)
6538 		fconf |= F_MACMATCH;
6539 
6540 	if (fs->val.ethtype || fs->mask.ethtype)
6541 		fconf |= F_ETHERTYPE;
6542 
6543 	if (fs->val.proto || fs->mask.proto)
6544 		fconf |= F_PROTOCOL;
6545 
6546 	if (fs->val.tos || fs->mask.tos)
6547 		fconf |= F_TOS;
6548 
6549 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6550 		fconf |= F_VLAN;
6551 
6552 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6553 		fconf |= F_VNIC_ID;
6554 
6555 	if (fs->val.iport || fs->mask.iport)
6556 		fconf |= F_PORT;
6557 
6558 	if (fs->val.fcoe || fs->mask.fcoe)
6559 		fconf |= F_FCOE;
6560 
6561 	return (fconf);
6562 }
6563 
6564 static int
6565 get_filter_mode(struct adapter *sc, uint32_t *mode)
6566 {
6567 	int rc;
6568 	uint32_t fconf;
6569 
6570 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6571 	    "t4getfm");
6572 	if (rc)
6573 		return (rc);
6574 
6575 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6576 	    A_TP_VLAN_PRI_MAP);
6577 
6578 	if (sc->params.tp.vlan_pri_map != fconf) {
6579 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6580 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6581 		    fconf);
6582 		sc->params.tp.vlan_pri_map = fconf;
6583 	}
6584 
6585 	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6586 
6587 	end_synchronized_op(sc, LOCK_HELD);
6588 	return (0);
6589 }
6590 
6591 static int
6592 set_filter_mode(struct adapter *sc, uint32_t mode)
6593 {
6594 	uint32_t fconf;
6595 	int rc;
6596 
6597 	fconf = mode_to_fconf(mode);
6598 
6599 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6600 	    "t4setfm");
6601 	if (rc)
6602 		return (rc);
6603 
6604 	if (sc->tids.ftids_in_use > 0) {
6605 		rc = EBUSY;
6606 		goto done;
6607 	}
6608 
6609 #ifdef TCP_OFFLOAD
6610 	if (sc->offload_map) {
6611 		rc = EBUSY;
6612 		goto done;
6613 	}
6614 #endif
6615 
6616 #ifdef notyet
6617 	rc = -t4_set_filter_mode(sc, fconf);
6618 	if (rc == 0)
6619 		sc->filter_mode = fconf;
6620 #else
6621 	rc = ENOTSUP;
6622 #endif
6623 
6624 done:
6625 	end_synchronized_op(sc, LOCK_HELD);
6626 	return (rc);
6627 }
6628 
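/*
 * Read a filter's hit count straight out of its TCB through memory window 0.
 * The count is a 64-bit field at byte offset 16 of the TCB on T4 and a
 * 32-bit field at offset 24 on T5.
 */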
6629 static inline uint64_t
6630 get_filter_hits(struct adapter *sc, uint32_t fid)
6631 {
6632 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6633 	uint64_t hits;
6634 
6635 	memwin_info(sc, 0, &mw_base, NULL);
6636 	off = position_memwin(sc, 0,
6637 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6638 	if (is_t4(sc)) {
6639 		hits = t4_read_reg64(sc, mw_base + off + 16);
6640 		hits = be64toh(hits);
6641 	} else {
6642 		hits = t4_read_reg(sc, mw_base + off + 24);
6643 		hits = be32toh(hits);
6644 	}
6645 
6646 	return (hits);
6647 }
6648 
6649 static int
6650 get_filter(struct adapter *sc, struct t4_filter *t)
6651 {
6652 	int i, rc, nfilters = sc->tids.nftids;
6653 	struct filter_entry *f;
6654 
6655 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6656 	    "t4getf");
6657 	if (rc)
6658 		return (rc);
6659 
6660 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6661 	    t->idx >= nfilters) {
6662 		t->idx = 0xffffffff;
6663 		goto done;
6664 	}
6665 
6666 	f = &sc->tids.ftid_tab[t->idx];
6667 	for (i = t->idx; i < nfilters; i++, f++) {
6668 		if (f->valid) {
6669 			t->idx = i;
6670 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6671 			t->smtidx = f->smtidx;
6672 			if (f->fs.hitcnts)
6673 				t->hits = get_filter_hits(sc, t->idx);
6674 			else
6675 				t->hits = UINT64_MAX;
6676 			t->fs = f->fs;
6677 
6678 			goto done;
6679 		}
6680 	}
6681 
6682 	t->idx = 0xffffffff;
6683 done:
6684 	end_synchronized_op(sc, LOCK_HELD);
6685 	return (0);
6686 }
6687 
6688 static int
6689 set_filter(struct adapter *sc, struct t4_filter *t)
6690 {
6691 	unsigned int nfilters, nports;
6692 	struct filter_entry *f;
6693 	int i, rc;
6694 
6695 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6696 	if (rc)
6697 		return (rc);
6698 
6699 	nfilters = sc->tids.nftids;
6700 	nports = sc->params.nports;
6701 
6702 	if (nfilters == 0) {
6703 		rc = ENOTSUP;
6704 		goto done;
6705 	}
6706 
6707 	if (!(sc->flags & FULL_INIT_DONE)) {
6708 		rc = EAGAIN;
6709 		goto done;
6710 	}
6711 
6712 	if (t->idx >= nfilters) {
6713 		rc = EINVAL;
6714 		goto done;
6715 	}
6716 
6717 	/* Validate against the global filter mode */
6718 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6719 	    sc->params.tp.vlan_pri_map) {
6720 		rc = E2BIG;
6721 		goto done;
6722 	}
6723 
6724 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6725 		rc = EINVAL;
6726 		goto done;
6727 	}
6728 
6729 	if (t->fs.val.iport >= nports) {
6730 		rc = EINVAL;
6731 		goto done;
6732 	}
6733 
6734 	/* Can't specify an iq if not steering to it */
6735 	if (!t->fs.dirsteer && t->fs.iq) {
6736 		rc = EINVAL;
6737 		goto done;
6738 	}
6739 
6740 	/* IPv6 filter idx must be 4 aligned */
6741 	if (t->fs.type == 1 &&
6742 	    ((t->idx & 0x3) || t->idx + 4 > nfilters)) {
6743 		rc = EINVAL;
6744 		goto done;
6745 	}
6746 
6747 	if (sc->tids.ftid_tab == NULL) {
6748 		KASSERT(sc->tids.ftids_in_use == 0,
6749 		    ("%s: no memory allocated but filters_in_use > 0",
6750 		    __func__));
6751 
6752 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6753 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6754 		if (sc->tids.ftid_tab == NULL) {
6755 			rc = ENOMEM;
6756 			goto done;
6757 		}
6758 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6759 	}
6760 
6761 	for (i = 0; i < 4; i++) {
6762 		f = &sc->tids.ftid_tab[t->idx + i];
6763 
6764 		if (f->pending || f->valid) {
6765 			rc = EBUSY;
6766 			goto done;
6767 		}
6768 		if (f->locked) {
6769 			rc = EPERM;
6770 			goto done;
6771 		}
6772 
6773 		if (t->fs.type == 0)
6774 			break;
6775 	}
6776 
6777 	f = &sc->tids.ftid_tab[t->idx];
6778 	f->fs = t->fs;
6779 
6780 	rc = set_filter_wr(sc, t->idx);
6781 done:
6782 	end_synchronized_op(sc, 0);
6783 
6784 	if (rc == 0) {
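		/*
		 * Wait for the firmware's reply; t4_filter_rpl() clears
		 * f->pending and does the wakeup.  An interrupted sleep
		 * leaves the request in flight and reports EINPROGRESS.
		 */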
6785 		mtx_lock(&sc->tids.ftid_lock);
6786 		for (;;) {
6787 			if (f->pending == 0) {
6788 				rc = f->valid ? 0 : EIO;
6789 				break;
6790 			}
6791 
6792 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6793 			    PCATCH, "t4setfw", 0)) {
6794 				rc = EINPROGRESS;
6795 				break;
6796 			}
6797 		}
6798 		mtx_unlock(&sc->tids.ftid_lock);
6799 	}
6800 	return (rc);
6801 }
6802 
6803 static int
6804 del_filter(struct adapter *sc, struct t4_filter *t)
6805 {
6806 	unsigned int nfilters;
6807 	struct filter_entry *f;
6808 	int rc;
6809 
6810 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6811 	if (rc)
6812 		return (rc);
6813 
6814 	nfilters = sc->tids.nftids;
6815 
6816 	if (nfilters == 0) {
6817 		rc = ENOTSUP;
6818 		goto done;
6819 	}
6820 
6821 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6822 	    t->idx >= nfilters) {
6823 		rc = EINVAL;
6824 		goto done;
6825 	}
6826 
6827 	if (!(sc->flags & FULL_INIT_DONE)) {
6828 		rc = EAGAIN;
6829 		goto done;
6830 	}
6831 
6832 	f = &sc->tids.ftid_tab[t->idx];
6833 
6834 	if (f->pending) {
6835 		rc = EBUSY;
6836 		goto done;
6837 	}
6838 	if (f->locked) {
6839 		rc = EPERM;
6840 		goto done;
6841 	}
6842 
6843 	if (f->valid) {
6844 		t->fs = f->fs;	/* extra info for the caller */
6845 		rc = del_filter_wr(sc, t->idx);
6846 	}
6847 
6848 done:
6849 	end_synchronized_op(sc, 0);
6850 
6851 	if (rc == 0) {
6852 		mtx_lock(&sc->tids.ftid_lock);
6853 		for (;;) {
6854 			if (f->pending == 0) {
6855 				rc = f->valid ? EIO : 0;
6856 				break;
6857 			}
6858 
6859 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6860 			    PCATCH, "t4delfw", 0)) {
6861 				rc = EINPROGRESS;
6862 				break;
6863 			}
6864 		}
6865 		mtx_unlock(&sc->tids.ftid_lock);
6866 	}
6867 
6868 	return (rc);
6869 }
6870 
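/* Release the filter's L2T entry, if any, and zero the whole entry. */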
6871 static void
6872 clear_filter(struct filter_entry *f)
6873 {
6874 	if (f->l2t)
6875 		t4_l2t_release(f->l2t);
6876 
6877 	bzero(f, sizeof (*f));
6878 }
6879 
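/*
 * Build and send the FW_FILTER_WR that programs a filter.  The firmware's
 * reply arrives asynchronously and is handled by t4_filter_rpl().
 */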
6880 static int
6881 set_filter_wr(struct adapter *sc, int fidx)
6882 {
6883 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6884 	struct wrqe *wr;
6885 	struct fw_filter_wr *fwr;
6886 	unsigned int ftid;
6887 
6888 	ASSERT_SYNCHRONIZED_OP(sc);
6889 
6890 	if (f->fs.newdmac || f->fs.newvlan) {
6891 		/* This filter needs an L2T entry; allocate one. */
6892 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
6893 		if (f->l2t == NULL)
6894 			return (EAGAIN);
6895 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6896 		    f->fs.dmac)) {
6897 			t4_l2t_release(f->l2t);
6898 			f->l2t = NULL;
6899 			return (ENOMEM);
6900 		}
6901 	}
6902 
6903 	ftid = sc->tids.ftid_base + fidx;
6904 
6905 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6906 	if (wr == NULL)
6907 		return (ENOMEM);
6908 
6909 	fwr = wrtod(wr);
6910 	bzero(fwr, sizeof (*fwr));
6911 
6912 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
6913 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
6914 	fwr->tid_to_iq =
6915 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
6916 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
6917 		V_FW_FILTER_WR_NOREPLY(0) |
6918 		V_FW_FILTER_WR_IQ(f->fs.iq));
6919 	fwr->del_filter_to_l2tix =
6920 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
6921 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
6922 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
6923 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
6924 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
6925 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
6926 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
6927 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
6928 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
6929 		    f->fs.newvlan == VLAN_REWRITE) |
6930 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
6931 		    f->fs.newvlan == VLAN_REWRITE) |
6932 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
6933 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
6934 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
6935 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
6936 	fwr->ethtype = htobe16(f->fs.val.ethtype);
6937 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
6938 	fwr->frag_to_ovlan_vldm =
6939 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
6940 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
6941 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
6942 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
6943 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
6944 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
6945 	fwr->smac_sel = 0;
6946 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
6947 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
6948 	fwr->maci_to_matchtypem =
6949 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
6950 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
6951 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
6952 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
6953 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
6954 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
6955 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
6956 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
6957 	fwr->ptcl = f->fs.val.proto;
6958 	fwr->ptclm = f->fs.mask.proto;
6959 	fwr->ttyp = f->fs.val.tos;
6960 	fwr->ttypm = f->fs.mask.tos;
6961 	fwr->ivlan = htobe16(f->fs.val.vlan);
6962 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
6963 	fwr->ovlan = htobe16(f->fs.val.vnic);
6964 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
6965 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
6966 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
6967 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
6968 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
6969 	fwr->lp = htobe16(f->fs.val.dport);
6970 	fwr->lpm = htobe16(f->fs.mask.dport);
6971 	fwr->fp = htobe16(f->fs.val.sport);
6972 	fwr->fpm = htobe16(f->fs.mask.sport);
6973 	if (f->fs.newsmac)
6974 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
6975 
6976 	f->pending = 1;
6977 	sc->tids.ftids_in_use++;
6978 
6979 	t4_wrq_tx(sc, wr);
6980 	return (0);
6981 }
6982 
6983 static int
6984 del_filter_wr(struct adapter *sc, int fidx)
6985 {
6986 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6987 	struct wrqe *wr;
6988 	struct fw_filter_wr *fwr;
6989 	unsigned int ftid;
6990 
6991 	ftid = sc->tids.ftid_base + fidx;
6992 
6993 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6994 	if (wr == NULL)
6995 		return (ENOMEM);
6996 	fwr = wrtod(wr);
6997 	bzero(fwr, sizeof (*fwr));
6998 
6999 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7000 
7001 	f->pending = 1;
7002 	t4_wrq_tx(sc, wr);
7003 	return (0);
7004 }
7005 
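/*
 * Handler for the firmware's replies to filter work requests.  Completes
 * the handshake with set_filter()/del_filter(), which sleep on ftid_tab
 * until the entry's pending flag clears.
 */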
7006 int
7007 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7008 {
7009 	struct adapter *sc = iq->adapter;
7010 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7011 	unsigned int idx = GET_TID(rpl);
7012 
7013 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7014 	    rss->opcode));
7015 
7016 	if (idx >= sc->tids.ftid_base &&
7017 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7018 		unsigned int rc = G_COOKIE(rpl->cookie);
7019 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7020 
7021 		mtx_lock(&sc->tids.ftid_lock);
7022 		if (rc == FW_FILTER_WR_FLT_ADDED) {
7023 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7024 			    __func__, idx));
7025 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7026 			f->pending = 0;  /* asynchronous setup completed */
7027 			f->valid = 1;
7028 		} else {
7029 			if (rc != FW_FILTER_WR_FLT_DELETED) {
7030 				/* Add or delete failed; log an error. */
7031 				log(LOG_ERR,
7032 				    "filter %u setup failed with error %u\n",
7033 				    idx, rc);
7034 			}
7035 
7036 			clear_filter(f);
7037 			sc->tids.ftids_in_use--;
7038 		}
7039 		wakeup(&sc->tids.ftid_tab);
7040 		mtx_unlock(&sc->tids.ftid_lock);
7041 	}
7042 
7043 	return (0);
7044 }
7045 
7046 static int
7047 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7048 {
7049 	int rc;
7050 
7051 	if (cntxt->cid > M_CTXTQID)
7052 		return (EINVAL);
7053 
7054 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7055 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7056 		return (EINVAL);
7057 
7058 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7059 	if (rc)
7060 		return (rc);
7061 
7062 	if (sc->flags & FW_OK) {
7063 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7064 		    &cntxt->data[0]);
7065 		if (rc == 0)
7066 			goto done;
7067 	}
7068 
7069 	/*
7070 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7071 	 * the backdoor.
7072 	 */
7073 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7074 done:
7075 	end_synchronized_op(sc, 0);
7076 	return (rc);
7077 }
7078 
7079 static int
7080 load_fw(struct adapter *sc, struct t4_data *fw)
7081 {
7082 	int rc;
7083 	uint8_t *fw_data;
7084 
7085 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7086 	if (rc)
7087 		return (rc);
7088 
7089 	if (sc->flags & FULL_INIT_DONE) {
7090 		rc = EBUSY;
7091 		goto done;
7092 	}
7093 
7094 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7095 	if (fw_data == NULL) {
7096 		rc = ENOMEM;
7097 		goto done;
7098 	}
7099 
7100 	rc = copyin(fw->data, fw_data, fw->len);
7101 	if (rc == 0)
7102 		rc = -t4_load_fw(sc, fw_data, fw->len);
7103 
7104 	free(fw_data, M_CXGBE);
7105 done:
7106 	end_synchronized_op(sc, 0);
7107 	return (rc);
7108 }
7109 
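/*
 * Copy a validated range of adapter memory out to user space through a PCIe
 * memory window: position the window, read up to an aperture's worth of
 * data 4 bytes at a time, and copyout() each chunk.
 */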
7110 static int
7111 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7112 {
7113 	uint32_t addr, off, remaining, i, n;
7114 	uint32_t *buf, *b;
7115 	uint32_t mw_base, mw_aperture;
7116 	int rc;
7117 	uint8_t *dst;
7118 
7119 	rc = validate_mem_range(sc, mr->addr, mr->len);
7120 	if (rc != 0)
7121 		return (rc);
7122 
7123 	memwin_info(sc, win, &mw_base, &mw_aperture);
7124 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7125 	addr = mr->addr;
7126 	remaining = mr->len;
7127 	dst = (void *)mr->data;
7128 
7129 	while (remaining) {
7130 		off = position_memwin(sc, win, addr);
7131 
7132 		/* number of bytes that we'll copy in the inner loop */
7133 		n = min(remaining, mw_aperture - off);
7134 		for (i = 0; i < n; i += 4)
7135 			*b++ = t4_read_reg(sc, mw_base + off + i);
7136 
7137 		rc = copyout(buf, dst, n);
7138 		if (rc != 0)
7139 			break;
7140 
7141 		b = buf;
7142 		dst += n;
7143 		remaining -= n;
7144 		addr += n;
7145 	}
7146 
7147 	free(buf, M_CXGBE);
7148 	return (rc);
7149 }
7150 
7151 static int
7152 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7153 {
7154 	int rc;
7155 
7156 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7157 		return (EINVAL);
7158 
7159 	if (i2cd->len > 1) {
7160 		/* XXX: need fw support for longer reads in one go */
7161 		return (ENOTSUP);
7162 	}
7163 
7164 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7165 	if (rc)
7166 		return (rc);
7167 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7168 	    i2cd->offset, &i2cd->data[0]);
7169 	end_synchronized_op(sc, 0);
7170 
7171 	return (rc);
7172 }
7173 
7174 int
7175 t4_os_find_pci_capability(struct adapter *sc, int cap)
7176 {
7177 	int i;
7178 
7179 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7180 }
7181 
7182 int
7183 t4_os_pci_save_state(struct adapter *sc)
7184 {
7185 	device_t dev;
7186 	struct pci_devinfo *dinfo;
7187 
7188 	dev = sc->dev;
7189 	dinfo = device_get_ivars(dev);
7190 
7191 	pci_cfg_save(dev, dinfo, 0);
7192 	return (0);
7193 }
7194 
7195 int
7196 t4_os_pci_restore_state(struct adapter *sc)
7197 {
7198 	device_t dev;
7199 	struct pci_devinfo *dinfo;
7200 
7201 	dev = sc->dev;
7202 	dinfo = device_get_ivars(dev);
7203 
7204 	pci_cfg_restore(dev, dinfo);
7205 	return (0);
7206 }
7207 
7208 void
7209 t4_os_portmod_changed(const struct adapter *sc, int idx)
7210 {
7211 	struct port_info *pi = sc->port[idx];
7212 	static const char *mod_str[] = {
7213 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7214 	};
7215 
7216 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7217 		if_printf(pi->ifp, "transceiver unplugged.\n");
7218 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7219 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7220 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7221 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7222 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7223 		if_printf(pi->ifp, "%s transceiver inserted.\n",
7224 		    mod_str[pi->mod_type]);
7225 	} else {
7226 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7227 		    pi->mod_type);
7228 	}
7229 }
7230 
7231 void
7232 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7233 {
7234 	struct port_info *pi = sc->port[idx];
7235 	struct ifnet *ifp = pi->ifp;
7236 
7237 	if (link_stat) {
7238 		pi->linkdnrc = -1;
7239 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7240 		if_link_state_change(ifp, LINK_STATE_UP);
7241 	} else {
7242 		if (reason >= 0)
7243 			pi->linkdnrc = reason;
7244 		if_link_state_change(ifp, LINK_STATE_DOWN);
7245 	}
7246 }
7247 
7248 void
7249 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7250 {
7251 	struct adapter *sc;
7252 
7253 	mtx_lock(&t4_list_lock);
7254 	SLIST_FOREACH(sc, &t4_list, link) {
7255 		/*
7256 		 * func should not make any assumptions about what state sc is
7257 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7258 		 */
7259 		func(sc, arg);
7260 	}
7261 	mtx_unlock(&t4_list_lock);
7262 }
7263 
7264 static int
7265 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7266 {
7267 	return (0);
7268 }
7269 
7270 static int
7271 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7272 {
7273 	return (0);
7274 }
7275 
7276 static int
7277 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7278     struct thread *td)
7279 {
7280 	int rc;
7281 	struct adapter *sc = dev->si_drv1;
7282 
7283 	rc = priv_check(td, PRIV_DRIVER);
7284 	if (rc != 0)
7285 		return (rc);
7286 
7287 	switch (cmd) {
7288 	case CHELSIO_T4_GETREG: {
7289 		struct t4_reg *edata = (struct t4_reg *)data;
7290 
7291 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7292 			return (EFAULT);
7293 
7294 		if (edata->size == 4)
7295 			edata->val = t4_read_reg(sc, edata->addr);
7296 		else if (edata->size == 8) {
			/* make sure the 8-byte read stays within the BAR */
			if (edata->addr + 8 > sc->mmio_len)
				return (EFAULT);
7297 			edata->val = t4_read_reg64(sc, edata->addr);
7298 		} else
7299 			return (EINVAL);
7300 
7301 		break;
7302 	}
7303 	case CHELSIO_T4_SETREG: {
7304 		struct t4_reg *edata = (struct t4_reg *)data;
7305 
7306 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7307 			return (EFAULT);
7308 
7309 		if (edata->size == 4) {
7310 			if (edata->val & 0xffffffff00000000)
7311 				return (EINVAL);
7312 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7313 		} else if (edata->size == 8) {
			/* make sure the 8-byte write stays within the BAR */
			if (edata->addr + 8 > sc->mmio_len)
				return (EFAULT);
7314 			t4_write_reg64(sc, edata->addr, edata->val);
7315 		} else
7316 			return (EINVAL);
7317 		break;
7318 	}
7319 	case CHELSIO_T4_REGDUMP: {
7320 		struct t4_regdump *regs = (struct t4_regdump *)data;
7321 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7322 		uint8_t *buf;
7323 
7324 		if (regs->len < reglen) {
7325 			regs->len = reglen; /* hint to the caller */
7326 			return (ENOBUFS);
7327 		}
7328 
7329 		regs->len = reglen;
7330 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7331 		t4_get_regs(sc, regs, buf);
7332 		rc = copyout(buf, regs->data, reglen);
7333 		free(buf, M_CXGBE);
7334 		break;
7335 	}
7336 	case CHELSIO_T4_GET_FILTER_MODE:
7337 		rc = get_filter_mode(sc, (uint32_t *)data);
7338 		break;
7339 	case CHELSIO_T4_SET_FILTER_MODE:
7340 		rc = set_filter_mode(sc, *(uint32_t *)data);
7341 		break;
7342 	case CHELSIO_T4_GET_FILTER:
7343 		rc = get_filter(sc, (struct t4_filter *)data);
7344 		break;
7345 	case CHELSIO_T4_SET_FILTER:
7346 		rc = set_filter(sc, (struct t4_filter *)data);
7347 		break;
7348 	case CHELSIO_T4_DEL_FILTER:
7349 		rc = del_filter(sc, (struct t4_filter *)data);
7350 		break;
7351 	case CHELSIO_T4_GET_SGE_CONTEXT:
7352 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7353 		break;
7354 	case CHELSIO_T4_LOAD_FW:
7355 		rc = load_fw(sc, (struct t4_data *)data);
7356 		break;
7357 	case CHELSIO_T4_GET_MEM:
7358 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7359 		break;
7360 	case CHELSIO_T4_GET_I2C:
7361 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7362 		break;
7363 	case CHELSIO_T4_CLEAR_STATS: {
7364 		int i;
7365 		u_int port_id = *(uint32_t *)data;
7366 		struct port_info *pi;
7367 
7368 		if (port_id >= sc->params.nports)
7369 			return (EINVAL);
7370 
7371 		/* MAC stats */
7372 		t4_clr_port_stats(sc, port_id);
7373 
7374 		pi = sc->port[port_id];
7375 		if (pi->flags & PORT_INIT_DONE) {
7376 			struct sge_rxq *rxq;
7377 			struct sge_txq *txq;
7378 			struct sge_wrq *wrq;
7379 
7380 			for_each_rxq(pi, i, rxq) {
7381 #if defined(INET) || defined(INET6)
7382 				rxq->lro.lro_queued = 0;
7383 				rxq->lro.lro_flushed = 0;
7384 #endif
7385 				rxq->rxcsum = 0;
7386 				rxq->vlan_extraction = 0;
7387 			}
7388 
7389 			for_each_txq(pi, i, txq) {
7390 				txq->txcsum = 0;
7391 				txq->tso_wrs = 0;
7392 				txq->vlan_insertion = 0;
7393 				txq->imm_wrs = 0;
7394 				txq->sgl_wrs = 0;
7395 				txq->txpkt_wrs = 0;
7396 				txq->txpkts_wrs = 0;
7397 				txq->txpkts_pkts = 0;
7398 				txq->br->br_drops = 0;
7399 				txq->no_dmamap = 0;
7400 				txq->no_desc = 0;
7401 			}
7402 
7403 #ifdef TCP_OFFLOAD
7404 			/* nothing to clear for each ofld_rxq */
7405 
7406 			for_each_ofld_txq(pi, i, wrq) {
7407 				wrq->tx_wrs = 0;
7408 				wrq->no_desc = 0;
7409 			}
7410 #endif
7411 			wrq = &sc->sge.ctrlq[pi->port_id];
7412 			wrq->tx_wrs = 0;
7413 			wrq->no_desc = 0;
7414 		}
7415 		break;
7416 	}
7417 	default:
7418 		rc = EINVAL;
7419 	}
7420 
7421 	return (rc);
7422 }
7423 
7424 #ifdef TCP_OFFLOAD
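/*
 * Enable or disable TOE on a port.  Enabling brings the interface fully up
 * if needed and activates the TOM upper-layer driver on first use;
 * offload_map tracks which ports currently have TOE enabled.
 */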
7425 static int
7426 toe_capability(struct port_info *pi, int enable)
7427 {
7428 	int rc;
7429 	struct adapter *sc = pi->adapter;
7430 
7431 	ASSERT_SYNCHRONIZED_OP(sc);
7432 
7433 	if (!is_offload(sc))
7434 		return (ENODEV);
7435 
7436 	if (enable) {
7437 		if (!(sc->flags & FULL_INIT_DONE)) {
7438 			rc = cxgbe_init_synchronized(pi);
7439 			if (rc)
7440 				return (rc);
7441 		}
7442 
7443 		if (isset(&sc->offload_map, pi->port_id))
7444 			return (0);
7445 
7446 		if (!(sc->flags & TOM_INIT_DONE)) {
7447 			rc = t4_activate_uld(sc, ULD_TOM);
7448 			if (rc == EAGAIN) {
7449 				log(LOG_WARNING,
7450 				    "You must kldload t4_tom.ko before trying "
7451 				    "to enable TOE on a cxgbe interface.\n");
7452 			}
7453 			if (rc != 0)
7454 				return (rc);
7455 			KASSERT(sc->tom_softc != NULL,
7456 			    ("%s: TOM activated but softc NULL", __func__));
7457 			KASSERT(sc->flags & TOM_INIT_DONE,
7458 			    ("%s: TOM activated but flag not set", __func__));
7459 		}
7460 
7461 		setbit(&sc->offload_map, pi->port_id);
7462 	} else {
7463 		if (!isset(&sc->offload_map, pi->port_id))
7464 			return (0);
7465 
7466 		KASSERT(sc->flags & TOM_INIT_DONE,
7467 		    ("%s: TOM never initialized?", __func__));
7468 		clrbit(&sc->offload_map, pi->port_id);
7469 	}
7470 
7471 	return (0);
7472 }
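
/*
 * Usage note (assumed, not taken from this file): toe_capability() is
 * normally reached when userland toggles IFCAP_TOE on an interface, i.e.
 * something along the lines of:
 *
 *	# kldload t4_tom
 *	# ifconfig cxgbe0 toe
 *
 * Loading t4_tom first registers the TOM ULD, so the EAGAIN/kldload
 * warning above is not triggered.
 */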
7473 
7474 /*
7475  * Add an upper layer driver to the global list.
7476  */
7477 int
7478 t4_register_uld(struct uld_info *ui)
7479 {
7480 	int rc = 0;
7481 	struct uld_info *u;
7482 
7483 	mtx_lock(&t4_uld_list_lock);
7484 	SLIST_FOREACH(u, &t4_uld_list, link) {
7485 		if (u->uld_id == ui->uld_id) {
7486 			rc = EEXIST;
7487 			goto done;
7488 		}
7489 	}
7490 
7491 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7492 	ui->refcount = 0;
7493 done:
7494 	mtx_unlock(&t4_uld_list_lock);
7495 	return (rc);
7496 }
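
/*
 * Hypothetical caller sketch (modeled on what a TOE module such as t4_tom
 * would do; the names below are illustrative only).  The uld_id, activate,
 * and deactivate members are the ones this file relies on:
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = t4_tom_activate,
 *		.deactivate = t4_tom_deactivate,
 *	};
 *
 *	rc = t4_register_uld(&tom_uld_info);	// EEXIST if the id is taken
 */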
7497 
7498 int
7499 t4_unregister_uld(struct uld_info *ui)
7500 {
7501 	int rc = EINVAL;
7502 	struct uld_info *u;
7503 
7504 	mtx_lock(&t4_uld_list_lock);
7505 
7506 	SLIST_FOREACH(u, &t4_uld_list, link) {
7507 		if (u == ui) {
7508 			if (ui->refcount > 0) {
7509 				rc = EBUSY;
7510 				goto done;
7511 			}
7512 
7513 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7514 			rc = 0;
7515 			goto done;
7516 		}
7517 	}
7518 done:
7519 	mtx_unlock(&t4_uld_list_lock);
7520 	return (rc);
7521 }
7522 
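/*
 * Look up the named ULD and call its activate hook.  The EAGAIN default is
 * deliberate: it is returned when no matching ULD is registered at all,
 * which is exactly what toe_capability() tests for before telling the user
 * to kldload t4_tom.ko.
 */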
7523 int
7524 t4_activate_uld(struct adapter *sc, int id)
7525 {
7526 	int rc = EAGAIN;
7527 	struct uld_info *ui;
7528 
7529 	ASSERT_SYNCHRONIZED_OP(sc);
7530 
7531 	mtx_lock(&t4_uld_list_lock);
7532 
7533 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7534 		if (ui->uld_id == id) {
7535 			rc = ui->activate(sc);
7536 			if (rc == 0)
7537 				ui->refcount++;
7538 			goto done;
7539 		}
7540 	}
7541 done:
7542 	mtx_unlock(&t4_uld_list_lock);
7543 
7544 	return (rc);
7545 }
7546 
7547 int
7548 t4_deactivate_uld(struct adapter *sc, int id)
7549 {
7550 	int rc = EINVAL;
7551 	struct uld_info *ui;
7552 
7553 	ASSERT_SYNCHRONIZED_OP(sc);
7554 
7555 	mtx_lock(&t4_uld_list_lock);
7556 
7557 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7558 		if (ui->uld_id == id) {
7559 			rc = ui->deactivate(sc);
7560 			if (rc == 0)
7561 				ui->refcount--;
7562 			goto done;
7563 		}
7564 	}
7565 done:
7566 	mtx_unlock(&t4_uld_list_lock);
7567 
7568 	return (rc);
7569 }
7570 #endif
7571 
7572 /*
7573  * Come up with reasonable defaults for some of the tunables, provided they
7574  * have not been set by the user (in which case their values are used as-is).
7575  */
7576 static void
7577 tweak_tunables(void)
7578 {
7579 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7580 
7581 	if (t4_ntxq10g < 1)
7582 		t4_ntxq10g = min(nc, NTXQ_10G);
7583 
7584 	if (t4_ntxq1g < 1)
7585 		t4_ntxq1g = min(nc, NTXQ_1G);
7586 
7587 	if (t4_nrxq10g < 1)
7588 		t4_nrxq10g = min(nc, NRXQ_10G);
7589 
7590 	if (t4_nrxq1g < 1)
7591 		t4_nrxq1g = min(nc, NRXQ_1G);
7592 
7593 #ifdef TCP_OFFLOAD
7594 	if (t4_nofldtxq10g < 1)
7595 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7596 
7597 	if (t4_nofldtxq1g < 1)
7598 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7599 
7600 	if (t4_nofldrxq10g < 1)
7601 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7602 
7603 	if (t4_nofldrxq1g < 1)
7604 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7605 
7606 	if (t4_toecaps_allowed == -1)
7607 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7608 #else
7609 	if (t4_toecaps_allowed == -1)
7610 		t4_toecaps_allowed = 0;
7611 #endif
7612 
7613 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7614 		t4_tmr_idx_10g = TMR_IDX_10G;
7615 
7616 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7617 		t4_pktc_idx_10g = PKTC_IDX_10G;
7618 
7619 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7620 		t4_tmr_idx_1g = TMR_IDX_1G;
7621 
7622 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7623 		t4_pktc_idx_1g = PKTC_IDX_1G;
7624 
7625 	if (t4_qsize_txq < 128)
7626 		t4_qsize_txq = 128;
7627 
7628 	if (t4_qsize_rxq < 128)
7629 		t4_qsize_rxq = 128;
7630 	while (t4_qsize_rxq & 7)
7631 		t4_qsize_rxq++;
7632 
7633 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7634 }
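
/*
 * Example (tunable names assumed from the variables above, wired up via
 * TUNABLE_INT hooks elsewhere in this file): the defaults computed by
 * tweak_tunables() can be overridden from /boot/loader.conf, along the
 * lines of:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *
 * Out-of-range values are still sanitized here, e.g. qsize_rxq is clamped
 * to at least 128 and rounded up to a multiple of 8.
 */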
7635 
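/*
 * mod_event is shared by the t4nex and t5nex DRIVER_MODULE declarations
 * below, hence the "loaded" refcount: the global locks and lists are set up
 * on the first MOD_LOAD only and torn down on the last MOD_UNLOAD, and
 * unload fails with EBUSY while any adapter or ULD is still registered.
 */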
7636 static int
7637 mod_event(module_t mod, int cmd, void *arg)
7638 {
7639 	int rc = 0;
7640 	static int loaded = 0;
7641 
7642 	switch (cmd) {
7643 	case MOD_LOAD:
7644 		if (atomic_fetchadd_int(&loaded, 1))
7645 			break;
7646 		t4_sge_modload();
7647 		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
7648 		SLIST_INIT(&t4_list);
7649 #ifdef TCP_OFFLOAD
7650 		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
7651 		SLIST_INIT(&t4_uld_list);
7652 #endif
7653 		tweak_tunables();
7654 		break;
7655 
7656 	case MOD_UNLOAD:
7657 		if (atomic_fetchadd_int(&loaded, -1) > 1)
7658 			break;
7659 #ifdef TCP_OFFLOAD
7660 		mtx_lock(&t4_uld_list_lock);
7661 		if (!SLIST_EMPTY(&t4_uld_list)) {
7662 			rc = EBUSY;
7663 			mtx_unlock(&t4_uld_list_lock);
7664 			break;
7665 		}
7666 		mtx_unlock(&t4_uld_list_lock);
7667 		mtx_destroy(&t4_uld_list_lock);
7668 #endif
7669 		mtx_lock(&t4_list_lock);
7670 		if (!SLIST_EMPTY(&t4_list)) {
7671 			rc = EBUSY;
7672 			mtx_unlock(&t4_list_lock);
7673 			break;
7674 		}
7675 		mtx_unlock(&t4_list_lock);
7676 		mtx_destroy(&t4_list_lock);
7677 		break;
7678 	}
7679 
7680 	return (rc);
7681 }
7682 
7683 static devclass_t t4_devclass, t5_devclass;
7684 static devclass_t cxgbe_devclass, cxl_devclass;
7685 
7686 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7687 MODULE_VERSION(t4nex, 1);
7688 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7689 
7690 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7691 MODULE_VERSION(t5nex, 1);
7692 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7693 
7694 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7695 MODULE_VERSION(cxgbe, 1);
7696 
7697 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7698 MODULE_VERSION(cxl, 1);
7699