1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
70 /* T4 bus driver interface */
71 static int t4_probe(device_t);
72 static int t4_attach(device_t);
73 static int t4_detach(device_t);
74 static device_method_t t4_methods[] = {
75 	DEVMETHOD(device_probe,		t4_probe),
76 	DEVMETHOD(device_attach,	t4_attach),
77 	DEVMETHOD(device_detach,	t4_detach),
78 
79 	DEVMETHOD_END
80 };
81 static driver_t t4_driver = {
82 	"t4nex",
83 	t4_methods,
84 	sizeof(struct adapter)
85 };
86 
87 
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 	DEVMETHOD(device_probe,		cxgbe_probe),
94 	DEVMETHOD(device_attach,	cxgbe_attach),
95 	DEVMETHOD(device_detach,	cxgbe_detach),
96 	DEVMETHOD_END
97 };
98 static driver_t cxgbe_driver = {
99 	"cxgbe",
100 	cxgbe_methods,
101 	sizeof(struct port_info)
102 };
103 
104 static d_ioctl_t t4_ioctl;
105 static d_open_t t4_open;
106 static d_close_t t4_close;
107 
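/*
 * The nexus character device (/dev/t4nex<N> or /dev/t5nex<N>) is what the
 * userland management tool (cxgbetool) talks to; see t4_ioctl() for the
 * operations it supports.
 */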
108 static struct cdevsw t4_cdevsw = {
109        .d_version = D_VERSION,
110        .d_flags = 0,
111        .d_open = t4_open,
112        .d_close = t4_close,
113        .d_ioctl = t4_ioctl,
114        .d_name = "t4nex",
115 };
116 
117 /* T5 bus driver interface */
118 static int t5_probe(device_t);
119 static device_method_t t5_methods[] = {
120 	DEVMETHOD(device_probe,		t5_probe),
121 	DEVMETHOD(device_attach,	t4_attach),
122 	DEVMETHOD(device_detach,	t4_detach),
123 
124 	DEVMETHOD_END
125 };
126 static driver_t t5_driver = {
127 	"t5nex",
128 	t5_methods,
129 	sizeof(struct adapter)
130 };
131 
132 
133 /* T5 port (cxl) interface */
134 static driver_t cxl_driver = {
135 	"cxl",
136 	cxgbe_methods,
137 	sizeof(struct port_info)
138 };
139 
140 static struct cdevsw t5_cdevsw = {
141        .d_version = D_VERSION,
142        .d_flags = 0,
143        .d_open = t4_open,
144        .d_close = t4_close,
145        .d_ioctl = t4_ioctl,
146        .d_name = "t5nex",
147 };
148 
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156 
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158 
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct sx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct sx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169 
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
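/*
 * Tunables live in the kernel environment, so they are normally set at boot
 * time from /boot/loader.conf.  Illustrative example:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="flash"
 */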
180 
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187 
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191 
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195 
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199 
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204 
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208 
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212 
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217 
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224 
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228 
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232 
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236 
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242 
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245 
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251 
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF	"default"
256 #define FLASH_CF	"flash"
257 #define UWIRE_CF	"uwire"
258 #define FPGA_CF		"fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261 
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268 
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so that the
271  * chip resources aren't wasted on features that will not be used.
272  */
273 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275 
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278 
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281 
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284 
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287 
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290 
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293 
294 struct intrs_and_queues {
295 	int intr_type;		/* INTx, MSI, or MSI-X */
296 	int nirq;		/* Number of vectors */
297 	int intr_flags;
298 	int ntxq10g;		/* # of NIC txq's for each 10G port */
299 	int nrxq10g;		/* # of NIC rxq's for each 10G port */
300 	int ntxq1g;		/* # of NIC txq's for each 1G port */
301 	int nrxq1g;		/* # of NIC rxq's for each 1G port */
302 #ifdef TCP_OFFLOAD
303 	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
304 	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
305 	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
306 	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
307 #endif
308 };
309 
310 struct filter_entry {
311         uint32_t valid:1;	/* filter allocated and valid */
312         uint32_t locked:1;	/* filter is administratively locked */
313         uint32_t pending:1;	/* filter action is pending firmware reply */
314 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
315 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
316 
317         struct t4_filter_specification fs;
318 };
319 
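/*
 * Flags for update_mac_settings(), selecting which MAC parameters to push to
 * the hardware; XGMAC_ALL requests everything at once.
 */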
320 enum {
321 	XGMAC_MTU	= (1 << 0),
322 	XGMAC_PROMISC	= (1 << 1),
323 	XGMAC_ALLMULTI	= (1 << 2),
324 	XGMAC_VLANEX	= (1 << 3),
325 	XGMAC_UCADDR	= (1 << 4),
326 	XGMAC_MCADDRS	= (1 << 5),
327 
328 	XGMAC_ALL	= 0xffff
329 };
330 
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int fwmtype_to_hwmtype(int);
336 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
337     uint32_t *);
338 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
339 static uint32_t position_memwin(struct adapter *, int, uint32_t);
340 static int cfg_itype_and_nqueues(struct adapter *, int, int,
341     struct intrs_and_queues *);
342 static int prep_firmware(struct adapter *);
343 static int partition_resources(struct adapter *, const struct firmware *,
344     const char *);
345 static int get_params__pre_init(struct adapter *);
346 static int get_params__post_init(struct adapter *);
347 static int set_params__post_init(struct adapter *);
348 static void t4_set_desc(struct adapter *);
349 static void build_medialist(struct port_info *);
350 static int update_mac_settings(struct port_info *, int);
351 static int cxgbe_init_synchronized(struct port_info *);
352 static int cxgbe_uninit_synchronized(struct port_info *);
353 static int setup_intr_handlers(struct adapter *);
354 static int adapter_full_init(struct adapter *);
355 static int adapter_full_uninit(struct adapter *);
356 static int port_full_init(struct port_info *);
357 static int port_full_uninit(struct port_info *);
358 static void quiesce_eq(struct adapter *, struct sge_eq *);
359 static void quiesce_iq(struct adapter *, struct sge_iq *);
360 static void quiesce_fl(struct adapter *, struct sge_fl *);
361 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
362     driver_intr_t *, void *, char *);
363 static int t4_free_irq(struct adapter *, struct irq *);
364 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
365     unsigned int);
366 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
367 static void cxgbe_tick(void *);
368 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
369 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
370     struct mbuf *);
371 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
372 static int fw_msg_not_handled(struct adapter *, const __be64 *);
373 static int t4_sysctls(struct adapter *);
374 static int cxgbe_sysctls(struct port_info *);
375 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
376 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
377 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
378 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
379 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
380 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
381 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
382 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
383 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
384 #ifdef SBUF_DRAIN
385 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
389 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
390 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
393 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
394 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
396 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
398 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
399 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
400 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
401 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
404 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
405 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
406 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
407 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
408 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
409 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
410 #endif
411 static inline void txq_start(struct ifnet *, struct sge_txq *);
412 static uint32_t fconf_to_mode(uint32_t);
413 static uint32_t mode_to_fconf(uint32_t);
414 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
415 static int get_filter_mode(struct adapter *, uint32_t *);
416 static int set_filter_mode(struct adapter *, uint32_t);
417 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
418 static int get_filter(struct adapter *, struct t4_filter *);
419 static int set_filter(struct adapter *, struct t4_filter *);
420 static int del_filter(struct adapter *, struct t4_filter *);
421 static void clear_filter(struct filter_entry *);
422 static int set_filter_wr(struct adapter *, int);
423 static int del_filter_wr(struct adapter *, int);
424 static int get_sge_context(struct adapter *, struct t4_sge_context *);
425 static int load_fw(struct adapter *, struct t4_data *);
426 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
427 static int read_i2c(struct adapter *, struct t4_i2c_data *);
428 #ifdef TCP_OFFLOAD
429 static int toe_capability(struct port_info *, int);
430 #endif
431 static int mod_event(module_t, int, void *);
432 
433 struct {
434 	uint16_t device;
435 	char *desc;
436 } t4_pciids[] = {
437 	{0xa000, "Chelsio Terminator 4 FPGA"},
438 	{0x4400, "Chelsio T440-dbg"},
439 	{0x4401, "Chelsio T420-CR"},
440 	{0x4402, "Chelsio T422-CR"},
441 	{0x4403, "Chelsio T440-CR"},
442 	{0x4404, "Chelsio T420-BCH"},
443 	{0x4405, "Chelsio T440-BCH"},
444 	{0x4406, "Chelsio T440-CH"},
445 	{0x4407, "Chelsio T420-SO"},
446 	{0x4408, "Chelsio T420-CX"},
447 	{0x4409, "Chelsio T420-BT"},
448 	{0x440a, "Chelsio T404-BT"},
449 	{0x440e, "Chelsio T440-LP-CR"},
450 }, t5_pciids[] = {
451 	{0xb000, "Chelsio Terminator 5 FPGA"},
452 	{0x5400, "Chelsio T580-dbg"},
453 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
454 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
455 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
456 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
457 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
458 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
459 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
460 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
461 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
462 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
463 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
464 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
465 #ifdef notyet
466 	{0x5404,  "Chelsio T520-BCH"},
467 	{0x5405,  "Chelsio T540-BCH"},
468 	{0x5406,  "Chelsio T540-CH"},
469 	{0x5408,  "Chelsio T520-CX"},
470 	{0x540b,  "Chelsio B520-SR"},
471 	{0x540c,  "Chelsio B504-BT"},
472 	{0x540f,  "Chelsio Amsterdam"},
473 	{0x5413,  "Chelsio T580-CHR"},
474 #endif
475 };
476 
477 #ifdef TCP_OFFLOAD
478 /*
479  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
480  * exactly the same for both rxq and ofld_rxq.
481  */
482 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
483 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
484 #endif
485 
486 /* No easy way to include t4_msg.h before adapter.h so we check this way */
487 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
488 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
489 
490 static int
491 t4_probe(device_t dev)
492 {
493 	int i;
494 	uint16_t v = pci_get_vendor(dev);
495 	uint16_t d = pci_get_device(dev);
496 	uint8_t f = pci_get_function(dev);
497 
498 	if (v != PCI_VENDOR_ID_CHELSIO)
499 		return (ENXIO);
500 
501 	/* Attach only to PF0 of the FPGA */
502 	if (d == 0xa000 && f != 0)
503 		return (ENXIO);
504 
505 	for (i = 0; i < nitems(t4_pciids); i++) {
506 		if (d == t4_pciids[i].device) {
507 			device_set_desc(dev, t4_pciids[i].desc);
508 			return (BUS_PROBE_DEFAULT);
509 		}
510 	}
511 
512 	return (ENXIO);
513 }
514 
515 static int
516 t5_probe(device_t dev)
517 {
518 	int i;
519 	uint16_t v = pci_get_vendor(dev);
520 	uint16_t d = pci_get_device(dev);
521 	uint8_t f = pci_get_function(dev);
522 
523 	if (v != PCI_VENDOR_ID_CHELSIO)
524 		return (ENXIO);
525 
526 	/* Attach only to PF0 of the FPGA */
527 	if (d == 0xb000 && f != 0)
528 		return (ENXIO);
529 
530 	for (i = 0; i < nitems(t5_pciids); i++) {
531 		if (d == t5_pciids[i].device) {
532 			device_set_desc(dev, t5_pciids[i].desc);
533 			return (BUS_PROBE_DEFAULT);
534 		}
535 	}
536 
537 	return (ENXIO);
538 }
539 
540 static int
541 t4_attach(device_t dev)
542 {
543 	struct adapter *sc;
544 	int rc = 0, i, n10g, n1g, rqidx, tqidx;
545 	struct intrs_and_queues iaq;
546 	struct sge *s;
547 #ifdef TCP_OFFLOAD
548 	int ofld_rqidx, ofld_tqidx;
549 #endif
550 
551 	sc = device_get_softc(dev);
552 	sc->dev = dev;
553 
554 	pci_enable_busmaster(dev);
555 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
556 		uint32_t v;
557 
558 		pci_set_max_read_req(dev, 4096);
559 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
560 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
561 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
562 	}
563 
564 	sc->traceq = -1;
565 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
566 	    device_get_nameunit(dev));
567 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
568 
569 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
570 	    device_get_nameunit(dev));
571 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
572 	sx_xlock(&t4_list_lock);
573 	SLIST_INSERT_HEAD(&t4_list, sc, link);
574 	sx_xunlock(&t4_list_lock);
575 
576 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
577 	TAILQ_INIT(&sc->sfl);
578 	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
579 
580 	rc = map_bars_0_and_4(sc);
581 	if (rc != 0)
582 		goto done; /* error message displayed already */
583 
584 	/*
585 	 * This is the real PF# to which we're attaching.  Works from within PCI
586 	 * passthrough environments too, where pci_get_function() could return a
587 	 * different PF# depending on the passthrough configuration.  We need to
588 	 * use the real PF# in all our communication with the firmware.
589 	 */
590 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
591 	sc->mbox = sc->pf;
592 
593 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
594 	sc->an_handler = an_not_handled;
595 	for (i = 0; i < nitems(sc->cpl_handler); i++)
596 		sc->cpl_handler[i] = cpl_not_handled;
597 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
598 		sc->fw_msg_handler[i] = fw_msg_not_handled;
599 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
600 	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
601 	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
602 	t4_init_sge_cpl_handlers(sc);
603 
604 	/* Prepare the adapter for operation */
605 	rc = -t4_prep_adapter(sc);
606 	if (rc != 0) {
607 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
608 		goto done;
609 	}
610 
611 	/*
612 	 * Do this really early, with the memory windows set up even before the
613 	 * character device.  The userland tool's register i/o and mem read
614 	 * will work even in "recovery mode".
615 	 */
616 	setup_memwin(sc);
617 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
618 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
619 	    device_get_nameunit(dev));
620 	if (sc->cdev == NULL)
621 		device_printf(dev, "failed to create nexus char device.\n");
622 	else
623 		sc->cdev->si_drv1 = sc;
624 
625 	/* Go no further if recovery mode has been requested. */
626 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
627 		device_printf(dev, "recovery mode.\n");
628 		goto done;
629 	}
630 
631 	/* Prepare the firmware for operation */
632 	rc = prep_firmware(sc);
633 	if (rc != 0)
634 		goto done; /* error message displayed already */
635 
636 	rc = get_params__post_init(sc);
637 	if (rc != 0)
638 		goto done; /* error message displayed already */
639 
640 	rc = set_params__post_init(sc);
641 	if (rc != 0)
642 		goto done; /* error message displayed already */
643 
644 	rc = map_bar_2(sc);
645 	if (rc != 0)
646 		goto done; /* error message displayed already */
647 
648 	rc = t4_create_dma_tag(sc);
649 	if (rc != 0)
650 		goto done; /* error message displayed already */
651 
652 	/*
653 	 * First pass over all the ports - allocate VIs and initialize some
654 	 * basic parameters like mac address, port type, etc.  We also figure
655 	 * out whether a port is 10G or 1G and use that information when
656 	 * calculating how many interrupts to attempt to allocate.
657 	 */
658 	n10g = n1g = 0;
659 	for_each_port(sc, i) {
660 		struct port_info *pi;
661 
662 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
663 		sc->port[i] = pi;
664 
665 		/* These must be set before t4_port_init */
666 		pi->adapter = sc;
667 		pi->port_id = i;
668 
669 		/* Allocate the vi and initialize parameters like mac addr */
670 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
671 		if (rc != 0) {
672 			device_printf(dev, "unable to initialize port %d: %d\n",
673 			    i, rc);
674 			free(pi, M_CXGBE);
675 			sc->port[i] = NULL;
676 			goto done;
677 		}
678 
679 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
680 		    device_get_nameunit(dev), i);
681 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
682 		sc->chan_map[pi->tx_chan] = i;
683 
684 		if (is_10G_port(pi) || is_40G_port(pi)) {
685 			n10g++;
686 			pi->tmr_idx = t4_tmr_idx_10g;
687 			pi->pktc_idx = t4_pktc_idx_10g;
688 		} else {
689 			n1g++;
690 			pi->tmr_idx = t4_tmr_idx_1g;
691 			pi->pktc_idx = t4_pktc_idx_1g;
692 		}
693 
694 		pi->xact_addr_filt = -1;
695 		pi->linkdnrc = -1;
696 
697 		pi->qsize_rxq = t4_qsize_rxq;
698 		pi->qsize_txq = t4_qsize_txq;
699 
700 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
701 		if (pi->dev == NULL) {
702 			device_printf(dev,
703 			    "failed to add device for port %d.\n", i);
704 			rc = ENXIO;
705 			goto done;
706 		}
707 		device_set_softc(pi->dev, pi);
708 	}
709 
710 	/*
711 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
712 	 */
713 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
714 	if (rc != 0)
715 		goto done; /* error message displayed already */
716 
717 	sc->intr_type = iaq.intr_type;
718 	sc->intr_count = iaq.nirq;
719 	sc->flags |= iaq.intr_flags;
720 
721 	s = &sc->sge;
722 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
723 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
724 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
725 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
726 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
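	/*
	 * Worked example (NIC only): a 2-port 10G adapter with nrxq10g = 8 and
	 * ntxq10g = 16 ends up with nrxq = 16, ntxq = 32, neq = 48 + 3 = 51
	 * (the 3 being the per-port ctrl queues plus the mgmt queue), and
	 * niq = 16 + 1 = 17.
	 */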
727 
728 #ifdef TCP_OFFLOAD
729 	if (is_offload(sc)) {
730 
731 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
732 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
733 		s->neq += s->nofldtxq + s->nofldrxq;
734 		s->niq += s->nofldrxq;
735 
736 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
737 		    M_CXGBE, M_ZERO | M_WAITOK);
738 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
739 		    M_CXGBE, M_ZERO | M_WAITOK);
740 	}
741 #endif
742 
743 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
744 	    M_ZERO | M_WAITOK);
745 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
746 	    M_ZERO | M_WAITOK);
747 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
748 	    M_ZERO | M_WAITOK);
749 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
750 	    M_ZERO | M_WAITOK);
751 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
752 	    M_ZERO | M_WAITOK);
753 
754 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
755 	    M_ZERO | M_WAITOK);
756 
757 	t4_init_l2t(sc, M_WAITOK);
758 
759 	/*
760 	 * Second pass over the ports.  This time we know the number of rx and
761 	 * tx queues that each port should get.
762 	 */
763 	rqidx = tqidx = 0;
764 #ifdef TCP_OFFLOAD
765 	ofld_rqidx = ofld_tqidx = 0;
766 #endif
767 	for_each_port(sc, i) {
768 		struct port_info *pi = sc->port[i];
769 
770 		if (pi == NULL)
771 			continue;
772 
773 		pi->first_rxq = rqidx;
774 		pi->first_txq = tqidx;
775 		if (is_10G_port(pi) || is_40G_port(pi)) {
776 			pi->nrxq = iaq.nrxq10g;
777 			pi->ntxq = iaq.ntxq10g;
778 		} else {
779 			pi->nrxq = iaq.nrxq1g;
780 			pi->ntxq = iaq.ntxq1g;
781 		}
782 
783 		rqidx += pi->nrxq;
784 		tqidx += pi->ntxq;
785 
786 #ifdef TCP_OFFLOAD
787 		if (is_offload(sc)) {
788 			pi->first_ofld_rxq = ofld_rqidx;
789 			pi->first_ofld_txq = ofld_tqidx;
790 			if (is_10G_port(pi) || is_40G_port(pi)) {
791 				pi->nofldrxq = iaq.nofldrxq10g;
792 				pi->nofldtxq = iaq.nofldtxq10g;
793 			} else {
794 				pi->nofldrxq = iaq.nofldrxq1g;
795 				pi->nofldtxq = iaq.nofldtxq1g;
796 			}
797 			ofld_rqidx += pi->nofldrxq;
798 			ofld_tqidx += pi->nofldtxq;
799 		}
800 #endif
801 	}
802 
803 	rc = setup_intr_handlers(sc);
804 	if (rc != 0) {
805 		device_printf(dev,
806 		    "failed to setup interrupt handlers: %d\n", rc);
807 		goto done;
808 	}
809 
810 	rc = bus_generic_attach(dev);
811 	if (rc != 0) {
812 		device_printf(dev,
813 		    "failed to attach all child ports: %d\n", rc);
814 		goto done;
815 	}
816 
817 	device_printf(dev,
818 	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
819 	    sc->params.pci.width, sc->params.nports, sc->intr_count,
820 	    sc->intr_type == INTR_MSIX ? "MSI-X" :
821 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
822 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
823 
824 	t4_set_desc(sc);
825 
826 done:
827 	if (rc != 0 && sc->cdev) {
828 		/* cdev was created and so cxgbetool works; recover that way. */
829 		device_printf(dev,
830 		    "error during attach, adapter is now in recovery mode.\n");
831 		rc = 0;
832 	}
833 
834 	if (rc != 0)
835 		t4_detach(dev);
836 	else
837 		t4_sysctls(sc);
838 
839 	return (rc);
840 }
841 
842 /*
843  * Idempotent
844  */
845 static int
846 t4_detach(device_t dev)
847 {
848 	struct adapter *sc;
849 	struct port_info *pi;
850 	int i, rc;
851 
852 	sc = device_get_softc(dev);
853 
854 	if (sc->flags & FULL_INIT_DONE)
855 		t4_intr_disable(sc);
856 
857 	if (sc->cdev) {
858 		destroy_dev(sc->cdev);
859 		sc->cdev = NULL;
860 	}
861 
862 	rc = bus_generic_detach(dev);
863 	if (rc) {
864 		device_printf(dev,
865 		    "failed to detach child devices: %d\n", rc);
866 		return (rc);
867 	}
868 
869 	for (i = 0; i < sc->intr_count; i++)
870 		t4_free_irq(sc, &sc->irq[i]);
871 
872 	for (i = 0; i < MAX_NPORTS; i++) {
873 		pi = sc->port[i];
874 		if (pi) {
875 			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
876 			if (pi->dev)
877 				device_delete_child(dev, pi->dev);
878 
879 			mtx_destroy(&pi->pi_lock);
880 			free(pi, M_CXGBE);
881 		}
882 	}
883 
884 	if (sc->flags & FULL_INIT_DONE)
885 		adapter_full_uninit(sc);
886 
887 	if (sc->flags & FW_OK)
888 		t4_fw_bye(sc, sc->mbox);
889 
890 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
891 		pci_release_msi(dev);
892 
893 	if (sc->regs_res)
894 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
895 		    sc->regs_res);
896 
897 	if (sc->udbs_res)
898 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
899 		    sc->udbs_res);
900 
901 	if (sc->msix_res)
902 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
903 		    sc->msix_res);
904 
905 	if (sc->l2t)
906 		t4_free_l2t(sc->l2t);
907 
908 #ifdef TCP_OFFLOAD
909 	free(sc->sge.ofld_rxq, M_CXGBE);
910 	free(sc->sge.ofld_txq, M_CXGBE);
911 #endif
912 	free(sc->irq, M_CXGBE);
913 	free(sc->sge.rxq, M_CXGBE);
914 	free(sc->sge.txq, M_CXGBE);
915 	free(sc->sge.ctrlq, M_CXGBE);
916 	free(sc->sge.iqmap, M_CXGBE);
917 	free(sc->sge.eqmap, M_CXGBE);
918 	free(sc->tids.ftid_tab, M_CXGBE);
919 	t4_destroy_dma_tag(sc);
920 	if (mtx_initialized(&sc->sc_lock)) {
921 		sx_xlock(&t4_list_lock);
922 		SLIST_REMOVE(&t4_list, sc, adapter, link);
923 		sx_xunlock(&t4_list_lock);
924 		mtx_destroy(&sc->sc_lock);
925 	}
926 
927 	if (mtx_initialized(&sc->tids.ftid_lock))
928 		mtx_destroy(&sc->tids.ftid_lock);
929 	if (mtx_initialized(&sc->sfl_lock))
930 		mtx_destroy(&sc->sfl_lock);
931 	if (mtx_initialized(&sc->ifp_lock))
932 		mtx_destroy(&sc->ifp_lock);
933 
934 	bzero(sc, sizeof(*sc));
935 
936 	return (0);
937 }
938 
939 
940 static int
941 cxgbe_probe(device_t dev)
942 {
943 	char buf[128];
944 	struct port_info *pi = device_get_softc(dev);
945 
946 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
947 	device_set_desc_copy(dev, buf);
948 
949 	return (BUS_PROBE_DEFAULT);
950 }
951 
952 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
953     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
954     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
955 #define T4_CAP_ENABLE (T4_CAP)
956 
957 static int
958 cxgbe_attach(device_t dev)
959 {
960 	struct port_info *pi = device_get_softc(dev);
961 	struct ifnet *ifp;
962 
963 	/* Allocate an ifnet and set it up */
964 	ifp = if_alloc(IFT_ETHER);
965 	if (ifp == NULL) {
966 		device_printf(dev, "Cannot allocate ifnet\n");
967 		return (ENOMEM);
968 	}
969 	pi->ifp = ifp;
970 	ifp->if_softc = pi;
971 
972 	callout_init(&pi->tick, CALLOUT_MPSAFE);
973 
974 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
975 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
976 
977 	ifp->if_init = cxgbe_init;
978 	ifp->if_ioctl = cxgbe_ioctl;
979 	ifp->if_transmit = cxgbe_transmit;
980 	ifp->if_qflush = cxgbe_qflush;
981 
982 	ifp->if_capabilities = T4_CAP;
983 #ifdef TCP_OFFLOAD
984 	if (is_offload(pi->adapter))
985 		ifp->if_capabilities |= IFCAP_TOE;
986 #endif
987 	ifp->if_capenable = T4_CAP_ENABLE;
988 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
989 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
990 
991 	/* Initialize ifmedia for this port */
992 	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
993 	    cxgbe_media_status);
994 	build_medialist(pi);
995 
996 	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
997 	    EVENTHANDLER_PRI_ANY);
998 
999 	ether_ifattach(ifp, pi->hw_addr);
1000 
1001 #ifdef TCP_OFFLOAD
1002 	if (is_offload(pi->adapter)) {
1003 		device_printf(dev,
1004 		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
1005 		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
1006 	} else
1007 #endif
1008 		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
1009 
1010 	cxgbe_sysctls(pi);
1011 
1012 	return (0);
1013 }
1014 
1015 static int
1016 cxgbe_detach(device_t dev)
1017 {
1018 	struct port_info *pi = device_get_softc(dev);
1019 	struct adapter *sc = pi->adapter;
1020 	struct ifnet *ifp = pi->ifp;
1021 
1022 	/* Tell if_ioctl and if_init that the port is going away */
1023 	ADAPTER_LOCK(sc);
1024 	SET_DOOMED(pi);
1025 	wakeup(&sc->flags);
1026 	while (IS_BUSY(sc))
1027 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1028 	SET_BUSY(sc);
1029 #ifdef INVARIANTS
1030 	sc->last_op = "t4detach";
1031 	sc->last_op_thr = curthread;
1032 #endif
1033 	ADAPTER_UNLOCK(sc);
1034 
1035 	if (pi->flags & HAS_TRACEQ) {
1036 		sc->traceq = -1;	/* cloner should not create ifnet */
1037 		t4_tracer_port_detach(sc);
1038 	}
1039 
1040 	if (pi->vlan_c)
1041 		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1042 
1043 	PORT_LOCK(pi);
1044 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1045 	callout_stop(&pi->tick);
1046 	PORT_UNLOCK(pi);
1047 	callout_drain(&pi->tick);
1048 
1049 	/* Let detach proceed even if these fail. */
1050 	cxgbe_uninit_synchronized(pi);
1051 	port_full_uninit(pi);
1052 
1053 	ifmedia_removeall(&pi->media);
1054 	ether_ifdetach(pi->ifp);
1055 	if_free(pi->ifp);
1056 
1057 	ADAPTER_LOCK(sc);
1058 	CLR_BUSY(sc);
1059 	wakeup(&sc->flags);
1060 	ADAPTER_UNLOCK(sc);
1061 
1062 	return (0);
1063 }
1064 
1065 static void
1066 cxgbe_init(void *arg)
1067 {
1068 	struct port_info *pi = arg;
1069 	struct adapter *sc = pi->adapter;
1070 
1071 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1072 		return;
1073 	cxgbe_init_synchronized(pi);
1074 	end_synchronized_op(sc, 0);
1075 }
1076 
1077 static int
1078 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1079 {
1080 	int rc = 0, mtu, flags;
1081 	struct port_info *pi = ifp->if_softc;
1082 	struct adapter *sc = pi->adapter;
1083 	struct ifreq *ifr = (struct ifreq *)data;
1084 	uint32_t mask;
1085 
1086 	switch (cmd) {
1087 	case SIOCSIFMTU:
1088 		mtu = ifr->ifr_mtu;
1089 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1090 			return (EINVAL);
1091 
1092 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1093 		if (rc)
1094 			return (rc);
1095 		ifp->if_mtu = mtu;
1096 		if (pi->flags & PORT_INIT_DONE) {
1097 			t4_update_fl_bufsize(ifp);
1098 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1099 				rc = update_mac_settings(pi, XGMAC_MTU);
1100 		}
1101 		end_synchronized_op(sc, 0);
1102 		break;
1103 
1104 	case SIOCSIFFLAGS:
1105 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1106 		if (rc)
1107 			return (rc);
1108 
1109 		if (ifp->if_flags & IFF_UP) {
1110 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1111 				flags = pi->if_flags;
1112 				if ((ifp->if_flags ^ flags) &
1113 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1114 					rc = update_mac_settings(pi,
1115 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1116 				}
1117 			} else
1118 				rc = cxgbe_init_synchronized(pi);
1119 			pi->if_flags = ifp->if_flags;
1120 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1121 			rc = cxgbe_uninit_synchronized(pi);
1122 		end_synchronized_op(sc, 0);
1123 		break;
1124 
1125 	case SIOCADDMULTI:
1126 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1127 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1128 		if (rc)
1129 			return (rc);
1130 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1131 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
1132 		end_synchronized_op(sc, LOCK_HELD);
1133 		break;
1134 
1135 	case SIOCSIFCAP:
1136 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1137 		if (rc)
1138 			return (rc);
1139 
1140 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1141 		if (mask & IFCAP_TXCSUM) {
1142 			ifp->if_capenable ^= IFCAP_TXCSUM;
1143 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1144 
1145 			if (IFCAP_TSO4 & ifp->if_capenable &&
1146 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1147 				ifp->if_capenable &= ~IFCAP_TSO4;
1148 				if_printf(ifp,
1149 				    "tso4 disabled due to -txcsum.\n");
1150 			}
1151 		}
1152 		if (mask & IFCAP_TXCSUM_IPV6) {
1153 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1154 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1155 
1156 			if (IFCAP_TSO6 & ifp->if_capenable &&
1157 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1158 				ifp->if_capenable &= ~IFCAP_TSO6;
1159 				if_printf(ifp,
1160 				    "tso6 disabled due to -txcsum6.\n");
1161 			}
1162 		}
1163 		if (mask & IFCAP_RXCSUM)
1164 			ifp->if_capenable ^= IFCAP_RXCSUM;
1165 		if (mask & IFCAP_RXCSUM_IPV6)
1166 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1167 
1168 		/*
1169 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1170 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1171 		 * sending a TSO request our way, so it's sufficient to toggle
1172 		 * IFCAP_TSOx only.
1173 		 */
1174 		if (mask & IFCAP_TSO4) {
1175 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1176 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1177 				if_printf(ifp, "enable txcsum first.\n");
1178 				rc = EAGAIN;
1179 				goto fail;
1180 			}
1181 			ifp->if_capenable ^= IFCAP_TSO4;
1182 		}
1183 		if (mask & IFCAP_TSO6) {
1184 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1185 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1186 				if_printf(ifp, "enable txcsum6 first.\n");
1187 				rc = EAGAIN;
1188 				goto fail;
1189 			}
1190 			ifp->if_capenable ^= IFCAP_TSO6;
1191 		}
1192 		if (mask & IFCAP_LRO) {
1193 #if defined(INET) || defined(INET6)
1194 			int i;
1195 			struct sge_rxq *rxq;
1196 
1197 			ifp->if_capenable ^= IFCAP_LRO;
1198 			for_each_rxq(pi, i, rxq) {
1199 				if (ifp->if_capenable & IFCAP_LRO)
1200 					rxq->iq.flags |= IQ_LRO_ENABLED;
1201 				else
1202 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1203 			}
1204 #endif
1205 		}
1206 #ifdef TCP_OFFLOAD
1207 		if (mask & IFCAP_TOE) {
1208 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1209 
1210 			rc = toe_capability(pi, enable);
1211 			if (rc != 0)
1212 				goto fail;
1213 
1214 			ifp->if_capenable ^= mask;
1215 		}
1216 #endif
1217 		if (mask & IFCAP_VLAN_HWTAGGING) {
1218 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1219 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1220 				rc = update_mac_settings(pi, XGMAC_VLANEX);
1221 		}
1222 		if (mask & IFCAP_VLAN_MTU) {
1223 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1224 
1225 			/* Need to find out how to disable auto-mtu-inflation */
1226 		}
1227 		if (mask & IFCAP_VLAN_HWTSO)
1228 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1229 		if (mask & IFCAP_VLAN_HWCSUM)
1230 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1231 
1232 #ifdef VLAN_CAPABILITIES
1233 		VLAN_CAPABILITIES(ifp);
1234 #endif
1235 fail:
1236 		end_synchronized_op(sc, 0);
1237 		break;
1238 
1239 	case SIOCSIFMEDIA:
1240 	case SIOCGIFMEDIA:
1241 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1242 		break;
1243 
1244 	default:
1245 		rc = ether_ioctl(ifp, cmd, data);
1246 	}
1247 
1248 	return (rc);
1249 }
1250 
1251 static int
1252 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1253 {
1254 	struct port_info *pi = ifp->if_softc;
1255 	struct adapter *sc = pi->adapter;
1256 	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1257 	struct buf_ring *br;
1258 	int rc;
1259 
1260 	M_ASSERTPKTHDR(m);
1261 
1262 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1263 		m_freem(m);
1264 		return (ENETDOWN);
1265 	}
1266 
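	/*
	 * Use the flow ID to pin a flow to a single tx queue so that its
	 * packets are not reordered across queues.
	 */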
1267 	if (m->m_flags & M_FLOWID)
1268 		txq += (m->m_pkthdr.flowid % pi->ntxq);
1269 	br = txq->br;
1270 
1271 	if (TXQ_TRYLOCK(txq) == 0) {
1272 		struct sge_eq *eq = &txq->eq;
1273 
1274 		/*
1275 		 * It is possible that t4_eth_tx finishes up and releases the
1276 		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
1277 		 * need to make sure that this mbuf doesn't just sit there in
1278 		 * the drbr.
1279 		 */
1280 
1281 		rc = drbr_enqueue(ifp, br, m);
1282 		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1283 		    !(eq->flags & EQ_DOOMED))
1284 			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1285 		return (rc);
1286 	}
1287 
1288 	/*
1289 	 * txq->m is the mbuf that is held up due to a temporary shortage of
1290 	 * resources and it should be put on the wire first.  Then what's in
1291 	 * drbr and finally the mbuf that was just passed in to us.
1292 	 *
1293 	 * Return code should indicate the fate of the mbuf that was passed in
1294 	 * this time.
1295 	 */
1296 
1297 	TXQ_LOCK_ASSERT_OWNED(txq);
1298 	if (drbr_needs_enqueue(ifp, br) || txq->m) {
1299 
1300 		/* Queued for transmission. */
1301 
1302 		rc = drbr_enqueue(ifp, br, m);
1303 		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1304 		(void) t4_eth_tx(ifp, txq, m);
1305 		TXQ_UNLOCK(txq);
1306 		return (rc);
1307 	}
1308 
1309 	/* Direct transmission. */
1310 	rc = t4_eth_tx(ifp, txq, m);
1311 	if (rc != 0 && txq->m)
1312 		rc = 0;	/* held, will be transmitted soon (hopefully) */
1313 
1314 	TXQ_UNLOCK(txq);
1315 	return (rc);
1316 }
1317 
1318 static void
1319 cxgbe_qflush(struct ifnet *ifp)
1320 {
1321 	struct port_info *pi = ifp->if_softc;
1322 	struct sge_txq *txq;
1323 	int i;
1324 	struct mbuf *m;
1325 
1326 	/* queues do not exist if !PORT_INIT_DONE. */
1327 	if (pi->flags & PORT_INIT_DONE) {
1328 		for_each_txq(pi, i, txq) {
1329 			TXQ_LOCK(txq);
1330 			m_freem(txq->m);
1331 			txq->m = NULL;
1332 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1333 				m_freem(m);
1334 			TXQ_UNLOCK(txq);
1335 		}
1336 	}
1337 	if_qflush(ifp);
1338 }
1339 
1340 static int
1341 cxgbe_media_change(struct ifnet *ifp)
1342 {
1343 	struct port_info *pi = ifp->if_softc;
1344 
1345 	device_printf(pi->dev, "%s unimplemented.\n", __func__);
1346 
1347 	return (EOPNOTSUPP);
1348 }
1349 
1350 static void
1351 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1352 {
1353 	struct port_info *pi = ifp->if_softc;
1354 	struct ifmedia_entry *cur = pi->media.ifm_cur;
1355 	int speed = pi->link_cfg.speed;
1356 	int data = (pi->port_type << 8) | pi->mod_type;
1357 
1358 	if (cur->ifm_data != data) {
1359 		build_medialist(pi);
1360 		cur = pi->media.ifm_cur;
1361 	}
1362 
1363 	ifmr->ifm_status = IFM_AVALID;
1364 	if (!pi->link_cfg.link_ok)
1365 		return;
1366 
1367 	ifmr->ifm_status |= IFM_ACTIVE;
1368 
1369 	/* active and current will differ iff current media is autoselect. */
1370 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1371 		return;
1372 
1373 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1374 	if (speed == SPEED_10000)
1375 		ifmr->ifm_active |= IFM_10G_T;
1376 	else if (speed == SPEED_1000)
1377 		ifmr->ifm_active |= IFM_1000_T;
1378 	else if (speed == SPEED_100)
1379 		ifmr->ifm_active |= IFM_100_TX;
1380 	else if (speed == SPEED_10)
1381 		ifmr->ifm_active |= IFM_10_T;
1382 	else
1383 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1384 			    speed));
1385 }
1386 
1387 void
1388 t4_fatal_err(struct adapter *sc)
1389 {
1390 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1391 	t4_intr_disable(sc);
1392 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1393 	    device_get_nameunit(sc->dev));
1394 }
1395 
1396 static int
1397 map_bars_0_and_4(struct adapter *sc)
1398 {
1399 	sc->regs_rid = PCIR_BAR(0);
1400 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1401 	    &sc->regs_rid, RF_ACTIVE);
1402 	if (sc->regs_res == NULL) {
1403 		device_printf(sc->dev, "cannot map registers.\n");
1404 		return (ENXIO);
1405 	}
1406 	sc->bt = rman_get_bustag(sc->regs_res);
1407 	sc->bh = rman_get_bushandle(sc->regs_res);
1408 	sc->mmio_len = rman_get_size(sc->regs_res);
1409 	setbit(&sc->doorbells, DOORBELL_KDB);
1410 
1411 	sc->msix_rid = PCIR_BAR(4);
1412 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1413 	    &sc->msix_rid, RF_ACTIVE);
1414 	if (sc->msix_res == NULL) {
1415 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1416 		return (ENXIO);
1417 	}
1418 
1419 	return (0);
1420 }
1421 
1422 static int
1423 map_bar_2(struct adapter *sc)
1424 {
1425 
1426 	/*
1427 	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
1428 	 * to map it if RDMA is disabled.
1429 	 */
1430 	if (is_t4(sc) && sc->rdmacaps == 0)
1431 		return (0);
1432 
1433 	sc->udbs_rid = PCIR_BAR(2);
1434 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1435 	    &sc->udbs_rid, RF_ACTIVE);
1436 	if (sc->udbs_res == NULL) {
1437 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
1438 		return (ENXIO);
1439 	}
1440 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
1441 
1442 	if (is_t5(sc)) {
1443 		setbit(&sc->doorbells, DOORBELL_UDB);
1444 #if defined(__i386__) || defined(__amd64__)
1445 		if (t5_write_combine) {
1446 			int rc;
1447 
1448 			/*
1449 			 * Enable write combining on BAR2.  This is the
1450 			 * userspace doorbell BAR and is split into 128B
1451 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
1452 			 * with an egress queue.  The first 64B has the doorbell
1453 			 * and the second 64B can be used to submit a tx work
1454 			 * request with an implicit doorbell.
1455 			 */
1456 
1457 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1458 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1459 			if (rc == 0) {
1460 				clrbit(&sc->doorbells, DOORBELL_UDB);
1461 				setbit(&sc->doorbells, DOORBELL_WCWR);
1462 				setbit(&sc->doorbells, DOORBELL_UDBWC);
1463 			} else {
1464 				device_printf(sc->dev,
1465 				    "couldn't enable write combining: %d\n",
1466 				    rc);
1467 			}
1468 
1469 			t4_write_reg(sc, A_SGE_STAT_CFG,
1470 			    V_STATSOURCE_T5(7) | V_STATMODE(0));
1471 		}
1472 #endif
1473 	}
1474 
1475 	return (0);
1476 }
1477 
1478 static const struct memwin t4_memwin[] = {
1479 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1480 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1481 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1482 };
1483 
1484 static const struct memwin t5_memwin[] = {
1485 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1486 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1487 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1488 };
1489 
1490 static void
1491 setup_memwin(struct adapter *sc)
1492 {
1493 	const struct memwin *mw;
1494 	int i, n;
1495 	uint32_t bar0;
1496 
1497 	if (is_t4(sc)) {
1498 		/*
1499 		 * Read low 32b of bar0 indirectly via the hardware backdoor
1500 		 * mechanism.  Works from within PCI passthrough environments
1501 		 * too, where rman_get_start() can return a different value.  We
1502 		 * need to program the T4 memory window decoders with the actual
1503 		 * addresses that will be coming across the PCIe link.
1504 		 */
1505 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1506 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1507 
1508 		mw = &t4_memwin[0];
1509 		n = nitems(t4_memwin);
1510 	} else {
1511 		/* T5 uses the relative offset inside the PCIe BAR */
1512 		bar0 = 0;
1513 
1514 		mw = &t5_memwin[0];
1515 		n = nitems(t5_memwin);
1516 	}
1517 
1518 	for (i = 0; i < n; i++, mw++) {
1519 		t4_write_reg(sc,
1520 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1521 		    (mw->base + bar0) | V_BIR(0) |
1522 		    V_WINDOW(ilog2(mw->aperture) - 10));
1523 	}
1524 
1525 	/* flush */
1526 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1527 }
1528 
1529 /*
1530  * Verify that the memory range specified by the addr/len pair is valid and lies
1531  * entirely within a single region (EDCx or MCx).
1532  */
1533 static int
1534 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1535 {
1536 	uint32_t em, addr_len, maddr, mlen;
1537 
1538 	/* Memory can only be accessed in naturally aligned 4 byte units */
1539 	if (addr & 3 || len & 3 || len == 0)
1540 		return (EINVAL);
1541 
1542 	/* Enabled memories */
1543 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1544 	if (em & F_EDRAM0_ENABLE) {
1545 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1546 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1547 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1548 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549 		    addr + len <= maddr + mlen)
1550 			return (0);
1551 	}
1552 	if (em & F_EDRAM1_ENABLE) {
1553 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1554 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1555 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1556 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1557 		    addr + len <= maddr + mlen)
1558 			return (0);
1559 	}
1560 	if (em & F_EXT_MEM_ENABLE) {
1561 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1562 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1563 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1564 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1565 		    addr + len <= maddr + mlen)
1566 			return (0);
1567 	}
1568 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1569 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1570 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1571 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1572 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1573 		    addr + len <= maddr + mlen)
1574 			return (0);
1575 	}
1576 
1577 	return (EFAULT);
1578 }
1579 
1580 static int
1581 fwmtype_to_hwmtype(int mtype)
1582 {
1583 
1584 	switch (mtype) {
1585 	case FW_MEMTYPE_EDC0:
1586 		return (MEM_EDC0);
1587 	case FW_MEMTYPE_EDC1:
1588 		return (MEM_EDC1);
1589 	case FW_MEMTYPE_EXTMEM:
1590 		return (MEM_MC0);
1591 	case FW_MEMTYPE_EXTMEM1:
1592 		return (MEM_MC1);
1593 	default:
1594 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1595 	}
1596 }
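/*
 * MEM_MC0 is an alias for MEM_MC (see t4_ioctl.h), which is why the
 * FW_MEMTYPE_EXTMEM translation above is matched by "case MEM_MC:" in
 * validate_mt_off_len() below.
 */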
1597 
1598 /*
1599  * Verify that the memory range specified by the memtype/offset/len pair is
1600  * valid and lies entirely within the memtype specified.  The global address of
1601  * the start of the range is returned in addr.
1602  */
1603 static int
1604 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1605     uint32_t *addr)
1606 {
1607 	uint32_t em, addr_len, maddr, mlen;
1608 
1609 	/* Memory can only be accessed in naturally aligned 4 byte units */
1610 	if (off & 3 || len & 3 || len == 0)
1611 		return (EINVAL);
1612 
1613 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1614 	switch (fwmtype_to_hwmtype(mtype)) {
1615 	case MEM_EDC0:
1616 		if (!(em & F_EDRAM0_ENABLE))
1617 			return (EINVAL);
1618 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1619 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1620 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1621 		break;
1622 	case MEM_EDC1:
1623 		if (!(em & F_EDRAM1_ENABLE))
1624 			return (EINVAL);
1625 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1626 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1627 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1628 		break;
1629 	case MEM_MC:
1630 		if (!(em & F_EXT_MEM_ENABLE))
1631 			return (EINVAL);
1632 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1633 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1634 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1635 		break;
1636 	case MEM_MC1:
1637 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1638 			return (EINVAL);
1639 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1640 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1641 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1642 		break;
1643 	default:
1644 		return (EINVAL);
1645 	}
1646 
1647 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1648 		*addr = maddr + off;	/* global address */
1649 		return (0);
1650 	}
1651 
1652 	return (EFAULT);
1653 }
1654 
1655 static void
1656 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1657 {
1658 	const struct memwin *mw;
1659 
1660 	if (is_t4(sc)) {
1661 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1662 		    ("%s: incorrect memwin# (%d)", __func__, win));
1663 		mw = &t4_memwin[win];
1664 	} else {
1665 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1666 		    ("%s: incorrect memwin# (%d)", __func__, win));
1667 		mw = &t5_memwin[win];
1668 	}
1669 
1670 	if (base != NULL)
1671 		*base = mw->base;
1672 	if (aperture != NULL)
1673 		*aperture = mw->aperture;
1674 }
1675 
1676 /*
1677  * Positions the memory window such that it can be used to access the specified
1678  * address in the chip's address space.  The return value is the offset of addr
1679  * from the start of the window.
1680  */
1681 static uint32_t
1682 position_memwin(struct adapter *sc, int n, uint32_t addr)
1683 {
1684 	uint32_t start, pf;
1685 	uint32_t reg;
1686 
1687 	KASSERT(n >= 0 && n <= 3,
1688 	    ("%s: invalid window %d.", __func__, n));
1689 	KASSERT((addr & 3) == 0,
1690 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1691 
1692 	if (is_t4(sc)) {
1693 		pf = 0;
1694 		start = addr & ~0xf;	/* start must be 16B aligned */
1695 	} else {
1696 		pf = V_PFNUM(sc->pf);
1697 		start = addr & ~0x7f;	/* start must be 128B aligned */
1698 	}
1699 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1700 
1701 	t4_write_reg(sc, reg, start | pf);
1702 	t4_read_reg(sc, reg);
1703 
1704 	return (addr - start);
1705 }
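/*
 * Typical usage sketch: off = position_memwin(sc, win, addr), followed by
 * t4_read_reg(sc, base + off) accesses within the window's aperture, where
 * base and aperture come from memwin_info() above.
 */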
1706 
1707 static int
1708 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1709     struct intrs_and_queues *iaq)
1710 {
1711 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1712 	int nofldrxq10g = 0, nofldrxq1g = 0;
1713 
1714 	bzero(iaq, sizeof(*iaq));
1715 
1716 	iaq->ntxq10g = t4_ntxq10g;
1717 	iaq->ntxq1g = t4_ntxq1g;
1718 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1719 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1720 #ifdef TCP_OFFLOAD
1721 	if (is_offload(sc)) {
1722 		iaq->nofldtxq10g = t4_nofldtxq10g;
1723 		iaq->nofldtxq1g = t4_nofldtxq1g;
1724 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1725 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1726 	}
1727 #endif
1728 
1729 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1730 
1731 		if ((itype & t4_intr_types) == 0)
1732 			continue;	/* not allowed */
1733 
1734 		if (itype == INTR_MSIX)
1735 			navail = pci_msix_count(sc->dev);
1736 		else if (itype == INTR_MSI)
1737 			navail = pci_msi_count(sc->dev);
1738 		else
1739 			navail = 1;
1740 restart:
1741 		if (navail == 0)
1742 			continue;
1743 
1744 		iaq->intr_type = itype;
1745 		iaq->intr_flags = 0;
1746 
1747 		/*
1748 		 * Best option: an interrupt vector for errors, one for the
1749 		 * firmware event queue, and one each for each rxq (NIC as well
1750 		 * as offload).
1751 		 */
1752 		iaq->nirq = T4_EXTRA_INTR;
1753 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1754 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
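		/*
		 * e.g. with 2 x 10G ports, NIC queues only, and nrxq10g = 8:
		 * nirq = 2 (error + fw event) + 2 * 8 = 18 vectors here.
		 */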
1755 		if (iaq->nirq <= navail &&
1756 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1757 			iaq->intr_flags |= INTR_DIRECT;
1758 			goto allocate;
1759 		}
1760 
1761 		/*
1762 		 * Second best option: an interrupt vector for errors, one for
1763 		 * the firmware event queue, and one each for either NIC or
1764 		 * offload rxq's.
1765 		 */
1766 		iaq->nirq = T4_EXTRA_INTR;
1767 		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1768 		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1769 		if (iaq->nirq <= navail &&
1770 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
1771 			goto allocate;
1772 
1773 		/*
1774 		 * Next best option: an interrupt vector for errors, one for the
1775 		 * firmware event queue, and at least one per port.  At this
1776 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
1777 		 * what's available to us.
1778 		 */
1779 		iaq->nirq = T4_EXTRA_INTR;
1780 		iaq->nirq += n10g + n1g;
1781 		if (iaq->nirq <= navail) {
1782 			int leftover = navail - iaq->nirq;
1783 
1784 			if (n10g > 0) {
1785 				int target = max(nrxq10g, nofldrxq10g);
1786 
1787 				n = 1;
1788 				while (n < target && leftover >= n10g) {
1789 					leftover -= n10g;
1790 					iaq->nirq += n10g;
1791 					n++;
1792 				}
1793 				iaq->nrxq10g = min(n, nrxq10g);
1794 #ifdef TCP_OFFLOAD
1795 				if (is_offload(sc))
1796 					iaq->nofldrxq10g = min(n, nofldrxq10g);
1797 #endif
1798 			}
1799 
1800 			if (n1g > 0) {
1801 				int target = max(nrxq1g, nofldrxq1g);
1802 
1803 				n = 1;
1804 				while (n < target && leftover >= n1g) {
1805 					leftover -= n1g;
1806 					iaq->nirq += n1g;
1807 					n++;
1808 				}
1809 				iaq->nrxq1g = min(n, nrxq1g);
1810 #ifdef TCP_OFFLOAD
1811 				if (is_offload(sc))
1812 					iaq->nofldrxq1g = min(n, nofldrxq1g);
1813 #endif
1814 			}
1815 
1816 			if (itype != INTR_MSI || powerof2(iaq->nirq))
1817 				goto allocate;
1818 		}
1819 
1820 		/*
1821 		 * Least desirable option: one interrupt vector for everything.
1822 		 */
1823 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1824 #ifdef TCP_OFFLOAD
1825 		if (is_offload(sc))
1826 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1827 #endif
1828 
1829 allocate:
1830 		navail = iaq->nirq;
1831 		rc = 0;
1832 		if (itype == INTR_MSIX)
1833 			rc = pci_alloc_msix(sc->dev, &navail);
1834 		else if (itype == INTR_MSI)
1835 			rc = pci_alloc_msi(sc->dev, &navail);
1836 
1837 		if (rc == 0) {
1838 			if (navail == iaq->nirq)
1839 				return (0);
1840 
1841 			/*
1842 			 * Didn't get the number requested.  Use whatever number
1843 			 * the kernel is willing to allocate (it's in navail).
1844 			 */
1845 			device_printf(sc->dev, "fewer vectors than requested, "
1846 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1847 			    itype, iaq->nirq, navail);
1848 			pci_release_msi(sc->dev);
1849 			goto restart;
1850 		}
1851 
1852 		device_printf(sc->dev,
1853 		    "failed to allocate vectors: %d (type=%d, req=%d, rcvd=%d)\n",
1854 		    rc, itype, iaq->nirq, navail);
1855 	}
1856 
1857 	device_printf(sc->dev,
1858 	    "failed to find a usable interrupt type.  "
1859 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1860 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1861 
1862 	return (ENXIO);
1863 }
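
/*
 * Worked example (editorial; the counts are made up): two 10G ports with
 * nrxq10g = 8 and nofldrxq10g = 2, plus T4_EXTRA_INTR (one vector for
 * errors and one for the firmware event queue):
 *
 *	best:		2 + 2 * (8 + 2) = 22 vectors, everything direct
 *	second best:	2 + 2 * max(8, 2) = 18, NIC/offload share vectors
 *	downsized:	2 + 2 = 4 and up, one more rxq per port per round
 *	last resort:	1 vector for everything
 *
 * MSI additionally requires the total to be a power of 2 at every step.
 */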
1864 
1865 #define FW_VERSION(chip) ( \
1866     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1867     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1868     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1869     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1870 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
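
/*
 * For example (editorial), FW_VERSION(T4) packs T4FW_VERSION_MAJOR/MINOR/
 * MICRO/BUILD into a single 32-bit word using the V_FW_HDR_FW_VER_* field
 * macros, and FW_INTFVER(T4, NIC) expands to T4FW_HDR_INTFVER_NIC.
 */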
1871 
1872 struct fw_info {
1873 	uint8_t chip;
1874 	char *kld_name;
1875 	char *fw_mod_name;
1876 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1877 } fw_info[] = {
1878 	{
1879 		.chip = CHELSIO_T4,
1880 		.kld_name = "t4fw_cfg",
1881 		.fw_mod_name = "t4fw",
1882 		.fw_hdr = {
1883 			.chip = FW_HDR_CHIP_T4,
1884 			.fw_ver = htobe32_const(FW_VERSION(T4)),
1885 			.intfver_nic = FW_INTFVER(T4, NIC),
1886 			.intfver_vnic = FW_INTFVER(T4, VNIC),
1887 			.intfver_ofld = FW_INTFVER(T4, OFLD),
1888 			.intfver_ri = FW_INTFVER(T4, RI),
1889 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1890 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1891 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1892 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1893 		},
1894 	}, {
1895 		.chip = CHELSIO_T5,
1896 		.kld_name = "t5fw_cfg",
1897 		.fw_mod_name = "t5fw",
1898 		.fw_hdr = {
1899 			.chip = FW_HDR_CHIP_T5,
1900 			.fw_ver = htobe32_const(FW_VERSION(T5)),
1901 			.intfver_nic = FW_INTFVER(T5, NIC),
1902 			.intfver_vnic = FW_INTFVER(T5, VNIC),
1903 			.intfver_ofld = FW_INTFVER(T5, OFLD),
1904 			.intfver_ri = FW_INTFVER(T5, RI),
1905 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1906 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1907 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1908 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1909 		},
1910 	}
1911 };
1912 
1913 static struct fw_info *
1914 find_fw_info(int chip)
1915 {
1916 	int i;
1917 
1918 	for (i = 0; i < nitems(fw_info); i++) {
1919 		if (fw_info[i].chip == chip)
1920 			return (&fw_info[i]);
1921 	}
1922 	return (NULL);
1923 }
1924 
1925 /*
1926  * Is the given firmware API compatible with the one the driver was compiled
1927  * with?
1928  */
1929 static int
1930 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1931 {
1932 
1933 	/* short circuit if it's the exact same firmware version */
1934 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1935 		return (1);
1936 
1937 	/*
1938 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1939 	 * features that are supported in the driver.
1940 	 */
1941 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1942 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1943 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1944 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1945 		return (1);
1946 #undef SAME_INTF
1947 
1948 	return (0);
1949 }
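
/*
 * Note (editorial): because only the intfver_* fields are compared above,
 * two firmwares whose build numbers differ but whose interface versions
 * all match are still considered compatible.
 */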
1950 
1951 /*
1952  * The firmware in the KLD is usable, but should it be installed?  This routine
1953  * explains itself in detail if it says yes.  t4_fw_install: 0 = never install,
1954  * 2 = install on any version mismatch, else only if card fw is unusable/older.
1955  */
1956 static int
1957 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1958 {
1959 	const char *reason;
1960 
1961 	if (!card_fw_usable) {
1962 		reason = "incompatible or unusable";
1963 		goto install;
1964 	}
1965 
1966 	if (k > c) {
1967 		reason = "older than the version bundled with this driver";
1968 		goto install;
1969 	}
1970 
1971 	if (t4_fw_install == 2 && k != c) {
1972 		reason = "different than the version bundled with this driver";
1973 		goto install;
1974 	}
1975 
1976 	return (0);
1977 
1978 install:
1979 	if (t4_fw_install == 0) {
1980 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1981 		    "but the driver is prohibited from installing a different "
1982 		    "firmware on the card.\n",
1983 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1984 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1985 
1986 		return (0);
1987 	}
1988 
1989 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1990 	    "installing firmware %u.%u.%u.%u on card.\n",
1991 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1992 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1993 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1994 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1995 
1996 	return (1);
1997 }

1998 /*
1999  * Establish contact with the firmware and determine if we are the master driver
2000  * or not, and whether we are responsible for chip initialization.
2001  */
2002 static int
2003 prep_firmware(struct adapter *sc)
2004 {
2005 	const struct firmware *fw = NULL, *default_cfg;
2006 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2007 	enum dev_state state;
2008 	struct fw_info *fw_info;
2009 	struct fw_hdr *card_fw;		/* fw on the card */
2010 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2011 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2012 					   against */
2013 
2014 	/* Contact firmware. */
2015 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2016 	if (rc < 0 || state == DEV_STATE_ERR) {
2017 		rc = -rc;
2018 		device_printf(sc->dev,
2019 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2020 		return (rc);
2021 	}
2022 	pf = rc;
2023 	if (pf == sc->mbox)
2024 		sc->flags |= MASTER_PF;
2025 	else if (state == DEV_STATE_UNINIT) {
2026 		/*
2027 		 * We didn't get to be the master so we definitely won't be
2028 		 * configuring the chip.  It's a bug if someone else hasn't
2029 		 * configured it already.
2030 		 */
2031 		device_printf(sc->dev, "couldn't become master (%d) and the "
2032 		    "device is not initialized either (%d).\n", rc, state);
2033 		return (EDOOFUS);
2034 	}
2035 
2036 	/* This is the firmware whose headers the driver was compiled against */
2037 	fw_info = find_fw_info(chip_id(sc));
2038 	if (fw_info == NULL) {
2039 		device_printf(sc->dev,
2040 		    "unable to look up firmware information for chip %d.\n",
2041 		    chip_id(sc));
2042 		return (EINVAL);
2043 	}
2044 	drv_fw = &fw_info->fw_hdr;
2045 
2046 	/*
2047 	 * The firmware KLD contains many modules.  The KLD name is also the
2048 	 * name of the module that contains the default config file.
2049 	 */
2050 	default_cfg = firmware_get(fw_info->kld_name);
2051 
2052 	/* Read the header of the firmware on the card */
2053 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2054 	rc = -t4_read_flash(sc, FLASH_FW_START,
2055 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2056 	if (rc == 0)
2057 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2058 	else {
2059 		device_printf(sc->dev,
2060 		    "Unable to read card's firmware header: %d\n", rc);
2061 		card_fw_usable = 0;
2062 	}
2063 
2064 	/* This is the firmware in the KLD */
2065 	fw = firmware_get(fw_info->fw_mod_name);
2066 	if (fw != NULL) {
2067 		kld_fw = (const void *)fw->data;
2068 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2069 	} else {
2070 		kld_fw = NULL;
2071 		kld_fw_usable = 0;
2072 	}
2073 
2074 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2075 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2076 		/*
2077 		 * Common case: the firmware on the card is an exact match and
2078 		 * the KLD is an exact match too, or the KLD is
2079 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2080 		 * here -- use cxgbetool loadfw if you want to reinstall the
2081 		 * same firmware as the one on the card.
2082 		 */
2083 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2084 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2085 	    be32toh(card_fw->fw_ver))) {
2086 
2087 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2088 		if (rc != 0) {
2089 			device_printf(sc->dev,
2090 			    "failed to install firmware: %d\n", rc);
2091 			goto done;
2092 		}
2093 
2094 		/* Installed successfully, update the cached header too. */
2095 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2096 		card_fw_usable = 1;
2097 		need_fw_reset = 0;	/* already reset as part of load_fw */
2098 	}
2099 
2100 	if (!card_fw_usable) {
2101 		uint32_t d, c, k;
2102 
2103 		d = ntohl(drv_fw->fw_ver);
2104 		c = ntohl(card_fw->fw_ver);
2105 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2106 
2107 		device_printf(sc->dev, "Cannot find a usable firmware: "
2108 		    "fw_install %d, chip state %d, "
2109 		    "driver compiled with %d.%d.%d.%d, "
2110 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2111 		    t4_fw_install, state,
2112 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2113 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2114 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2115 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2116 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2117 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2118 		rc = EINVAL;
2119 		goto done;
2120 	}
2121 
2122 	/* We're using whatever's on the card and it's known to be good. */
2123 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2124 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2125 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2126 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2127 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2128 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2129 	t4_get_tp_version(sc, &sc->params.tp_vers);
2130 
2131 	/* Reset device */
2132 	if (need_fw_reset &&
2133 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2134 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2135 		if (rc != ETIMEDOUT && rc != EIO)
2136 			t4_fw_bye(sc, sc->mbox);
2137 		goto done;
2138 	}
2139 	sc->flags |= FW_OK;
2140 
2141 	rc = get_params__pre_init(sc);
2142 	if (rc != 0)
2143 		goto done; /* error message displayed already */
2144 
2145 	/* Partition adapter resources as specified in the config file. */
2146 	if (state == DEV_STATE_UNINIT) {
2147 
2148 		KASSERT(sc->flags & MASTER_PF,
2149 		    ("%s: trying to change chip settings when not master.",
2150 		    __func__));
2151 
2152 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2153 		if (rc != 0)
2154 			goto done;	/* error message displayed already */
2155 
2156 		t4_tweak_chip_settings(sc);
2157 
2158 		/* get basic stuff going */
2159 		rc = -t4_fw_initialize(sc, sc->mbox);
2160 		if (rc != 0) {
2161 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2162 			goto done;
2163 		}
2164 	} else {
2165 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2166 		sc->cfcsum = 0;
2167 	}
2168 
2169 done:
2170 	free(card_fw, M_CXGBE);
2171 	if (fw != NULL)
2172 		firmware_put(fw, FIRMWARE_UNLOAD);
2173 	if (default_cfg != NULL)
2174 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2175 
2176 	return (rc);
2177 }
2178 
2179 #define FW_PARAM_DEV(param) \
2180 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2181 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2182 #define FW_PARAM_PFVF(param) \
2183 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2184 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
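
/*
 * E.g. (editorial) FW_PARAM_DEV(PORTVEC) composes the 32-bit parameter id
 * for the device-level PORTVEC parameter from the mnemonic and parameter
 * index fields; the result is passed unmodified to t4_query_params() /
 * t4_set_params().
 */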
2185 
2186 /*
2187  * Partition chip resources for use between various PFs, VFs, etc.
2188  */
2189 static int
2190 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2191     const char *name_prefix)
2192 {
2193 	const struct firmware *cfg = NULL;
2194 	int rc = 0;
2195 	struct fw_caps_config_cmd caps;
2196 	uint32_t mtype, moff, finicsum, cfcsum;
2197 
2198 	/*
2199 	 * Figure out what configuration file to use.  Pick the default config
2200 	 * file for the card if the user hasn't specified one explicitly.
2201 	 */
2202 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2203 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2204 		/* Card specific overrides go here. */
2205 		if (pci_get_device(sc->dev) == 0x440a)
2206 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2207 		if (is_fpga(sc))
2208 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2209 	}
2210 
2211 	/*
2212 	 * We need to load another module if the profile is anything except
2213 	 * "default" or "flash".
2214 	 */
2215 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2216 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2217 		char s[32];
2218 
2219 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2220 		cfg = firmware_get(s);
2221 		if (cfg == NULL) {
2222 			if (default_cfg != NULL) {
2223 				device_printf(sc->dev,
2224 				    "unable to load module \"%s\" for "
2225 				    "configuration profile \"%s\", will use "
2226 				    "the default config file instead.\n",
2227 				    s, sc->cfg_file);
2228 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2229 				    "%s", DEFAULT_CF);
2230 			} else {
2231 				device_printf(sc->dev,
2232 				    "unable to load module \"%s\" for "
2233 				    "configuration profile \"%s\", will use "
2234 				    "the config file on the card's flash "
2235 				    "instead.\n", s, sc->cfg_file);
2236 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2237 				    "%s", FLASH_CF);
2238 			}
2239 		}
2240 	}
2241 
2242 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2243 	    default_cfg == NULL) {
2244 		device_printf(sc->dev,
2245 		    "default config file not available, will use the config "
2246 		    "file on the card's flash instead.\n");
2247 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2248 	}
2249 
2250 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2251 		u_int cflen, i, n;
2252 		const uint32_t *cfdata;
2253 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2254 
2255 		KASSERT(cfg != NULL || default_cfg != NULL,
2256 		    ("%s: no config to upload", __func__));
2257 
2258 		/*
2259 		 * Ask the firmware where it wants us to upload the config file.
2260 		 */
2261 		param = FW_PARAM_DEV(CF);
2262 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2263 		if (rc != 0) {
2264 			/* No support for config file?  Shouldn't happen. */
2265 			device_printf(sc->dev,
2266 			    "failed to query config file location: %d.\n", rc);
2267 			goto done;
2268 		}
2269 		mtype = G_FW_PARAMS_PARAM_Y(val);
2270 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2271 
2272 		/*
2273 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2274 		 * useless stuffing/comments at the end of the config file so
2275 		 * it's ok to simply throw away the last remaining bytes when
2276 		 * the config file is not an exact multiple of 4.  This also
2277 		 * helps with the validate_mt_off_len check.
2278 		 */
2279 		if (cfg != NULL) {
2280 			cflen = cfg->datasize & ~3;
2281 			cfdata = cfg->data;
2282 		} else {
2283 			cflen = default_cfg->datasize & ~3;
2284 			cfdata = default_cfg->data;
2285 		}
2286 
2287 		if (cflen > FLASH_CFG_MAX_SIZE) {
2288 			device_printf(sc->dev,
2289 			    "config file too long (%d, max allowed is %d).  "
2290 			    "Will try to use the config on the card, if any.\n",
2291 			    cflen, FLASH_CFG_MAX_SIZE);
2292 			goto use_config_on_flash;
2293 		}
2294 
2295 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2296 		if (rc != 0) {
2297 			device_printf(sc->dev,
2298 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2299 			    "Will try to use the config on the card, if any.\n",
2300 			    __func__, mtype, moff, cflen, rc);
2301 			goto use_config_on_flash;
2302 		}
2303 
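		/*
		 * Copy the config file into card memory through memory
		 * window 2, one aperture-sized chunk at a time: position the
		 * window over addr, write 4 bytes per register access, then
		 * slide the window forward.
		 */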
2304 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2305 		while (cflen) {
2306 			off = position_memwin(sc, 2, addr);
2307 			n = min(cflen, mw_aperture - off);
2308 			for (i = 0; i < n; i += 4)
2309 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2310 			cflen -= n;
2311 			addr += n;
2312 		}
2313 	} else {
2314 use_config_on_flash:
2315 		mtype = FW_MEMTYPE_FLASH;
2316 		moff = t4_flash_cfg_addr(sc);
2317 	}
2318 
2319 	bzero(&caps, sizeof(caps));
2320 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2321 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2322 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2323 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2324 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2325 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2326 	if (rc != 0) {
2327 		device_printf(sc->dev,
2328 		    "failed to pre-process config file: %d "
2329 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2330 		goto done;
2331 	}
2332 
2333 	finicsum = be32toh(caps.finicsum);
2334 	cfcsum = be32toh(caps.cfcsum);
2335 	if (finicsum != cfcsum) {
2336 		device_printf(sc->dev,
2337 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2338 		    finicsum, cfcsum);
2339 	}
2340 	sc->cfcsum = cfcsum;
2341 
2342 #define LIMIT_CAPS(x) do { \
2343 	caps.x &= htobe16(t4_##x##_allowed); \
2344 	sc->x = be16toh(caps.x);	/* store in host byte order */ \
2345 } while (0)
2346 
2347 	/*
2348 	 * Let the firmware know what features will (not) be used so it can tune
2349 	 * things accordingly.
2350 	 */
2351 	LIMIT_CAPS(linkcaps);
2352 	LIMIT_CAPS(niccaps);
2353 	LIMIT_CAPS(toecaps);
2354 	LIMIT_CAPS(rdmacaps);
2355 	LIMIT_CAPS(iscsicaps);
2356 	LIMIT_CAPS(fcoecaps);
2357 #undef LIMIT_CAPS
2358 
2359 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2360 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2361 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2362 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2363 	if (rc != 0) {
2364 		device_printf(sc->dev,
2365 		    "failed to process config file: %d.\n", rc);
2366 	}
2367 done:
2368 	if (cfg != NULL)
2369 		firmware_put(cfg, FIRMWARE_UNLOAD);
2370 	return (rc);
2371 }
2372 
2373 /*
2374  * Retrieve parameters that are needed (or nice to have) very early.
2375  */
2376 static int
2377 get_params__pre_init(struct adapter *sc)
2378 {
2379 	int rc;
2380 	uint32_t param[2], val[2];
2381 	struct fw_devlog_cmd cmd;
2382 	struct devlog_params *dlog = &sc->params.devlog;
2383 
2384 	param[0] = FW_PARAM_DEV(PORTVEC);
2385 	param[1] = FW_PARAM_DEV(CCLK);
2386 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2387 	if (rc != 0) {
2388 		device_printf(sc->dev,
2389 		    "failed to query parameters (pre_init): %d.\n", rc);
2390 		return (rc);
2391 	}
2392 
2393 	sc->params.portvec = val[0];
2394 	sc->params.nports = bitcount32(val[0]);
2395 	sc->params.vpd.cclk = val[1];
2396 
2397 	/* Read device log parameters. */
2398 	bzero(&cmd, sizeof(cmd));
2399 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2400 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2401 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2402 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2403 	if (rc != 0) {
2404 		device_printf(sc->dev,
2405 		    "failed to get devlog parameters: %d.\n", rc);
2406 		bzero(dlog, sizeof (*dlog));
2407 		rc = 0;	/* devlog isn't critical for device operation */
2408 	} else {
2409 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2410 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2411 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2412 		dlog->size = be32toh(cmd.memsize_devlog);
2413 	}
2414 
2415 	return (rc);
2416 }
2417 
2418 /*
2419  * Retrieve various parameters that are of interest to the driver.  The device
2420  * has been initialized by the firmware at this point.
2421  */
2422 static int
2423 get_params__post_init(struct adapter *sc)
2424 {
2425 	int rc;
2426 	uint32_t param[7], val[7];
2427 	struct fw_caps_config_cmd caps;
2428 
2429 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2430 	param[1] = FW_PARAM_PFVF(EQ_START);
2431 	param[2] = FW_PARAM_PFVF(FILTER_START);
2432 	param[3] = FW_PARAM_PFVF(FILTER_END);
2433 	param[4] = FW_PARAM_PFVF(L2T_START);
2434 	param[5] = FW_PARAM_PFVF(L2T_END);
2435 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2436 	if (rc != 0) {
2437 		device_printf(sc->dev,
2438 		    "failed to query parameters (post_init): %d.\n", rc);
2439 		return (rc);
2440 	}
2441 
2442 	sc->sge.iq_start = val[0];
2443 	sc->sge.eq_start = val[1];
2444 	sc->tids.ftid_base = val[2];
2445 	sc->tids.nftids = val[3] - val[2] + 1;
2446 	sc->vres.l2t.start = val[4];
2447 	sc->vres.l2t.size = val[5] - val[4] + 1;
2448 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2449 	    ("%s: L2 table size (%u) larger than expected (%u)",
2450 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2451 
2452 	/* get capabilities */
2453 	bzero(&caps, sizeof(caps));
2454 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2455 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2456 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2457 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2458 	if (rc != 0) {
2459 		device_printf(sc->dev,
2460 		    "failed to get card capabilities: %d.\n", rc);
2461 		return (rc);
2462 	}
2463 
2464 	if (caps.toecaps) {
2465 		/* query offload-related parameters */
2466 		param[0] = FW_PARAM_DEV(NTID);
2467 		param[1] = FW_PARAM_PFVF(SERVER_START);
2468 		param[2] = FW_PARAM_PFVF(SERVER_END);
2469 		param[3] = FW_PARAM_PFVF(TDDP_START);
2470 		param[4] = FW_PARAM_PFVF(TDDP_END);
2471 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2472 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2473 		if (rc != 0) {
2474 			device_printf(sc->dev,
2475 			    "failed to query TOE parameters: %d.\n", rc);
2476 			return (rc);
2477 		}
2478 		sc->tids.ntids = val[0];
2479 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2480 		sc->tids.stid_base = val[1];
2481 		sc->tids.nstids = val[2] - val[1] + 1;
2482 		sc->vres.ddp.start = val[3];
2483 		sc->vres.ddp.size = val[4] - val[3] + 1;
2484 		sc->params.ofldq_wr_cred = val[5];
2485 		sc->params.offload = 1;
2486 	}
2487 	if (caps.rdmacaps) {
2488 		param[0] = FW_PARAM_PFVF(STAG_START);
2489 		param[1] = FW_PARAM_PFVF(STAG_END);
2490 		param[2] = FW_PARAM_PFVF(RQ_START);
2491 		param[3] = FW_PARAM_PFVF(RQ_END);
2492 		param[4] = FW_PARAM_PFVF(PBL_START);
2493 		param[5] = FW_PARAM_PFVF(PBL_END);
2494 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2495 		if (rc != 0) {
2496 			device_printf(sc->dev,
2497 			    "failed to query RDMA parameters(1): %d.\n", rc);
2498 			return (rc);
2499 		}
2500 		sc->vres.stag.start = val[0];
2501 		sc->vres.stag.size = val[1] - val[0] + 1;
2502 		sc->vres.rq.start = val[2];
2503 		sc->vres.rq.size = val[3] - val[2] + 1;
2504 		sc->vres.pbl.start = val[4];
2505 		sc->vres.pbl.size = val[5] - val[4] + 1;
2506 
2507 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2508 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2509 		param[2] = FW_PARAM_PFVF(CQ_START);
2510 		param[3] = FW_PARAM_PFVF(CQ_END);
2511 		param[4] = FW_PARAM_PFVF(OCQ_START);
2512 		param[5] = FW_PARAM_PFVF(OCQ_END);
2513 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2514 		if (rc != 0) {
2515 			device_printf(sc->dev,
2516 			    "failed to query RDMA parameters(2): %d.\n", rc);
2517 			return (rc);
2518 		}
2519 		sc->vres.qp.start = val[0];
2520 		sc->vres.qp.size = val[1] - val[0] + 1;
2521 		sc->vres.cq.start = val[2];
2522 		sc->vres.cq.size = val[3] - val[2] + 1;
2523 		sc->vres.ocq.start = val[4];
2524 		sc->vres.ocq.size = val[5] - val[4] + 1;
2525 	}
2526 	if (caps.iscsicaps) {
2527 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2528 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2529 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2530 		if (rc != 0) {
2531 			device_printf(sc->dev,
2532 			    "failed to query iSCSI parameters: %d.\n", rc);
2533 			return (rc);
2534 		}
2535 		sc->vres.iscsi.start = val[0];
2536 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2537 	}
2538 
2539 	/*
2540 	 * We've got the params we wanted to query via the firmware.  Now grab
2541 	 * some others directly from the chip.
2542 	 */
2543 	rc = t4_read_chip_settings(sc);
2544 
2545 	return (rc);
2546 }
2547 
2548 static int
2549 set_params__post_init(struct adapter *sc)
2550 {
2551 	uint32_t param, val;
2552 
2553 	/* ask for encapsulated CPLs */
2554 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2555 	val = 1;
2556 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2557 
2558 	return (0);
2559 }
2560 
2561 #undef FW_PARAM_PFVF
2562 #undef FW_PARAM_DEV
2563 
2564 static void
2565 t4_set_desc(struct adapter *sc)
2566 {
2567 	char buf[128];
2568 	struct adapter_params *p = &sc->params;
2569 
2570 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2571 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2572 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2573 
2574 	device_set_desc_copy(sc->dev, buf);
2575 }
2576 
2577 static void
2578 build_medialist(struct port_info *pi)
2579 {
2580 	struct ifmedia *media = &pi->media;
2581 	int data, m;
2582 
2583 	PORT_LOCK(pi);
2584 
2585 	ifmedia_removeall(media);
2586 
2587 	m = IFM_ETHER | IFM_FDX;
2588 	data = (pi->port_type << 8) | pi->mod_type;
2589 
2590 	switch (pi->port_type) {
2591 	case FW_PORT_TYPE_BT_XFI:
2592 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2593 		break;
2594 
2595 	case FW_PORT_TYPE_BT_XAUI:
2596 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2597 		/* fall through */
2598 
2599 	case FW_PORT_TYPE_BT_SGMII:
2600 		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2601 		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2602 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2603 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2604 		break;
2605 
2606 	case FW_PORT_TYPE_CX4:
2607 		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2608 		ifmedia_set(media, m | IFM_10G_CX4);
2609 		break;
2610 
2611 	case FW_PORT_TYPE_SFP:
2612 	case FW_PORT_TYPE_FIBER_XFI:
2613 	case FW_PORT_TYPE_FIBER_XAUI:
2614 		switch (pi->mod_type) {
2615 
2616 		case FW_PORT_MOD_TYPE_LR:
2617 			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2618 			ifmedia_set(media, m | IFM_10G_LR);
2619 			break;
2620 
2621 		case FW_PORT_MOD_TYPE_SR:
2622 			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2623 			ifmedia_set(media, m | IFM_10G_SR);
2624 			break;
2625 
2626 		case FW_PORT_MOD_TYPE_LRM:
2627 			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2628 			ifmedia_set(media, m | IFM_10G_LRM);
2629 			break;
2630 
2631 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2632 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2633 			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2634 			ifmedia_set(media, m | IFM_10G_TWINAX);
2635 			break;
2636 
2637 		case FW_PORT_MOD_TYPE_NONE:
2638 			m &= ~IFM_FDX;
2639 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2640 			ifmedia_set(media, m | IFM_NONE);
2641 			break;
2642 
2643 		case FW_PORT_MOD_TYPE_NA:
2644 		case FW_PORT_MOD_TYPE_ER:
2645 		default:
2646 			device_printf(pi->dev,
2647 			    "unknown port_type (%d), mod_type (%d)\n",
2648 			    pi->port_type, pi->mod_type);
2649 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2650 			ifmedia_set(media, m | IFM_UNKNOWN);
2651 			break;
2652 		}
2653 		break;
2654 
2655 	case FW_PORT_TYPE_QSFP:
2656 		switch (pi->mod_type) {
2657 
2658 		case FW_PORT_MOD_TYPE_LR:
2659 			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2660 			ifmedia_set(media, m | IFM_40G_LR4);
2661 			break;
2662 
2663 		case FW_PORT_MOD_TYPE_SR:
2664 			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2665 			ifmedia_set(media, m | IFM_40G_SR4);
2666 			break;
2667 
2668 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2669 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2670 			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2671 			ifmedia_set(media, m | IFM_40G_CR4);
2672 			break;
2673 
2674 		case FW_PORT_MOD_TYPE_NONE:
2675 			m &= ~IFM_FDX;
2676 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2677 			ifmedia_set(media, m | IFM_NONE);
2678 			break;
2679 
2680 		default:
2681 			device_printf(pi->dev,
2682 			    "unknown port_type (%d), mod_type (%d)\n",
2683 			    pi->port_type, pi->mod_type);
2684 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2685 			ifmedia_set(media, m | IFM_UNKNOWN);
2686 			break;
2687 		}
2688 		break;
2689 
2690 	default:
2691 		device_printf(pi->dev,
2692 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2693 		    pi->mod_type);
2694 		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2695 		ifmedia_set(media, m | IFM_UNKNOWN);
2696 		break;
2697 	}
2698 
2699 	PORT_UNLOCK(pi);
2700 }
2701 
2702 #define FW_MAC_EXACT_CHUNK	7
2703 
2704 /*
2705  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2706  * indicates which parameters should be programmed (the rest are left alone).
2707  */
2708 static int
2709 update_mac_settings(struct port_info *pi, int flags)
2710 {
2711 	int rc;
2712 	struct ifnet *ifp = pi->ifp;
2713 	struct adapter *sc = pi->adapter;
2714 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2715 
2716 	ASSERT_SYNCHRONIZED_OP(sc);
2717 	KASSERT(flags, ("%s: not told what to update.", __func__));
2718 
2719 	if (flags & XGMAC_MTU)
2720 		mtu = ifp->if_mtu;
2721 
2722 	if (flags & XGMAC_PROMISC)
2723 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2724 
2725 	if (flags & XGMAC_ALLMULTI)
2726 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2727 
2728 	if (flags & XGMAC_VLANEX)
2729 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2730 
2731 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2732 	    vlanex, false);
2733 	if (rc) {
2734 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2735 		return (rc);
2736 	}
2737 
2738 	if (flags & XGMAC_UCADDR) {
2739 		uint8_t ucaddr[ETHER_ADDR_LEN];
2740 
2741 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2742 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2743 		    ucaddr, true, true);
2744 		if (rc < 0) {
2745 			rc = -rc;
2746 			if_printf(ifp, "change_mac failed: %d\n", rc);
2747 			return (rc);
2748 		} else {
2749 			pi->xact_addr_filt = rc;
2750 			rc = 0;
2751 		}
2752 	}
2753 
2754 	if (flags & XGMAC_MCADDRS) {
2755 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2756 		int del = 1;
2757 		uint64_t hash = 0;
2758 		struct ifmultiaddr *ifma;
2759 		int i = 0, j;
2760 
2761 		if_maddr_rlock(ifp);
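		/*
		 * Program the multicast addresses as exact-match filters in
		 * chunks of FW_MAC_EXACT_CHUNK; the first call (del = 1)
		 * also replaces whatever was programmed earlier.  Addresses
		 * that do not get an exact filter are accumulated in 'hash'
		 * and committed by t4_set_addr_hash() at the end.
		 */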
2762 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2763 			if (ifma->ifma_addr->sa_family != AF_LINK)
2764 				continue;
2765 			mcaddr[i++] =
2766 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2767 
2768 			if (i == FW_MAC_EXACT_CHUNK) {
2769 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2770 				    del, i, mcaddr, NULL, &hash, 0);
2771 				if (rc < 0) {
2772 					rc = -rc;
2773 					for (j = 0; j < i; j++) {
2774 						if_printf(ifp,
2775 						    "failed to add mc address"
2776 						    " %02x:%02x:%02x:"
2777 						    "%02x:%02x:%02x rc=%d\n",
2778 						    mcaddr[j][0], mcaddr[j][1],
2779 						    mcaddr[j][2], mcaddr[j][3],
2780 						    mcaddr[j][4], mcaddr[j][5],
2781 						    rc);
2782 					}
2783 					goto mcfail;
2784 				}
2785 				del = 0;
2786 				i = 0;
2787 			}
2788 		}
2789 		if (i > 0) {
2790 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2791 			    del, i, mcaddr, NULL, &hash, 0);
2792 			if (rc < 0) {
2793 				rc = -rc;
2794 				for (j = 0; j < i; j++) {
2795 					if_printf(ifp,
2796 					    "failed to add mc address"
2797 					    " %02x:%02x:%02x:"
2798 					    "%02x:%02x:%02x rc=%d\n",
2799 					    mcaddr[j][0], mcaddr[j][1],
2800 					    mcaddr[j][2], mcaddr[j][3],
2801 					    mcaddr[j][4], mcaddr[j][5],
2802 					    rc);
2803 				}
2804 				goto mcfail;
2805 			}
2806 		}
2807 
2808 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2809 		if (rc != 0)
2810 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2811 mcfail:
2812 		if_maddr_runlock(ifp);
2813 	}
2814 
2815 	return (rc);
2816 }
2817 
2818 int
2819 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2820     char *wmesg)
2821 {
2822 	int rc, pri;
2823 
2824 #ifdef WITNESS
2825 	/* the caller thinks it's ok to sleep, but is it really? */
2826 	if (flags & SLEEP_OK)
2827 		pause("t4slptst", 1);
2828 #endif
2829 
2830 	if (flags & INTR_OK)
2831 		pri = PCATCH;
2832 	else
2833 		pri = 0;
2834 
2835 	ADAPTER_LOCK(sc);
2836 	for (;;) {
2837 
2838 		if (pi && IS_DOOMED(pi)) {
2839 			rc = ENXIO;
2840 			goto done;
2841 		}
2842 
2843 		if (!IS_BUSY(sc)) {
2844 			rc = 0;
2845 			break;
2846 		}
2847 
2848 		if (!(flags & SLEEP_OK)) {
2849 			rc = EBUSY;
2850 			goto done;
2851 		}
2852 
2853 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2854 			rc = EINTR;
2855 			goto done;
2856 		}
2857 	}
2858 
2859 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2860 	SET_BUSY(sc);
2861 #ifdef INVARIANTS
2862 	sc->last_op = wmesg;
2863 	sc->last_op_thr = curthread;
2864 #endif
2865 
2866 done:
2867 	if (!(flags & HOLD_LOCK) || rc)
2868 		ADAPTER_UNLOCK(sc);
2869 
2870 	return (rc);
2871 }
2872 
2873 void
2874 end_synchronized_op(struct adapter *sc, int flags)
2875 {
2876 
2877 	if (flags & LOCK_HELD)
2878 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2879 	else
2880 		ADAPTER_LOCK(sc);
2881 
2882 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2883 	CLR_BUSY(sc);
2884 	wakeup(&sc->flags);
2885 	ADAPTER_UNLOCK(sc);
2886 }
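
/*
 * Editorial sketch of the usual calling pattern ("t4xyz" is a made-up
 * wait message):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc)
 *		return (rc);
 *	... the caller now owns the adapter; IS_BUSY(sc) is set ...
 *	end_synchronized_op(sc, 0);
 */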
2887 
2888 static int
2889 cxgbe_init_synchronized(struct port_info *pi)
2890 {
2891 	struct adapter *sc = pi->adapter;
2892 	struct ifnet *ifp = pi->ifp;
2893 	int rc = 0;
2894 
2895 	ASSERT_SYNCHRONIZED_OP(sc);
2896 
2897 	if (isset(&sc->open_device_map, pi->port_id)) {
2898 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2899 		    ("mismatch between open_device_map and if_drv_flags"));
2900 		return (0);	/* already running */
2901 	}
2902 
2903 	if (!(sc->flags & FULL_INIT_DONE) &&
2904 	    ((rc = adapter_full_init(sc)) != 0))
2905 		return (rc);	/* error message displayed already */
2906 
2907 	if (!(pi->flags & PORT_INIT_DONE) &&
2908 	    ((rc = port_full_init(pi)) != 0))
2909 		return (rc); /* error message displayed already */
2910 
2911 	rc = update_mac_settings(pi, XGMAC_ALL);
2912 	if (rc)
2913 		goto done;	/* error message displayed already */
2914 
2915 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2916 	if (rc != 0) {
2917 		if_printf(ifp, "start_link failed: %d\n", rc);
2918 		goto done;
2919 	}
2920 
2921 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2922 	if (rc != 0) {
2923 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2924 		goto done;
2925 	}
2926 
2927 	/*
2928 	 * The first iq of the first port to come up is used for tracing.
2929 	 */
2930 	if (sc->traceq < 0) {
2931 		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2932 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
2933 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2934 		    V_QUEUENUMBER(sc->traceq));
2935 		pi->flags |= HAS_TRACEQ;
2936 	}
2937 
2938 	/* all ok */
2939 	setbit(&sc->open_device_map, pi->port_id);
2940 	PORT_LOCK(pi);
2941 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2942 	PORT_UNLOCK(pi);
2943 
2944 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2945 done:
2946 	if (rc != 0)
2947 		cxgbe_uninit_synchronized(pi);
2948 
2949 	return (rc);
2950 }
2951 
2952 /*
2953  * Idempotent.
2954  */
2955 static int
2956 cxgbe_uninit_synchronized(struct port_info *pi)
2957 {
2958 	struct adapter *sc = pi->adapter;
2959 	struct ifnet *ifp = pi->ifp;
2960 	int rc;
2961 
2962 	ASSERT_SYNCHRONIZED_OP(sc);
2963 
2964 	/*
2965 	 * Disable the VI so that all its data in either direction is discarded
2966 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2967 	 * tick) intact as the TP can deliver negative advice or data that it's
2968 	 * holding in its RAM (for an offloaded connection) even after the VI is
2969 	 * disabled.
2970 	 */
2971 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2972 	if (rc) {
2973 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2974 		return (rc);
2975 	}
2976 
2977 	clrbit(&sc->open_device_map, pi->port_id);
2978 	PORT_LOCK(pi);
2979 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2980 	PORT_UNLOCK(pi);
2981 
2982 	pi->link_cfg.link_ok = 0;
2983 	pi->link_cfg.speed = 0;
2984 	pi->linkdnrc = -1;
2985 	t4_os_link_changed(sc, pi->port_id, 0, -1);
2986 
2987 	return (0);
2988 }
2989 
2990 /*
2991  * It is ok for this function to fail midway and return right away.  t4_detach
2992  * will walk the entire sc->irq list and clean up whatever is valid.
2993  */
2994 static int
2995 setup_intr_handlers(struct adapter *sc)
2996 {
2997 	int rc, rid, p, q;
2998 	char s[8];
2999 	struct irq *irq;
3000 	struct port_info *pi;
3001 	struct sge_rxq *rxq;
3002 #ifdef TCP_OFFLOAD
3003 	struct sge_ofld_rxq *ofld_rxq;
3004 #endif
3005 
3006 	/*
3007 	 * Set up interrupts.
3008 	 */
3009 	irq = &sc->irq[0];
3010 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3011 	if (sc->intr_count == 1) {
3012 		KASSERT(!(sc->flags & INTR_DIRECT),
3013 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
3014 
3015 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3016 		if (rc != 0)
3017 			return (rc);
3018 	} else {
3019 		/* Multiple interrupts. */
3020 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3021 		    ("%s: too few intr.", __func__));
3022 
3023 		/* The first one is always error intr */
3024 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3025 		if (rc != 0)
3026 			return (rc);
3027 		irq++;
3028 		rid++;
3029 
3030 		/* The second one is always the firmware event queue */
3031 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3032 		    "evt");
3033 		if (rc != 0)
3034 			return (rc);
3035 		irq++;
3036 		rid++;
3037 
3038 		/*
3039 		 * Note that if INTR_DIRECT is not set then exactly one of the
3040 		 * NIC rx queues or the TOE rx queues (never both) will be
3041 		 * taking direct interrupts.
3042 		 *
3043 		 * There is no need to check for is_offload(sc) as nofldrxq
3044 		 * will be 0 if offload is disabled.
3045 		 */
3046 		for_each_port(sc, p) {
3047 			pi = sc->port[p];
3048 
3049 #ifdef TCP_OFFLOAD
3050 			/*
3051 			 * Skip over the NIC queues if they aren't taking direct
3052 			 * interrupts.
3053 			 */
3054 			if (!(sc->flags & INTR_DIRECT) &&
3055 			    pi->nofldrxq > pi->nrxq)
3056 				goto ofld_queues;
3057 #endif
3058 			rxq = &sc->sge.rxq[pi->first_rxq];
3059 			for (q = 0; q < pi->nrxq; q++, rxq++) {
3060 				snprintf(s, sizeof(s), "%d.%d", p, q);
3061 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3062 				    s);
3063 				if (rc != 0)
3064 					return (rc);
3065 				irq++;
3066 				rid++;
3067 			}
3068 
3069 #ifdef TCP_OFFLOAD
3070 			/*
3071 			 * Skip over the offload queues if they aren't taking
3072 			 * direct interrupts.
3073 			 */
3074 			if (!(sc->flags & INTR_DIRECT))
3075 				continue;
3076 ofld_queues:
3077 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3078 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3079 				snprintf(s, sizeof(s), "%d,%d", p, q);
3080 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3081 				    ofld_rxq, s);
3082 				if (rc != 0)
3083 					return (rc);
3084 				irq++;
3085 				rid++;
3086 			}
3087 #endif
3088 		}
3089 	}
3090 
3091 	return (0);
3092 }
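
/*
 * Editorial example (hypothetical counts): with MSI-X, two ports, and four
 * direct NIC rxqs per port, the rids assigned above are 1 ("err"),
 * 2 ("evt"), 3-6 for port 0 ("0.0" .. "0.3"), and 7-10 for port 1
 * ("1.0" .. "1.3").
 */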
3093 
3094 static int
3095 adapter_full_init(struct adapter *sc)
3096 {
3097 	int rc, i;
3098 
3099 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3100 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3101 	    ("%s: FULL_INIT_DONE already", __func__));
3102 
3103 	/*
3104 	 * Set up the queues that belong to the adapter (not any particular port).
3105 	 */
3106 	rc = t4_setup_adapter_queues(sc);
3107 	if (rc != 0)
3108 		goto done;
3109 
3110 	for (i = 0; i < nitems(sc->tq); i++) {
3111 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3112 		    taskqueue_thread_enqueue, &sc->tq[i]);
3113 		if (sc->tq[i] == NULL) {
3114 			device_printf(sc->dev,
3115 			    "failed to allocate task queue %d\n", i);
3116 			rc = ENOMEM;
3117 			goto done;
3118 		}
3119 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3120 		    device_get_nameunit(sc->dev), i);
3121 	}
3122 
3123 	t4_intr_enable(sc);
3124 	sc->flags |= FULL_INIT_DONE;
3125 done:
3126 	if (rc != 0)
3127 		adapter_full_uninit(sc);
3128 
3129 	return (rc);
3130 }
3131 
3132 static int
3133 adapter_full_uninit(struct adapter *sc)
3134 {
3135 	int i;
3136 
3137 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3138 
3139 	t4_teardown_adapter_queues(sc);
3140 
3141 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3142 		taskqueue_free(sc->tq[i]);
3143 		sc->tq[i] = NULL;
3144 	}
3145 
3146 	sc->flags &= ~FULL_INIT_DONE;
3147 
3148 	return (0);
3149 }
3150 
3151 static int
3152 port_full_init(struct port_info *pi)
3153 {
3154 	struct adapter *sc = pi->adapter;
3155 	struct ifnet *ifp = pi->ifp;
3156 	uint16_t *rss;
3157 	struct sge_rxq *rxq;
3158 	int rc, i;
3159 
3160 	ASSERT_SYNCHRONIZED_OP(sc);
3161 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3162 	    ("%s: PORT_INIT_DONE already", __func__));
3163 
3164 	sysctl_ctx_init(&pi->ctx);
3165 	pi->flags |= PORT_SYSCTL_CTX;
3166 
3167 	/*
3168 	 * Allocate tx/rx/fl queues for this port.
3169 	 */
3170 	rc = t4_setup_port_queues(pi);
3171 	if (rc != 0)
3172 		goto done;	/* error message displayed already */
3173 
3174 	/*
3175 	 * Setup RSS for this port.
3176 	 */
3177 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3178 	    M_ZERO | M_WAITOK);
3179 	for_each_rxq(pi, i, rxq) {
3180 		rss[i] = rxq->iq.abs_id;
3181 	}
3182 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3183 	    pi->rss_size, rss, pi->nrxq);
3184 	free(rss, M_CXGBE);
3185 	if (rc != 0) {
3186 		if_printf(ifp, "rss_config failed: %d\n", rc);
3187 		goto done;
3188 	}
3189 
3190 	pi->flags |= PORT_INIT_DONE;
3191 done:
3192 	if (rc != 0)
3193 		port_full_uninit(pi);
3194 
3195 	return (rc);
3196 }
3197 
3198 /*
3199  * Idempotent.
3200  */
3201 static int
3202 port_full_uninit(struct port_info *pi)
3203 {
3204 	struct adapter *sc = pi->adapter;
3205 	int i;
3206 	struct sge_rxq *rxq;
3207 	struct sge_txq *txq;
3208 #ifdef TCP_OFFLOAD
3209 	struct sge_ofld_rxq *ofld_rxq;
3210 	struct sge_wrq *ofld_txq;
3211 #endif
3212 
3213 	if (pi->flags & PORT_INIT_DONE) {
3214 
3215 		/* Need to quiesce queues.  XXX: ctrl queues? */
3216 
3217 		for_each_txq(pi, i, txq) {
3218 			quiesce_eq(sc, &txq->eq);
3219 		}
3220 
3221 #ifdef TCP_OFFLOAD
3222 		for_each_ofld_txq(pi, i, ofld_txq) {
3223 			quiesce_eq(sc, &ofld_txq->eq);
3224 		}
3225 #endif
3226 
3227 		for_each_rxq(pi, i, rxq) {
3228 			quiesce_iq(sc, &rxq->iq);
3229 			quiesce_fl(sc, &rxq->fl);
3230 		}
3231 
3232 #ifdef TCP_OFFLOAD
3233 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3234 			quiesce_iq(sc, &ofld_rxq->iq);
3235 			quiesce_fl(sc, &ofld_rxq->fl);
3236 		}
3237 #endif
3238 	}
3239 
3240 	t4_teardown_port_queues(pi);
3241 	pi->flags &= ~PORT_INIT_DONE;
3242 
3243 	return (0);
3244 }
3245 
3246 static void
3247 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3248 {
3249 	EQ_LOCK(eq);
3250 	eq->flags |= EQ_DOOMED;
3251 
3252 	/*
3253 	 * Wait for the response to a credit flush if one's
3254 	 * pending.
3255 	 */
3256 	while (eq->flags & EQ_CRFLUSHED)
3257 		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3258 	EQ_UNLOCK(eq);
3259 
3260 	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3261 	pause("callout", 10);		/* Still iffy */
3262 
3263 	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3264 }
3265 
3266 static void
3267 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3268 {
3269 	(void) sc;	/* unused */
3270 
3271 	/* Synchronize with the interrupt handler */
3272 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3273 		pause("iqfree", 1);
3274 }
3275 
3276 static void
3277 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3278 {
3279 	mtx_lock(&sc->sfl_lock);
3280 	FL_LOCK(fl);
3281 	fl->flags |= FL_DOOMED;
3282 	FL_UNLOCK(fl);
3283 	mtx_unlock(&sc->sfl_lock);
3284 
3285 	callout_drain(&sc->sfl_callout);
3286 	KASSERT((fl->flags & FL_STARVING) == 0,
3287 	    ("%s: still starving", __func__));
3288 }
3289 
3290 static int
3291 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3292     driver_intr_t *handler, void *arg, char *name)
3293 {
3294 	int rc;
3295 
3296 	irq->rid = rid;
3297 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3298 	    RF_SHAREABLE | RF_ACTIVE);
3299 	if (irq->res == NULL) {
3300 		device_printf(sc->dev,
3301 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3302 		return (ENOMEM);
3303 	}
3304 
3305 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3306 	    NULL, handler, arg, &irq->tag);
3307 	if (rc != 0) {
3308 		device_printf(sc->dev,
3309 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3310 		    rid, name, rc);
3311 	} else if (name)
3312 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3313 
3314 	return (rc);
3315 }
3316 
3317 static int
3318 t4_free_irq(struct adapter *sc, struct irq *irq)
3319 {
3320 	if (irq->tag)
3321 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3322 	if (irq->res)
3323 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3324 
3325 	bzero(irq, sizeof(*irq));
3326 
3327 	return (0);
3328 }
3329 
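/*
 * Read registers [start, end] into buf; the data lands at offset 'start'
 * within buf, so buf must be the base of a full register dump.
 */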
3330 static void
3331 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3332     unsigned int end)
3333 {
3334 	uint32_t *p = (uint32_t *)(buf + start);
3335 
3336 	for ( ; start <= end; start += sizeof(uint32_t))
3337 		*p++ = t4_read_reg(sc, start);
3338 }
3339 
3340 static void
3341 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3342 {
3343 	int i, n;
3344 	const unsigned int *reg_ranges;
3345 	static const unsigned int t4_reg_ranges[] = {
3346 		0x1008, 0x1108,
3347 		0x1180, 0x11b4,
3348 		0x11fc, 0x123c,
3349 		0x1300, 0x173c,
3350 		0x1800, 0x18fc,
3351 		0x3000, 0x30d8,
3352 		0x30e0, 0x5924,
3353 		0x5960, 0x59d4,
3354 		0x5a00, 0x5af8,
3355 		0x6000, 0x6098,
3356 		0x6100, 0x6150,
3357 		0x6200, 0x6208,
3358 		0x6240, 0x6248,
3359 		0x6280, 0x6338,
3360 		0x6370, 0x638c,
3361 		0x6400, 0x643c,
3362 		0x6500, 0x6524,
3363 		0x6a00, 0x6a38,
3364 		0x6a60, 0x6a78,
3365 		0x6b00, 0x6b84,
3366 		0x6bf0, 0x6c84,
3367 		0x6cf0, 0x6d84,
3368 		0x6df0, 0x6e84,
3369 		0x6ef0, 0x6f84,
3370 		0x6ff0, 0x7084,
3371 		0x70f0, 0x7184,
3372 		0x71f0, 0x7284,
3373 		0x72f0, 0x7384,
3374 		0x73f0, 0x7450,
3375 		0x7500, 0x7530,
3376 		0x7600, 0x761c,
3377 		0x7680, 0x76cc,
3378 		0x7700, 0x7798,
3379 		0x77c0, 0x77fc,
3380 		0x7900, 0x79fc,
3381 		0x7b00, 0x7c38,
3382 		0x7d00, 0x7efc,
3383 		0x8dc0, 0x8e1c,
3384 		0x8e30, 0x8e78,
3385 		0x8ea0, 0x8f6c,
3386 		0x8fc0, 0x9074,
3387 		0x90fc, 0x90fc,
3388 		0x9400, 0x9458,
3389 		0x9600, 0x96bc,
3390 		0x9800, 0x9808,
3391 		0x9820, 0x983c,
3392 		0x9850, 0x9864,
3393 		0x9c00, 0x9c6c,
3394 		0x9c80, 0x9cec,
3395 		0x9d00, 0x9d6c,
3396 		0x9d80, 0x9dec,
3397 		0x9e00, 0x9e6c,
3398 		0x9e80, 0x9eec,
3399 		0x9f00, 0x9f6c,
3400 		0x9f80, 0x9fec,
3401 		0xd004, 0xd03c,
3402 		0xdfc0, 0xdfe0,
3403 		0xe000, 0xea7c,
3404 		0xf000, 0x11110,
3405 		0x11118, 0x11190,
3406 		0x19040, 0x1906c,
3407 		0x19078, 0x19080,
3408 		0x1908c, 0x19124,
3409 		0x19150, 0x191b0,
3410 		0x191d0, 0x191e8,
3411 		0x19238, 0x1924c,
3412 		0x193f8, 0x19474,
3413 		0x19490, 0x194f8,
3414 		0x19800, 0x19f30,
3415 		0x1a000, 0x1a06c,
3416 		0x1a0b0, 0x1a120,
3417 		0x1a128, 0x1a138,
3418 		0x1a190, 0x1a1c4,
3419 		0x1a1fc, 0x1a1fc,
3420 		0x1e040, 0x1e04c,
3421 		0x1e284, 0x1e28c,
3422 		0x1e2c0, 0x1e2c0,
3423 		0x1e2e0, 0x1e2e0,
3424 		0x1e300, 0x1e384,
3425 		0x1e3c0, 0x1e3c8,
3426 		0x1e440, 0x1e44c,
3427 		0x1e684, 0x1e68c,
3428 		0x1e6c0, 0x1e6c0,
3429 		0x1e6e0, 0x1e6e0,
3430 		0x1e700, 0x1e784,
3431 		0x1e7c0, 0x1e7c8,
3432 		0x1e840, 0x1e84c,
3433 		0x1ea84, 0x1ea8c,
3434 		0x1eac0, 0x1eac0,
3435 		0x1eae0, 0x1eae0,
3436 		0x1eb00, 0x1eb84,
3437 		0x1ebc0, 0x1ebc8,
3438 		0x1ec40, 0x1ec4c,
3439 		0x1ee84, 0x1ee8c,
3440 		0x1eec0, 0x1eec0,
3441 		0x1eee0, 0x1eee0,
3442 		0x1ef00, 0x1ef84,
3443 		0x1efc0, 0x1efc8,
3444 		0x1f040, 0x1f04c,
3445 		0x1f284, 0x1f28c,
3446 		0x1f2c0, 0x1f2c0,
3447 		0x1f2e0, 0x1f2e0,
3448 		0x1f300, 0x1f384,
3449 		0x1f3c0, 0x1f3c8,
3450 		0x1f440, 0x1f44c,
3451 		0x1f684, 0x1f68c,
3452 		0x1f6c0, 0x1f6c0,
3453 		0x1f6e0, 0x1f6e0,
3454 		0x1f700, 0x1f784,
3455 		0x1f7c0, 0x1f7c8,
3456 		0x1f840, 0x1f84c,
3457 		0x1fa84, 0x1fa8c,
3458 		0x1fac0, 0x1fac0,
3459 		0x1fae0, 0x1fae0,
3460 		0x1fb00, 0x1fb84,
3461 		0x1fbc0, 0x1fbc8,
3462 		0x1fc40, 0x1fc4c,
3463 		0x1fe84, 0x1fe8c,
3464 		0x1fec0, 0x1fec0,
3465 		0x1fee0, 0x1fee0,
3466 		0x1ff00, 0x1ff84,
3467 		0x1ffc0, 0x1ffc8,
3468 		0x20000, 0x2002c,
3469 		0x20100, 0x2013c,
3470 		0x20190, 0x201c8,
3471 		0x20200, 0x20318,
3472 		0x20400, 0x20528,
3473 		0x20540, 0x20614,
3474 		0x21000, 0x21040,
3475 		0x2104c, 0x21060,
3476 		0x210c0, 0x210ec,
3477 		0x21200, 0x21268,
3478 		0x21270, 0x21284,
3479 		0x212fc, 0x21388,
3480 		0x21400, 0x21404,
3481 		0x21500, 0x21518,
3482 		0x2152c, 0x2153c,
3483 		0x21550, 0x21554,
3484 		0x21600, 0x21600,
3485 		0x21608, 0x21628,
3486 		0x21630, 0x2163c,
3487 		0x21700, 0x2171c,
3488 		0x21780, 0x2178c,
3489 		0x21800, 0x21c38,
3490 		0x21c80, 0x21d7c,
3491 		0x21e00, 0x21e04,
3492 		0x22000, 0x2202c,
3493 		0x22100, 0x2213c,
3494 		0x22190, 0x221c8,
3495 		0x22200, 0x22318,
3496 		0x22400, 0x22528,
3497 		0x22540, 0x22614,
3498 		0x23000, 0x23040,
3499 		0x2304c, 0x23060,
3500 		0x230c0, 0x230ec,
3501 		0x23200, 0x23268,
3502 		0x23270, 0x23284,
3503 		0x232fc, 0x23388,
3504 		0x23400, 0x23404,
3505 		0x23500, 0x23518,
3506 		0x2352c, 0x2353c,
3507 		0x23550, 0x23554,
3508 		0x23600, 0x23600,
3509 		0x23608, 0x23628,
3510 		0x23630, 0x2363c,
3511 		0x23700, 0x2371c,
3512 		0x23780, 0x2378c,
3513 		0x23800, 0x23c38,
3514 		0x23c80, 0x23d7c,
3515 		0x23e00, 0x23e04,
3516 		0x24000, 0x2402c,
3517 		0x24100, 0x2413c,
3518 		0x24190, 0x241c8,
3519 		0x24200, 0x24318,
3520 		0x24400, 0x24528,
3521 		0x24540, 0x24614,
3522 		0x25000, 0x25040,
3523 		0x2504c, 0x25060,
3524 		0x250c0, 0x250ec,
3525 		0x25200, 0x25268,
3526 		0x25270, 0x25284,
3527 		0x252fc, 0x25388,
3528 		0x25400, 0x25404,
3529 		0x25500, 0x25518,
3530 		0x2552c, 0x2553c,
3531 		0x25550, 0x25554,
3532 		0x25600, 0x25600,
3533 		0x25608, 0x25628,
3534 		0x25630, 0x2563c,
3535 		0x25700, 0x2571c,
3536 		0x25780, 0x2578c,
3537 		0x25800, 0x25c38,
3538 		0x25c80, 0x25d7c,
3539 		0x25e00, 0x25e04,
3540 		0x26000, 0x2602c,
3541 		0x26100, 0x2613c,
3542 		0x26190, 0x261c8,
3543 		0x26200, 0x26318,
3544 		0x26400, 0x26528,
3545 		0x26540, 0x26614,
3546 		0x27000, 0x27040,
3547 		0x2704c, 0x27060,
3548 		0x270c0, 0x270ec,
3549 		0x27200, 0x27268,
3550 		0x27270, 0x27284,
3551 		0x272fc, 0x27388,
3552 		0x27400, 0x27404,
3553 		0x27500, 0x27518,
3554 		0x2752c, 0x2753c,
3555 		0x27550, 0x27554,
3556 		0x27600, 0x27600,
3557 		0x27608, 0x27628,
3558 		0x27630, 0x2763c,
3559 		0x27700, 0x2771c,
3560 		0x27780, 0x2778c,
3561 		0x27800, 0x27c38,
3562 		0x27c80, 0x27d7c,
3563 		0x27e00, 0x27e04
3564 	};
3565 	static const unsigned int t5_reg_ranges[] = {
3566 		0x1008, 0x1148,
3567 		0x1180, 0x11b4,
3568 		0x11fc, 0x123c,
3569 		0x1280, 0x173c,
3570 		0x1800, 0x18fc,
3571 		0x3000, 0x3028,
3572 		0x3060, 0x30d8,
3573 		0x30e0, 0x30fc,
3574 		0x3140, 0x357c,
3575 		0x35a8, 0x35cc,
3576 		0x35ec, 0x35ec,
3577 		0x3600, 0x5624,
3578 		0x56cc, 0x575c,
3579 		0x580c, 0x5814,
3580 		0x5890, 0x58bc,
3581 		0x5940, 0x59dc,
3582 		0x59fc, 0x5a18,
3583 		0x5a60, 0x5a9c,
3584 		0x5b94, 0x5bfc,
3585 		0x6000, 0x6040,
3586 		0x6058, 0x614c,
3587 		0x7700, 0x7798,
3588 		0x77c0, 0x78fc,
3589 		0x7b00, 0x7c54,
3590 		0x7d00, 0x7efc,
3591 		0x8dc0, 0x8de0,
3592 		0x8df8, 0x8e84,
3593 		0x8ea0, 0x8f84,
3594 		0x8fc0, 0x90f8,
3595 		0x9400, 0x9470,
3596 		0x9600, 0x96f4,
3597 		0x9800, 0x9808,
3598 		0x9820, 0x983c,
3599 		0x9850, 0x9864,
3600 		0x9c00, 0x9c6c,
3601 		0x9c80, 0x9cec,
3602 		0x9d00, 0x9d6c,
3603 		0x9d80, 0x9dec,
3604 		0x9e00, 0x9e6c,
3605 		0x9e80, 0x9eec,
3606 		0x9f00, 0x9f6c,
3607 		0x9f80, 0xa020,
3608 		0xd004, 0xd03c,
3609 		0xdfc0, 0xdfe0,
3610 		0xe000, 0x11088,
3611 		0x1109c, 0x11110,
3612 		0x11118, 0x1117c,
3613 		0x11190, 0x11204,
3614 		0x19040, 0x1906c,
3615 		0x19078, 0x19080,
3616 		0x1908c, 0x19124,
3617 		0x19150, 0x191b0,
3618 		0x191d0, 0x191e8,
3619 		0x19238, 0x19290,
3620 		0x193f8, 0x19474,
3621 		0x19490, 0x194cc,
3622 		0x194f0, 0x194f8,
3623 		0x19c00, 0x19c60,
3624 		0x19c94, 0x19e10,
3625 		0x19e50, 0x19f34,
3626 		0x19f40, 0x19f50,
3627 		0x19f90, 0x19fe4,
3628 		0x1a000, 0x1a06c,
3629 		0x1a0b0, 0x1a120,
3630 		0x1a128, 0x1a138,
3631 		0x1a190, 0x1a1c4,
3632 		0x1a1fc, 0x1a1fc,
3633 		0x1e008, 0x1e00c,
3634 		0x1e040, 0x1e04c,
3635 		0x1e284, 0x1e290,
3636 		0x1e2c0, 0x1e2c0,
3637 		0x1e2e0, 0x1e2e0,
3638 		0x1e300, 0x1e384,
3639 		0x1e3c0, 0x1e3c8,
3640 		0x1e408, 0x1e40c,
3641 		0x1e440, 0x1e44c,
3642 		0x1e684, 0x1e690,
3643 		0x1e6c0, 0x1e6c0,
3644 		0x1e6e0, 0x1e6e0,
3645 		0x1e700, 0x1e784,
3646 		0x1e7c0, 0x1e7c8,
3647 		0x1e808, 0x1e80c,
3648 		0x1e840, 0x1e84c,
3649 		0x1ea84, 0x1ea90,
3650 		0x1eac0, 0x1eac0,
3651 		0x1eae0, 0x1eae0,
3652 		0x1eb00, 0x1eb84,
3653 		0x1ebc0, 0x1ebc8,
3654 		0x1ec08, 0x1ec0c,
3655 		0x1ec40, 0x1ec4c,
3656 		0x1ee84, 0x1ee90,
3657 		0x1eec0, 0x1eec0,
3658 		0x1eee0, 0x1eee0,
3659 		0x1ef00, 0x1ef84,
3660 		0x1efc0, 0x1efc8,
3661 		0x1f008, 0x1f00c,
3662 		0x1f040, 0x1f04c,
3663 		0x1f284, 0x1f290,
3664 		0x1f2c0, 0x1f2c0,
3665 		0x1f2e0, 0x1f2e0,
3666 		0x1f300, 0x1f384,
3667 		0x1f3c0, 0x1f3c8,
3668 		0x1f408, 0x1f40c,
3669 		0x1f440, 0x1f44c,
3670 		0x1f684, 0x1f690,
3671 		0x1f6c0, 0x1f6c0,
3672 		0x1f6e0, 0x1f6e0,
3673 		0x1f700, 0x1f784,
3674 		0x1f7c0, 0x1f7c8,
3675 		0x1f808, 0x1f80c,
3676 		0x1f840, 0x1f84c,
3677 		0x1fa84, 0x1fa90,
3678 		0x1fac0, 0x1fac0,
3679 		0x1fae0, 0x1fae0,
3680 		0x1fb00, 0x1fb84,
3681 		0x1fbc0, 0x1fbc8,
3682 		0x1fc08, 0x1fc0c,
3683 		0x1fc40, 0x1fc4c,
3684 		0x1fe84, 0x1fe90,
3685 		0x1fec0, 0x1fec0,
3686 		0x1fee0, 0x1fee0,
3687 		0x1ff00, 0x1ff84,
3688 		0x1ffc0, 0x1ffc8,
3689 		0x30000, 0x30030,
3690 		0x30100, 0x30144,
3691 		0x30190, 0x301d0,
3692 		0x30200, 0x30318,
3693 		0x30400, 0x3052c,
3694 		0x30540, 0x3061c,
3695 		0x30800, 0x30834,
3696 		0x308c0, 0x30908,
3697 		0x30910, 0x309ac,
3698 		0x30a00, 0x30a2c,
3699 		0x30a44, 0x30a50,
3700 		0x30a74, 0x30c24,
3701 		0x30d00, 0x30d00,
3702 		0x30d08, 0x30d14,
3703 		0x30d1c, 0x30d20,
3704 		0x30d3c, 0x30d50,
3705 		0x31200, 0x3120c,
3706 		0x31220, 0x31220,
3707 		0x31240, 0x31240,
3708 		0x31600, 0x3160c,
3709 		0x31a00, 0x31a1c,
3710 		0x31e00, 0x31e20,
3711 		0x31e38, 0x31e3c,
3712 		0x31e80, 0x31e80,
3713 		0x31e88, 0x31ea8,
3714 		0x31eb0, 0x31eb4,
3715 		0x31ec8, 0x31ed4,
3716 		0x31fb8, 0x32004,
3717 		0x32200, 0x32200,
3718 		0x32208, 0x32240,
3719 		0x32248, 0x32280,
3720 		0x32288, 0x322c0,
3721 		0x322c8, 0x322fc,
3722 		0x32600, 0x32630,
3723 		0x32a00, 0x32abc,
3724 		0x32b00, 0x32b70,
3725 		0x33000, 0x33048,
3726 		0x33060, 0x3309c,
3727 		0x330f0, 0x33148,
3728 		0x33160, 0x3319c,
3729 		0x331f0, 0x332e4,
3730 		0x332f8, 0x333e4,
3731 		0x333f8, 0x33448,
3732 		0x33460, 0x3349c,
3733 		0x334f0, 0x33548,
3734 		0x33560, 0x3359c,
3735 		0x335f0, 0x336e4,
3736 		0x336f8, 0x337e4,
3737 		0x337f8, 0x337fc,
3738 		0x33814, 0x33814,
3739 		0x3382c, 0x3382c,
3740 		0x33880, 0x3388c,
3741 		0x338e8, 0x338ec,
3742 		0x33900, 0x33948,
3743 		0x33960, 0x3399c,
3744 		0x339f0, 0x33ae4,
3745 		0x33af8, 0x33b10,
3746 		0x33b28, 0x33b28,
3747 		0x33b3c, 0x33b50,
3748 		0x33bf0, 0x33c10,
3749 		0x33c28, 0x33c28,
3750 		0x33c3c, 0x33c50,
3751 		0x33cf0, 0x33cfc,
3752 		0x34000, 0x34030,
3753 		0x34100, 0x34144,
3754 		0x34190, 0x341d0,
3755 		0x34200, 0x34318,
3756 		0x34400, 0x3452c,
3757 		0x34540, 0x3461c,
3758 		0x34800, 0x34834,
3759 		0x348c0, 0x34908,
3760 		0x34910, 0x349ac,
3761 		0x34a00, 0x34a2c,
3762 		0x34a44, 0x34a50,
3763 		0x34a74, 0x34c24,
3764 		0x34d00, 0x34d00,
3765 		0x34d08, 0x34d14,
3766 		0x34d1c, 0x34d20,
3767 		0x34d3c, 0x34d50,
3768 		0x35200, 0x3520c,
3769 		0x35220, 0x35220,
3770 		0x35240, 0x35240,
3771 		0x35600, 0x3560c,
3772 		0x35a00, 0x35a1c,
3773 		0x35e00, 0x35e20,
3774 		0x35e38, 0x35e3c,
3775 		0x35e80, 0x35e80,
3776 		0x35e88, 0x35ea8,
3777 		0x35eb0, 0x35eb4,
3778 		0x35ec8, 0x35ed4,
3779 		0x35fb8, 0x36004,
3780 		0x36200, 0x36200,
3781 		0x36208, 0x36240,
3782 		0x36248, 0x36280,
3783 		0x36288, 0x362c0,
3784 		0x362c8, 0x362fc,
3785 		0x36600, 0x36630,
3786 		0x36a00, 0x36abc,
3787 		0x36b00, 0x36b70,
3788 		0x37000, 0x37048,
3789 		0x37060, 0x3709c,
3790 		0x370f0, 0x37148,
3791 		0x37160, 0x3719c,
3792 		0x371f0, 0x372e4,
3793 		0x372f8, 0x373e4,
3794 		0x373f8, 0x37448,
3795 		0x37460, 0x3749c,
3796 		0x374f0, 0x37548,
3797 		0x37560, 0x3759c,
3798 		0x375f0, 0x376e4,
3799 		0x376f8, 0x377e4,
3800 		0x377f8, 0x377fc,
3801 		0x37814, 0x37814,
3802 		0x3782c, 0x3782c,
3803 		0x37880, 0x3788c,
3804 		0x378e8, 0x378ec,
3805 		0x37900, 0x37948,
3806 		0x37960, 0x3799c,
3807 		0x379f0, 0x37ae4,
3808 		0x37af8, 0x37b10,
3809 		0x37b28, 0x37b28,
3810 		0x37b3c, 0x37b50,
3811 		0x37bf0, 0x37c10,
3812 		0x37c28, 0x37c28,
3813 		0x37c3c, 0x37c50,
3814 		0x37cf0, 0x37cfc,
3815 		0x38000, 0x38030,
3816 		0x38100, 0x38144,
3817 		0x38190, 0x381d0,
3818 		0x38200, 0x38318,
3819 		0x38400, 0x3852c,
3820 		0x38540, 0x3861c,
3821 		0x38800, 0x38834,
3822 		0x388c0, 0x38908,
3823 		0x38910, 0x389ac,
3824 		0x38a00, 0x38a2c,
3825 		0x38a44, 0x38a50,
3826 		0x38a74, 0x38c24,
3827 		0x38d00, 0x38d00,
3828 		0x38d08, 0x38d14,
3829 		0x38d1c, 0x38d20,
3830 		0x38d3c, 0x38d50,
3831 		0x39200, 0x3920c,
3832 		0x39220, 0x39220,
3833 		0x39240, 0x39240,
3834 		0x39600, 0x3960c,
3835 		0x39a00, 0x39a1c,
3836 		0x39e00, 0x39e20,
3837 		0x39e38, 0x39e3c,
3838 		0x39e80, 0x39e80,
3839 		0x39e88, 0x39ea8,
3840 		0x39eb0, 0x39eb4,
3841 		0x39ec8, 0x39ed4,
3842 		0x39fb8, 0x3a004,
3843 		0x3a200, 0x3a200,
3844 		0x3a208, 0x3a240,
3845 		0x3a248, 0x3a280,
3846 		0x3a288, 0x3a2c0,
3847 		0x3a2c8, 0x3a2fc,
3848 		0x3a600, 0x3a630,
3849 		0x3aa00, 0x3aabc,
3850 		0x3ab00, 0x3ab70,
3851 		0x3b000, 0x3b048,
3852 		0x3b060, 0x3b09c,
3853 		0x3b0f0, 0x3b148,
3854 		0x3b160, 0x3b19c,
3855 		0x3b1f0, 0x3b2e4,
3856 		0x3b2f8, 0x3b3e4,
3857 		0x3b3f8, 0x3b448,
3858 		0x3b460, 0x3b49c,
3859 		0x3b4f0, 0x3b548,
3860 		0x3b560, 0x3b59c,
3861 		0x3b5f0, 0x3b6e4,
3862 		0x3b6f8, 0x3b7e4,
3863 		0x3b7f8, 0x3b7fc,
3864 		0x3b814, 0x3b814,
3865 		0x3b82c, 0x3b82c,
3866 		0x3b880, 0x3b88c,
3867 		0x3b8e8, 0x3b8ec,
3868 		0x3b900, 0x3b948,
3869 		0x3b960, 0x3b99c,
3870 		0x3b9f0, 0x3bae4,
3871 		0x3baf8, 0x3bb10,
3872 		0x3bb28, 0x3bb28,
3873 		0x3bb3c, 0x3bb50,
3874 		0x3bbf0, 0x3bc10,
3875 		0x3bc28, 0x3bc28,
3876 		0x3bc3c, 0x3bc50,
3877 		0x3bcf0, 0x3bcfc,
3878 		0x3c000, 0x3c030,
3879 		0x3c100, 0x3c144,
3880 		0x3c190, 0x3c1d0,
3881 		0x3c200, 0x3c318,
3882 		0x3c400, 0x3c52c,
3883 		0x3c540, 0x3c61c,
3884 		0x3c800, 0x3c834,
3885 		0x3c8c0, 0x3c908,
3886 		0x3c910, 0x3c9ac,
3887 		0x3ca00, 0x3ca2c,
3888 		0x3ca44, 0x3ca50,
3889 		0x3ca74, 0x3cc24,
3890 		0x3cd00, 0x3cd00,
3891 		0x3cd08, 0x3cd14,
3892 		0x3cd1c, 0x3cd20,
3893 		0x3cd3c, 0x3cd50,
3894 		0x3d200, 0x3d20c,
3895 		0x3d220, 0x3d220,
3896 		0x3d240, 0x3d240,
3897 		0x3d600, 0x3d60c,
3898 		0x3da00, 0x3da1c,
3899 		0x3de00, 0x3de20,
3900 		0x3de38, 0x3de3c,
3901 		0x3de80, 0x3de80,
3902 		0x3de88, 0x3dea8,
3903 		0x3deb0, 0x3deb4,
3904 		0x3dec8, 0x3ded4,
3905 		0x3dfb8, 0x3e004,
3906 		0x3e200, 0x3e200,
3907 		0x3e208, 0x3e240,
3908 		0x3e248, 0x3e280,
3909 		0x3e288, 0x3e2c0,
3910 		0x3e2c8, 0x3e2fc,
3911 		0x3e600, 0x3e630,
3912 		0x3ea00, 0x3eabc,
3913 		0x3eb00, 0x3eb70,
3914 		0x3f000, 0x3f048,
3915 		0x3f060, 0x3f09c,
3916 		0x3f0f0, 0x3f148,
3917 		0x3f160, 0x3f19c,
3918 		0x3f1f0, 0x3f2e4,
3919 		0x3f2f8, 0x3f3e4,
3920 		0x3f3f8, 0x3f448,
3921 		0x3f460, 0x3f49c,
3922 		0x3f4f0, 0x3f548,
3923 		0x3f560, 0x3f59c,
3924 		0x3f5f0, 0x3f6e4,
3925 		0x3f6f8, 0x3f7e4,
3926 		0x3f7f8, 0x3f7fc,
3927 		0x3f814, 0x3f814,
3928 		0x3f82c, 0x3f82c,
3929 		0x3f880, 0x3f88c,
3930 		0x3f8e8, 0x3f8ec,
3931 		0x3f900, 0x3f948,
3932 		0x3f960, 0x3f99c,
3933 		0x3f9f0, 0x3fae4,
3934 		0x3faf8, 0x3fb10,
3935 		0x3fb28, 0x3fb28,
3936 		0x3fb3c, 0x3fb50,
3937 		0x3fbf0, 0x3fc10,
3938 		0x3fc28, 0x3fc28,
3939 		0x3fc3c, 0x3fc50,
3940 		0x3fcf0, 0x3fcfc,
3941 		0x40000, 0x4000c,
3942 		0x40040, 0x40068,
3943 		0x4007c, 0x40144,
3944 		0x40180, 0x4018c,
3945 		0x40200, 0x40298,
3946 		0x402ac, 0x4033c,
3947 		0x403f8, 0x403fc,
3948 		0x41304, 0x413c4,
3949 		0x41400, 0x4141c,
3950 		0x41480, 0x414d0,
3951 		0x44000, 0x44078,
3952 		0x440c0, 0x44278,
3953 		0x442c0, 0x44478,
3954 		0x444c0, 0x44678,
3955 		0x446c0, 0x44878,
3956 		0x448c0, 0x449fc,
3957 		0x45000, 0x45068,
3958 		0x45080, 0x45084,
3959 		0x450a0, 0x450b0,
3960 		0x45200, 0x45268,
3961 		0x45280, 0x45284,
3962 		0x452a0, 0x452b0,
3963 		0x460c0, 0x460e4,
3964 		0x47000, 0x4708c,
3965 		0x47200, 0x47250,
3966 		0x47400, 0x47420,
3967 		0x47600, 0x47618,
3968 		0x47800, 0x47814,
3969 		0x48000, 0x4800c,
3970 		0x48040, 0x48068,
3971 		0x4807c, 0x48144,
3972 		0x48180, 0x4818c,
3973 		0x48200, 0x48298,
3974 		0x482ac, 0x4833c,
3975 		0x483f8, 0x483fc,
3976 		0x49304, 0x493c4,
3977 		0x49400, 0x4941c,
3978 		0x49480, 0x494d0,
3979 		0x4c000, 0x4c078,
3980 		0x4c0c0, 0x4c278,
3981 		0x4c2c0, 0x4c478,
3982 		0x4c4c0, 0x4c678,
3983 		0x4c6c0, 0x4c878,
3984 		0x4c8c0, 0x4c9fc,
3985 		0x4d000, 0x4d068,
3986 		0x4d080, 0x4d084,
3987 		0x4d0a0, 0x4d0b0,
3988 		0x4d200, 0x4d268,
3989 		0x4d280, 0x4d284,
3990 		0x4d2a0, 0x4d2b0,
3991 		0x4e0c0, 0x4e0e4,
3992 		0x4f000, 0x4f08c,
3993 		0x4f200, 0x4f250,
3994 		0x4f400, 0x4f420,
3995 		0x4f600, 0x4f618,
3996 		0x4f800, 0x4f814,
3997 		0x50000, 0x500cc,
3998 		0x50400, 0x50400,
3999 		0x50800, 0x508cc,
4000 		0x50c00, 0x50c00,
4001 		0x51000, 0x5101c,
4002 		0x51300, 0x51308,
4003 	};
4004 
4005 	if (is_t4(sc)) {
4006 		reg_ranges = &t4_reg_ranges[0];
4007 		n = nitems(t4_reg_ranges);
4008 	} else {
4009 		reg_ranges = &t5_reg_ranges[0];
4010 		n = nitems(t5_reg_ranges);
4011 	}
4012 
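	/* Encode the chip id in the low bits, the revision above bit 10. */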
4013 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4014 	for (i = 0; i < n; i += 2)
4015 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4016 }
4017 
4018 static void
4019 cxgbe_tick(void *arg)
4020 {
4021 	struct port_info *pi = arg;
4022 	struct ifnet *ifp = pi->ifp;
4023 	struct sge_txq *txq;
4024 	int i, drops;
4025 	struct port_stats *s = &pi->stats;
4026 
4027 	PORT_LOCK(pi);
4028 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4029 		PORT_UNLOCK(pi);
4030 		return;	/* without scheduling another callout */
4031 	}
4032 
4033 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4034 
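	/*
	 * The MAC counters include pause frames (64 bytes each, and counted
	 * as multicast), so they are backed out here to report only data
	 * frames to the stack.
	 */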
4035 	ifp->if_opackets = s->tx_frames - s->tx_pause;
4036 	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4037 	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4038 	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4039 	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4040 	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4041 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4042 	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4043 	    s->rx_trunc3;
4044 
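	/*
	 * Add the software drops recorded in each tx queue's buf_ring to
	 * the MAC-level drop counter.
	 */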
4045 	drops = s->tx_drop;
4046 	for_each_txq(pi, i, txq)
4047 		drops += txq->br->br_drops;
4048 	ifp->if_snd.ifq_drops = drops;
4049 
4050 	ifp->if_oerrors = s->tx_error_frames;
4051 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4052 	    s->rx_fcs_err + s->rx_len_err;
4053 
4054 	callout_schedule(&pi->tick, hz);
4055 	PORT_UNLOCK(pi);
4056 }
4057 
4058 static void
4059 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4060 {
4061 	struct ifnet *vlan;
4062 
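	/*
	 * Runs for every VLAN configured on any interface in the system;
	 * ignore events that aren't for this port's ifnet.
	 */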
4063 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4064 		return;
4065 
4066 	vlan = VLAN_DEVAT(ifp, vid);
4067 	VLAN_SETCOOKIE(vlan, ifp);
4068 }
4069 
4070 static int
4071 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4072 {
4073 
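	/*
	 * Default handler for CPL opcodes with no registered handler: panic
	 * under INVARIANTS, otherwise log the opcode and drop the payload.
	 */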
4074 #ifdef INVARIANTS
4075 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4076 	    __func__, rss->opcode, iq, m);
4077 #else
4078 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4079 	    __func__, rss->opcode, iq, m);
4080 	m_freem(m);
4081 #endif
4082 	return (EDOOFUS);
4083 }
4084 
4085 int
4086 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4087 {
4088 	uintptr_t *loc, new;
4089 
4090 	if (opcode >= nitems(sc->cpl_handler))
4091 		return (EINVAL);
4092 
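	/*
	 * Install the handler with a releasing store so the rx path can
	 * dispatch through the table locklessly.  A NULL handler restores
	 * the default.
	 */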
4093 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4094 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4095 	atomic_store_rel_ptr(loc, new);
4096 
4097 	return (0);
4098 }
4099 
4100 static int
4101 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4102 {
4103 
4104 #ifdef INVARIANTS
4105 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4106 #else
4107 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4108 	    __func__, iq, ctrl);
4109 #endif
4110 	return (EDOOFUS);
4111 }
4112 
4113 int
4114 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4115 {
4116 	uintptr_t *loc, new;
4117 
4118 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4119 	loc = (uintptr_t *) &sc->an_handler;
4120 	atomic_store_rel_ptr(loc, new);
4121 
4122 	return (0);
4123 }
4124 
4125 static int
4126 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4127 {
4128 	const struct cpl_fw6_msg *cpl =
4129 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4130 
4131 #ifdef INVARIANTS
4132 	panic("%s: fw_msg type %d", __func__, cpl->type);
4133 #else
4134 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4135 #endif
4136 	return (EDOOFUS);
4137 }
4138 
4139 int
4140 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4141 {
4142 	uintptr_t *loc, new;
4143 
4144 	if (type >= nitems(sc->fw_msg_handler))
4145 		return (EINVAL);
4146 
4147 	/*
4148 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4149 	 * handler dispatch table.  Reject any attempt to install a handler for
4150 	 * this subtype.
4151 	 */
4152 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4153 		return (EINVAL);
4154 
4155 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4156 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4157 	atomic_store_rel_ptr(loc, new);
4158 
4159 	return (0);
4160 }
4161 
4162 static int
4163 t4_sysctls(struct adapter *sc)
4164 {
4165 	struct sysctl_ctx_list *ctx;
4166 	struct sysctl_oid *oid;
4167 	struct sysctl_oid_list *children, *c0;
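	/*
	 * Bit descriptions in "%b" format (rendered by sysctl_bitfield):
	 * the first character is the output base (\20 = hex), followed by
	 * a 1-based bit number and the name for each bit.
	 */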
4168 	static char *caps[] = {
4169 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4170 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4171 		    "\6HASHFILTER\7ETHOFLD",
4172 		"\20\1TOE",				/* caps[2] toecaps */
4173 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4174 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4175 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4176 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4177 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4178 		    "\4PO_INITIATOR\5PO_TARGET"
4179 	};
4180 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4181 
4182 	ctx = device_get_sysctl_ctx(sc->dev);
4183 
4184 	/*
4185 	 * dev.t4nex.X.
4186 	 */
4187 	oid = device_get_sysctl_tree(sc->dev);
4188 	c0 = children = SYSCTL_CHILDREN(oid);
4189 
4190 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4191 	    sc->params.nports, "# of ports");
4192 
4193 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4194 	    NULL, chip_rev(sc), "chip hardware revision");
4195 
4196 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4197 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4198 
4199 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4200 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4201 
4202 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4203 	    sc->cfcsum, "config file checksum");
4204 
4205 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4206 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4207 	    sysctl_bitfield, "A", "available doorbells");
4208 
4209 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4210 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4211 	    sysctl_bitfield, "A", "available link capabilities");
4212 
4213 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4214 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4215 	    sysctl_bitfield, "A", "available NIC capabilities");
4216 
4217 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4218 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4219 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4220 
4221 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4222 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4223 	    sysctl_bitfield, "A", "available RDMA capabilities");
4224 
4225 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4226 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4227 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4228 
4229 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4230 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4231 	    sysctl_bitfield, "A", "available FCoE capabilities");
4232 
4233 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4234 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4235 
4236 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4237 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4238 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4239 	    "interrupt holdoff timer values (us)");
4240 
4241 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4242 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4243 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4244 	    "interrupt holdoff packet counter values");
4245 
4246 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4247 	    NULL, sc->tids.nftids, "number of filters");
4248 
4249 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4250 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4251 	    "chip temperature (in Celsius)");
4252 
4253 	t4_sge_sysctls(sc, ctx, children);
4254 
4255 	sc->lro_timeout = 100;
4256 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4257 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4258 
4259 #ifdef SBUF_DRAIN
4260 	/*
4261 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4262 	 */
4263 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4264 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4265 	    "logs and miscellaneous information");
4266 	children = SYSCTL_CHILDREN(oid);
4267 
4268 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4269 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4270 	    sysctl_cctrl, "A", "congestion control");
4271 
4272 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4273 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4274 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4275 
4276 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4277 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4278 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4279 
4280 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4281 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4282 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4283 
4284 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4285 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4286 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4287 
4288 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4289 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4290 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4291 
4292 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4293 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4294 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4295 
4296 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4297 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4298 	    sysctl_cim_la, "A", "CIM logic analyzer");
4299 
4300 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4301 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4302 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4303 
4304 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4305 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4306 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4307 
4308 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4309 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4310 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4311 
4312 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4313 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4314 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4315 
4316 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4317 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4318 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4319 
4320 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4321 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4322 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4323 
4324 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4325 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4326 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4327 
4328 	if (is_t5(sc)) {
4329 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4330 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4331 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4332 
4333 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4334 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4335 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4336 	}
4337 
4338 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4339 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4340 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4341 
4342 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4343 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4344 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4345 
4346 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4347 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4348 	    sysctl_cpl_stats, "A", "CPL statistics");
4349 
4350 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4351 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4352 	    sysctl_ddp_stats, "A", "DDP statistics");
4353 
4354 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4355 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4356 	    sysctl_devlog, "A", "firmware's device log");
4357 
4358 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4359 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4360 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4361 
4362 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4363 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4364 	    sysctl_hw_sched, "A", "hardware scheduler");
4365 
4366 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4367 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4368 	    sysctl_l2t, "A", "hardware L2 table");
4369 
4370 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4371 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4372 	    sysctl_lb_stats, "A", "loopback statistics");
4373 
4374 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4375 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4376 	    sysctl_meminfo, "A", "memory regions");
4377 
4378 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4379 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4380 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4381 
4382 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4383 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4384 	    sysctl_path_mtus, "A", "path MTUs");
4385 
4386 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4387 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4388 	    sysctl_pm_stats, "A", "PM statistics");
4389 
4390 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4391 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4392 	    sysctl_rdma_stats, "A", "RDMA statistics");
4393 
4394 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4395 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4396 	    sysctl_tcp_stats, "A", "TCP statistics");
4397 
4398 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4399 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4400 	    sysctl_tids, "A", "TID information");
4401 
4402 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4403 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4404 	    sysctl_tp_err_stats, "A", "TP error statistics");
4405 
4406 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4407 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4408 	    sysctl_tp_la, "A", "TP logic analyzer");
4409 
4410 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4411 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4412 	    sysctl_tx_rate, "A", "Tx rate");
4413 
4414 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4415 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4416 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4417 
4418 	if (is_t5(sc)) {
4419 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4420 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4421 		    sysctl_wcwr_stats, "A", "write combined work requests");
4422 	}
4423 #endif
4424 
4425 #ifdef TCP_OFFLOAD
4426 	if (is_offload(sc)) {
4427 		/*
4428 		 * dev.t4nex.X.toe.
4429 		 */
4430 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4431 		    NULL, "TOE parameters");
4432 		children = SYSCTL_CHILDREN(oid);
4433 
4434 		sc->tt.sndbuf = 256 * 1024;
4435 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4436 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4437 
4438 		sc->tt.ddp = 0;
4439 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4440 		    &sc->tt.ddp, 0, "DDP allowed");
4441 
4442 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4443 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4444 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4445 
4446 		sc->tt.ddp_thres =
4447 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4448 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4449 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4450 
4451 		sc->tt.rx_coalesce = 1;
4452 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4453 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4454 	}
4455 #endif
4456 
4457 
4458 	return (0);
4459 }
4460 
4461 static int
4462 cxgbe_sysctls(struct port_info *pi)
4463 {
4464 	struct sysctl_ctx_list *ctx;
4465 	struct sysctl_oid *oid;
4466 	struct sysctl_oid_list *children;
4467 
4468 	ctx = device_get_sysctl_ctx(pi->dev);
4469 
4470 	/*
4471 	 * dev.cxgbe.X.
4472 	 */
4473 	oid = device_get_sysctl_tree(pi->dev);
4474 	children = SYSCTL_CHILDREN(oid);
4475 
4476 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4477 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4478 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4479 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4480 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4481 		    "PHY temperature (in Celsius)");
4482 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4483 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4484 		    "PHY firmware version");
4485 	}
4486 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4487 	    &pi->nrxq, 0, "# of rx queues");
4488 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4489 	    &pi->ntxq, 0, "# of tx queues");
4490 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4491 	    &pi->first_rxq, 0, "index of first rx queue");
4492 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4493 	    &pi->first_txq, 0, "index of first tx queue");
4494 
4495 #ifdef TCP_OFFLOAD
4496 	if (is_offload(pi->adapter)) {
4497 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4498 		    &pi->nofldrxq, 0,
4499 		    "# of rx queues for offloaded TCP connections");
4500 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4501 		    &pi->nofldtxq, 0,
4502 		    "# of tx queues for offloaded TCP connections");
4503 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4504 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4505 		    "index of first TOE rx queue");
4506 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4507 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4508 		    "index of first TOE tx queue");
4509 	}
4510 #endif
4511 
4512 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4513 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4514 	    "holdoff timer index");
4515 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4516 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4517 	    "holdoff packet counter index");
4518 
4519 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4520 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4521 	    "rx queue size");
4522 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4523 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4524 	    "tx queue size");
4525 
4526 	/*
4527 	 * dev.cxgbe.X.stats.
4528 	 */
4529 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4530 	    NULL, "port statistics");
4531 	children = SYSCTL_CHILDREN(oid);
4532 
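	/*
	 * Each of these creates a read-only 64-bit sysctl backed directly by
	 * the named MPS counter register, via sysctl_handle_t4_reg64.
	 */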
4533 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4534 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4535 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4536 	    sysctl_handle_t4_reg64, "QU", desc)
4537 
4538 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4539 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4540 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4541 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4542 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4543 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4544 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4545 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4546 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4547 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4548 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4549 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4550 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4551 	    "# of tx frames in this range",
4552 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4553 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4554 	    "# of tx frames in this range",
4555 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4556 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4557 	    "# of tx frames in this range",
4558 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4559 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4560 	    "# of tx frames in this range",
4561 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4562 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4563 	    "# of tx frames in this range",
4564 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4565 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4566 	    "# of tx frames in this range",
4567 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4568 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4569 	    "# of tx frames in this range",
4570 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4571 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4572 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4573 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4574 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4575 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4576 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4577 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4578 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4579 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4580 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4581 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4582 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4583 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4584 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4585 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4586 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4587 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4588 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4589 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4590 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4591 
4592 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4593 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4594 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4595 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4596 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4597 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4598 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4599 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4600 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4601 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4602 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4603 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4604 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4605 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4606 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4607 	    "# of frames received with bad FCS",
4608 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4609 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4610 	    "# of frames received with length error",
4611 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4612 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4613 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4614 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4615 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4616 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4617 	    "# of rx frames in this range",
4618 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4619 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4620 	    "# of rx frames in this range",
4621 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4622 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4623 	    "# of rx frames in this range",
4624 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4625 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4626 	    "# of rx frames in this range",
4627 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4628 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4629 	    "# of rx frames in this range",
4630 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4631 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4632 	    "# of rx frames in this range",
4633 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4634 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4635 	    "# of rx frames in this range",
4636 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4637 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4638 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4639 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4640 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4641 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4642 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4643 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4644 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4645 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4646 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4647 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4648 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4649 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4650 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4651 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4652 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4653 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4654 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4655 
4656 #undef SYSCTL_ADD_T4_REG64
4657 
4658 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4659 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4660 	    &pi->stats.name, desc)
4661 
4662 	/* We get these from port_stats and they may be stale by up to 1s. */
4663 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4664 	    "# drops due to buffer-group 0 overflows");
4665 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4666 	    "# drops due to buffer-group 1 overflows");
4667 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4668 	    "# drops due to buffer-group 2 overflows");
4669 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4670 	    "# drops due to buffer-group 3 overflows");
4671 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4672 	    "# of buffer-group 0 truncated packets");
4673 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4674 	    "# of buffer-group 1 truncated packets");
4675 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4676 	    "# of buffer-group 2 truncated packets");
4677 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4678 	    "# of buffer-group 3 truncated packets");
4679 
4680 #undef SYSCTL_ADD_T4_PORTSTAT
4681 
4682 	return (0);
4683 }
4684 
4685 static int
4686 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4687 {
4688 	int rc, *i;
4689 	struct sbuf sb;
4690 
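	/* arg1 is an array of ints, arg2 its size in bytes. */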
4691 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4692 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4693 		sbuf_printf(&sb, "%d ", *i);
4694 	sbuf_trim(&sb);
4695 	sbuf_finish(&sb);
4696 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4697 	sbuf_delete(&sb);
4698 	return (rc);
4699 }
4700 
4701 static int
4702 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4703 {
4704 	int rc;
4705 	struct sbuf *sb;
4706 
4707 	rc = sysctl_wire_old_buffer(req, 0);
4708 	if (rc != 0)
4709 		return (rc);
4710 
4711 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4712 	if (sb == NULL)
4713 		return (ENOMEM);
4714 
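	/* "%b" renders the value in arg2 using the bit names in arg1. */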
4715 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4716 	rc = sbuf_finish(sb);
4717 	sbuf_delete(sb);
4718 
4719 	return (rc);
4720 }
4721 
4722 static int
4723 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4724 {
4725 	struct port_info *pi = arg1;
4726 	int op = arg2;
4727 	struct adapter *sc = pi->adapter;
4728 	u_int v;
4729 	int rc;
4730 
4731 	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4732 	if (rc)
4733 		return (rc);
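	/*
	 * op 0 reads the PHY temperature register, op 1 the PHY firmware
	 * version (see the sysctls registered in cxgbe_sysctls).
	 */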
4734 	/* XXX: magic numbers */
4735 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4736 	    &v);
4737 	end_synchronized_op(sc, 0);
4738 	if (rc)
4739 		return (rc);
4740 	if (op == 0)
4741 		v /= 256;
4742 
4743 	rc = sysctl_handle_int(oidp, &v, 0, req);
4744 	return (rc);
4745 }
4746 
4747 static int
4748 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4749 {
4750 	struct port_info *pi = arg1;
4751 	struct adapter *sc = pi->adapter;
4752 	int idx, rc, i;
4753 	struct sge_rxq *rxq;
4754 #ifdef TCP_OFFLOAD
4755 	struct sge_ofld_rxq *ofld_rxq;
4756 #endif
4757 	uint8_t v;
4758 
4759 	idx = pi->tmr_idx;
4760 
4761 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4762 	if (rc != 0 || req->newptr == NULL)
4763 		return (rc);
4764 
4765 	if (idx < 0 || idx >= SGE_NTIMERS)
4766 		return (EINVAL);
4767 
4768 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4769 	    "t4tmr");
4770 	if (rc)
4771 		return (rc);
4772 
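	/*
	 * The queues stay live while this changes; use an atomic 8-bit
	 * store for intr_params where the platform provides one.
	 */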
4773 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4774 	for_each_rxq(pi, i, rxq) {
4775 #ifdef atomic_store_rel_8
4776 		atomic_store_rel_8(&rxq->iq.intr_params, v);
4777 #else
4778 		rxq->iq.intr_params = v;
4779 #endif
4780 	}
4781 #ifdef TCP_OFFLOAD
4782 	for_each_ofld_rxq(pi, i, ofld_rxq) {
4783 #ifdef atomic_store_rel_8
4784 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4785 #else
4786 		ofld_rxq->iq.intr_params = v;
4787 #endif
4788 	}
4789 #endif
4790 	pi->tmr_idx = idx;
4791 
4792 	end_synchronized_op(sc, LOCK_HELD);
4793 	return (0);
4794 }
4795 
4796 static int
4797 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4798 {
4799 	struct port_info *pi = arg1;
4800 	struct adapter *sc = pi->adapter;
4801 	int idx, rc;
4802 
4803 	idx = pi->pktc_idx;
4804 
4805 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4806 	if (rc != 0 || req->newptr == NULL)
4807 		return (rc);
4808 
4809 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4810 		return (EINVAL);
4811 
4812 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4813 	    "t4pktc");
4814 	if (rc)
4815 		return (rc);
4816 
4817 	if (pi->flags & PORT_INIT_DONE)
4818 		rc = EBUSY; /* cannot be changed once the queues are created */
4819 	else
4820 		pi->pktc_idx = idx;
4821 
4822 	end_synchronized_op(sc, LOCK_HELD);
4823 	return (rc);
4824 }
4825 
4826 static int
4827 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4828 {
4829 	struct port_info *pi = arg1;
4830 	struct adapter *sc = pi->adapter;
4831 	int qsize, rc;
4832 
4833 	qsize = pi->qsize_rxq;
4834 
4835 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4836 	if (rc != 0 || req->newptr == NULL)
4837 		return (rc);
4838 
4839 	if (qsize < 128 || (qsize & 7))
4840 		return (EINVAL);
4841 
4842 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4843 	    "t4rxqs");
4844 	if (rc)
4845 		return (rc);
4846 
4847 	if (pi->flags & PORT_INIT_DONE)
4848 		rc = EBUSY; /* cannot be changed once the queues are created */
4849 	else
4850 		pi->qsize_rxq = qsize;
4851 
4852 	end_synchronized_op(sc, LOCK_HELD);
4853 	return (rc);
4854 }
4855 
4856 static int
4857 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4858 {
4859 	struct port_info *pi = arg1;
4860 	struct adapter *sc = pi->adapter;
4861 	int qsize, rc;
4862 
4863 	qsize = pi->qsize_txq;
4864 
4865 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4866 	if (rc != 0 || req->newptr == NULL)
4867 		return (rc);
4868 
4869 	/* bufring size must be a power of 2 */
4870 	if (qsize < 128 || !powerof2(qsize))
4871 		return (EINVAL);
4872 
4873 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4874 	    "t4txqs");
4875 	if (rc)
4876 		return (rc);
4877 
4878 	if (pi->flags & PORT_INIT_DONE)
4879 		rc = EBUSY; /* cannot be changed once the queues are created */
4880 	else
4881 		pi->qsize_txq = qsize;
4882 
4883 	end_synchronized_op(sc, LOCK_HELD);
4884 	return (rc);
4885 }
4886 
4887 static int
4888 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4889 {
4890 	struct adapter *sc = arg1;
4891 	int reg = arg2;
4892 	uint64_t val;
4893 
4894 	val = t4_read_reg64(sc, reg);
4895 
4896 	return (sysctl_handle_64(oidp, &val, 0, req));
4897 }
4898 
4899 static int
4900 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4901 {
4902 	struct adapter *sc = arg1;
4903 	int rc, t;
4904 	uint32_t param, val;
4905 
4906 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4907 	if (rc)
4908 		return (rc);
4909 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4910 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4911 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4912 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4913 	end_synchronized_op(sc, 0);
4914 	if (rc)
4915 		return (rc);
4916 
4917 	/* The firmware reports an unknown temperature as 0; display it as -1. */
4918 	t = val == 0 ? -1 : val;
4919 
4920 	rc = sysctl_handle_int(oidp, &t, 0, req);
4921 	return (rc);
4922 }
4923 
4924 #ifdef SBUF_DRAIN
4925 static int
4926 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4927 {
4928 	struct adapter *sc = arg1;
4929 	struct sbuf *sb;
4930 	int rc, i;
4931 	uint16_t incr[NMTUS][NCCTRL_WIN];
4932 	static const char *dec_fac[] = {
4933 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4934 		"0.9375"
4935 	};
4936 
4937 	rc = sysctl_wire_old_buffer(req, 0);
4938 	if (rc != 0)
4939 		return (rc);
4940 
4941 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4942 	if (sb == NULL)
4943 		return (ENOMEM);
4944 
4945 	t4_read_cong_tbl(sc, incr);
4946 
4947 	for (i = 0; i < NCCTRL_WIN; ++i) {
4948 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4949 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4950 		    incr[5][i], incr[6][i], incr[7][i]);
4951 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4952 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4953 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4954 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4955 	}
4956 
4957 	rc = sbuf_finish(sb);
4958 	sbuf_delete(sb);
4959 
4960 	return (rc);
4961 }
4962 
4963 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4964 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4965 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4966 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4967 };
4968 
4969 static int
4970 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4971 {
4972 	struct adapter *sc = arg1;
4973 	struct sbuf *sb;
4974 	int rc, i, n, qid = arg2;
4975 	uint32_t *buf, *p;
4976 	char *qtype;
4977 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4978 
4979 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4980 	    ("%s: bad qid %d\n", __func__, qid));
4981 
4982 	if (qid < CIM_NUM_IBQ) {
4983 		/* inbound queue */
4984 		qtype = "IBQ";
4985 		n = 4 * CIM_IBQ_SIZE;
4986 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4987 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4988 	} else {
4989 		/* outbound queue */
4990 		qtype = "OBQ";
4991 		qid -= CIM_NUM_IBQ;
4992 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4993 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4994 		rc = t4_read_cim_obq(sc, qid, buf, n);
4995 	}
4996 
4997 	if (rc < 0) {
4998 		rc = -rc;
4999 		goto done;
5000 	}
5001 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5002 
5003 	rc = sysctl_wire_old_buffer(req, 0);
5004 	if (rc != 0)
5005 		goto done;
5006 
5007 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5008 	if (sb == NULL) {
5009 		rc = ENOMEM;
5010 		goto done;
5011 	}
5012 
5013 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5014 	for (i = 0, p = buf; i < n; i += 16, p += 4)
5015 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5016 		    p[2], p[3]);
5017 
5018 	rc = sbuf_finish(sb);
5019 	sbuf_delete(sb);
5020 done:
5021 	free(buf, M_CXGBE);
5022 	return (rc);
5023 }
5024 
5025 static int
5026 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5027 {
5028 	struct adapter *sc = arg1;
5029 	u_int cfg;
5030 	struct sbuf *sb;
5031 	uint32_t *buf, *p;
5032 	int rc;
5033 
5034 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5035 	if (rc != 0)
5036 		return (rc);
5037 
5038 	rc = sysctl_wire_old_buffer(req, 0);
5039 	if (rc != 0)
5040 		return (rc);
5041 
5042 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5043 	if (sb == NULL)
5044 		return (ENOMEM);
5045 
5046 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5047 	    M_ZERO | M_WAITOK);
5048 
5049 	rc = -t4_cim_read_la(sc, buf, NULL);
5050 	if (rc != 0)
5051 		goto done;
5052 
5053 	sbuf_printf(sb, "Status   Data      PC%s",
5054 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5055 	    "     LS0Stat  LS0Addr             LS0Data");
5056 
5057 	KASSERT((sc->params.cim_la_size & 7) == 0,
5058 	    ("%s: p will walk off the end of buf", __func__));
5059 
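	/*
	 * Each LA entry is 8 words; the layout depends on whether the
	 * capture was configured for PC-only mode.
	 */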
5060 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5061 		if (cfg & F_UPDBGLACAPTPCONLY) {
5062 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5063 			    p[6], p[7]);
5064 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5065 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5066 			    p[4] & 0xff, p[5] >> 8);
5067 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5068 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5069 			    p[1] & 0xf, p[2] >> 4);
5070 		} else {
5071 			sbuf_printf(sb,
5072 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5073 			    "%08x%08x%08x%08x",
5074 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5075 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5076 			    p[6], p[7]);
5077 		}
5078 	}
5079 
5080 	rc = sbuf_finish(sb);
5081 	sbuf_delete(sb);
5082 done:
5083 	free(buf, M_CXGBE);
5084 	return (rc);
5085 }
5086 
5087 static int
5088 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5089 {
5090 	struct adapter *sc = arg1;
5091 	u_int i;
5092 	struct sbuf *sb;
5093 	uint32_t *buf, *p;
5094 	int rc;
5095 
5096 	rc = sysctl_wire_old_buffer(req, 0);
5097 	if (rc != 0)
5098 		return (rc);
5099 
5100 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5101 	if (sb == NULL)
5102 		return (ENOMEM);
5103 
5104 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5105 	    M_ZERO | M_WAITOK);
5106 
5107 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5108 	p = buf;
5109 
5110 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5111 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5112 		    p[1], p[0]);
5113 	}
5114 
5115 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5116 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5117 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5118 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5119 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5120 		    (p[1] >> 2) | ((p[2] & 3) << 30),
5121 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5122 		    p[0] & 1);
5123 	}
5124 
5125 	rc = sbuf_finish(sb);
5126 	sbuf_delete(sb);
5127 	free(buf, M_CXGBE);
5128 	return (rc);
5129 }
5130 
5131 static int
5132 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5133 {
5134 	struct adapter *sc = arg1;
5135 	u_int i;
5136 	struct sbuf *sb;
5137 	uint32_t *buf, *p;
5138 	int rc;
5139 
5140 	rc = sysctl_wire_old_buffer(req, 0);
5141 	if (rc != 0)
5142 		return (rc);
5143 
5144 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5145 	if (sb == NULL)
5146 		return (ENOMEM);
5147 
5148 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5149 	    M_ZERO | M_WAITOK);
5150 
5151 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5152 	p = buf;
5153 
5154 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5155 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5156 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5157 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5158 		    p[4], p[3], p[2], p[1], p[0]);
5159 	}
5160 
5161 	sbuf_printf(sb, "\n\nCntl ID               Data");
5162 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5163 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5164 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5165 	}
5166 
5167 	rc = sbuf_finish(sb);
5168 	sbuf_delete(sb);
5169 	free(buf, M_CXGBE);
5170 	return (rc);
5171 }
5172 
5173 static int
5174 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5175 {
5176 	struct adapter *sc = arg1;
5177 	struct sbuf *sb;
5178 	int rc, i;
5179 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5180 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5181 	uint16_t thres[CIM_NUM_IBQ];
5182 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5183 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5184 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5185 
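	/*
	 * T5 has two extra OBQs and exposes the queue pointers at shadow
	 * register addresses.
	 */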
5186 	if (is_t4(sc)) {
5187 		cim_num_obq = CIM_NUM_OBQ;
5188 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5189 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5190 	} else {
5191 		cim_num_obq = CIM_NUM_OBQ_T5;
5192 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5193 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5194 	}
5195 	nq = CIM_NUM_IBQ + cim_num_obq;
5196 
5197 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5198 	if (rc == 0)
5199 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5200 	if (rc != 0)
5201 		return (rc);
5202 
5203 	t4_read_cimq_cfg(sc, base, size, thres);
5204 
5205 	rc = sysctl_wire_old_buffer(req, 0);
5206 	if (rc != 0)
5207 		return (rc);
5208 
5209 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5210 	if (sb == NULL)
5211 		return (ENOMEM);
5212 
5213 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5214 
5215 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5216 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5217 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5218 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5219 		    G_QUEREMFLITS(p[2]) * 16);
5220 	for ( ; i < nq; i++, p += 4, wr += 2)
5221 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5222 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5223 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5224 		    G_QUEREMFLITS(p[2]) * 16);
5225 
5226 	rc = sbuf_finish(sb);
5227 	sbuf_delete(sb);
5228 
5229 	return (rc);
5230 }
5231 
5232 static int
5233 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5234 {
5235 	struct adapter *sc = arg1;
5236 	struct sbuf *sb;
5237 	int rc;
5238 	struct tp_cpl_stats stats;
5239 
5240 	rc = sysctl_wire_old_buffer(req, 0);
5241 	if (rc != 0)
5242 		return (rc);
5243 
5244 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5245 	if (sb == NULL)
5246 		return (ENOMEM);
5247 
5248 	t4_tp_get_cpl_stats(sc, &stats);
5249 
5250 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5251 	    "channel 3\n");
5252 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5253 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5254 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5255 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5256 
5257 	rc = sbuf_finish(sb);
5258 	sbuf_delete(sb);
5259 
5260 	return (rc);
5261 }
5262 
5263 static int
5264 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5265 {
5266 	struct adapter *sc = arg1;
5267 	struct sbuf *sb;
5268 	int rc;
5269 	struct tp_usm_stats stats;
5270 
5271 	rc = sysctl_wire_old_buffer(req, 0);
5272 	if (rc != 0)
5273 		return (rc);
5274 
5275 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5276 	if (sb == NULL)
5277 		return (ENOMEM);
5278 
5279 	t4_get_usm_stats(sc, &stats);
5280 
5281 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5282 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5283 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5284 
5285 	rc = sbuf_finish(sb);
5286 	sbuf_delete(sb);
5287 
5288 	return (rc);
5289 }
5290 
5291 const char *devlog_level_strings[] = {
5292 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5293 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5294 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5295 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5296 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5297 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5298 };
5299 
5300 const char *devlog_facility_strings[] = {
5301 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5302 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5303 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5304 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5305 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5306 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5307 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5308 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5309 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5310 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5311 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5312 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5313 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5314 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5315 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5316 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5317 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5318 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5319 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5320 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5321 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5322 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5323 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5324 };
5325 
5326 static int
5327 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5328 {
5329 	struct adapter *sc = arg1;
5330 	struct devlog_params *dparams = &sc->params.devlog;
5331 	struct fw_devlog_e *buf, *e;
5332 	int i, j, rc, nentries, first = 0, m;
5333 	struct sbuf *sb;
5334 	uint64_t ftstamp = UINT64_MAX;
5335 
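	/*
	 * Fall back to the historical default devlog location if the
	 * firmware did not report the log's parameters.
	 */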
5336 	if (dparams->start == 0) {
5337 		dparams->memtype = FW_MEMTYPE_EDC0;
5338 		dparams->start = 0x84000;
5339 		dparams->size = 32768;
5340 	}
5341 
5342 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5343 
5344 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5345 	if (buf == NULL)
5346 		return (ENOMEM);
5347 
5348 	m = fwmtype_to_hwmtype(dparams->memtype);
5349 	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5350 	if (rc != 0)
5351 		goto done;
5352 
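	/*
	 * The log is a ring; find the oldest entry (lowest timestamp) to use
	 * as the starting point, byte-swapping each entry along the way.
	 */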
5353 	for (i = 0; i < nentries; i++) {
5354 		e = &buf[i];
5355 
5356 		if (e->timestamp == 0)
5357 			break;	/* end */
5358 
5359 		e->timestamp = be64toh(e->timestamp);
5360 		e->seqno = be32toh(e->seqno);
5361 		for (j = 0; j < 8; j++)
5362 			e->params[j] = be32toh(e->params[j]);
5363 
5364 		if (e->timestamp < ftstamp) {
5365 			ftstamp = e->timestamp;
5366 			first = i;
5367 		}
5368 	}
5369 
5370 	if (buf[first].timestamp == 0)
5371 		goto done;	/* nothing in the log */
5372 
5373 	rc = sysctl_wire_old_buffer(req, 0);
5374 	if (rc != 0)
5375 		goto done;
5376 
5377 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5378 	if (sb == NULL) {
5379 		rc = ENOMEM;
5380 		goto done;
5381 	}
5382 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5383 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5384 
5385 	i = first;
5386 	do {
5387 		e = &buf[i];
5388 		if (e->timestamp == 0)
5389 			break;	/* end */
5390 
5391 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5392 		    e->seqno, e->timestamp,
5393 		    (e->level < nitems(devlog_level_strings) ?
5394 			devlog_level_strings[e->level] : "UNKNOWN"),
5395 		    (e->facility < nitems(devlog_facility_strings) ?
5396 			devlog_facility_strings[e->facility] : "UNKNOWN"));
5397 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5398 		    e->params[2], e->params[3], e->params[4],
5399 		    e->params[5], e->params[6], e->params[7]);
5400 
5401 		if (++i == nentries)
5402 			i = 0;
5403 	} while (i != first);
5404 
5405 	rc = sbuf_finish(sb);
5406 	sbuf_delete(sb);
5407 done:
5408 	free(buf, M_CXGBE);
5409 	return (rc);
5410 }
5411 
5412 static int
5413 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5414 {
5415 	struct adapter *sc = arg1;
5416 	struct sbuf *sb;
5417 	int rc;
5418 	struct tp_fcoe_stats stats[4];
5419 
5420 	rc = sysctl_wire_old_buffer(req, 0);
5421 	if (rc != 0)
5422 		return (rc);
5423 
5424 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5425 	if (sb == NULL)
5426 		return (ENOMEM);
5427 
5428 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5429 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5430 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5431 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5432 
5433 	sbuf_printf(sb, "                   channel 0        channel 1        "
5434 	    "channel 2        channel 3\n");
5435 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5436 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5437 	    stats[3].octetsDDP);
5438 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5439 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5440 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5441 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5442 	    stats[3].framesDrop);
5443 
5444 	rc = sbuf_finish(sb);
5445 	sbuf_delete(sb);
5446 
5447 	return (rc);
5448 }
5449 
5450 static int
5451 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5452 {
5453 	struct adapter *sc = arg1;
5454 	struct sbuf *sb;
5455 	int rc, i;
5456 	unsigned int map, kbps, ipg, mode;
5457 	unsigned int pace_tab[NTX_SCHED];
5458 
5459 	rc = sysctl_wire_old_buffer(req, 0);
5460 	if (rc != 0)
5461 		return (rc);
5462 
5463 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5464 	if (sb == NULL)
5465 		return (ENOMEM);
5466 
5467 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5468 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5469 	t4_read_pace_tbl(sc, pace_tab);
5470 
5471 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5472 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5473 
5474 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5475 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5476 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5477 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5478 		if (kbps)
5479 			sbuf_printf(sb, "%9u     ", kbps);
5480 		else
5481 			sbuf_printf(sb, " disabled     ");
5482 
5483 		if (ipg)
5484 			sbuf_printf(sb, "%13u        ", ipg);
5485 		else
5486 			sbuf_printf(sb, "     disabled        ");
5487 
5488 		if (pace_tab[i])
5489 			sbuf_printf(sb, "%10u", pace_tab[i]);
5490 		else
5491 			sbuf_printf(sb, "  disabled");
5492 	}
5493 
5494 	rc = sbuf_finish(sb);
5495 	sbuf_delete(sb);
5496 
5497 	return (rc);
5498 }
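
/*
 * Reading the dump above: A_TP_TX_MOD_QUEUE_REQ_MAP packs one 2-bit channel
 * field per scheduler (hence map >>= 2 each iteration), and bit i of the
 * TIMERMODE field selects whether scheduler i paces per-flow ("flow") or
 * per-class ("class").  A zero rate, IPG, or pace-table entry is reported
 * as "disabled".
 */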
5499 
5500 static int
5501 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5502 {
5503 	struct adapter *sc = arg1;
5504 	struct sbuf *sb;
5505 	int rc, i, j;
5506 	uint64_t *p0, *p1;
5507 	struct lb_port_stats s[2];
5508 	static const char *stat_name[] = {
5509 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5510 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5511 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5512 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5513 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5514 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5515 		"BG2FramesTrunc:", "BG3FramesTrunc:"
5516 	};
5517 
5518 	rc = sysctl_wire_old_buffer(req, 0);
5519 	if (rc != 0)
5520 		return (rc);
5521 
5522 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5523 	if (sb == NULL)
5524 		return (ENOMEM);
5525 
5526 	memset(s, 0, sizeof(s));
5527 
5528 	for (i = 0; i < 4; i += 2) {
5529 		t4_get_lb_stats(sc, i, &s[0]);
5530 		t4_get_lb_stats(sc, i + 1, &s[1]);
5531 
5532 		p0 = &s[0].octets;
5533 		p1 = &s[1].octets;
5534 		sbuf_printf(sb, "%s                       Loopback %u"
5535 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5536 
5537 		for (j = 0; j < nitems(stat_name); j++)
5538 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5539 				   *p0++, *p1++);
5540 	}
5541 
5542 	rc = sbuf_finish(sb);
5543 	sbuf_delete(sb);
5544 
5545 	return (rc);
5546 }
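
/*
 * The loopback stats printing relies on struct lb_port_stats laying out all
 * of its counters as consecutive uint64_t fields starting at 'octets'; p0
 * and p1 walk that array in lock-step with stat_name[].  Any field added to
 * or reordered in lb_port_stats requires a matching stat_name[] update.
 */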
5547 
5548 static int
5549 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5550 {
5551 	int rc = 0;
5552 	struct port_info *pi = arg1;
5553 	struct sbuf *sb;
5554 	static const char *linkdnreasons[] = {
5555 		"non-specific", "remote fault", "autoneg failed", "reserved3",
5556 		"PHY overheated", "unknown", "rx los", "reserved7"
5557 	};
5558 
5559 	rc = sysctl_wire_old_buffer(req, 0);
5560 	if (rc != 0)
5561 		return (rc);
5562 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5563 	if (sb == NULL)
5564 		return (ENOMEM);
5565 
5566 	if (pi->linkdnrc < 0)
5567 		sbuf_printf(sb, "n/a");
5568 	else if (pi->linkdnrc < nitems(linkdnreasons))
5569 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5570 	else
5571 		sbuf_printf(sb, "%d", pi->linkdnrc);
5572 
5573 	rc = sbuf_finish(sb);
5574 	sbuf_delete(sb);
5575 
5576 	return (rc);
5577 }
5578 
5579 struct mem_desc {
5580 	unsigned int base;
5581 	unsigned int limit;
5582 	unsigned int idx;
5583 };
5584 
5585 static int
5586 mem_desc_cmp(const void *a, const void *b)
5587 {
5588 	return ((const struct mem_desc *)a)->base -
5589 	       ((const struct mem_desc *)b)->base;
5590 }
5591 
5592 static void
5593 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5594     unsigned int to)
5595 {
5596 	unsigned int size;
5597 
5598 	size = to - from + 1;
5599 	if (size == 0)
5600 		return;
5601 
5602 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5603 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5604 }
5605 
5606 static int
5607 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5608 {
5609 	struct adapter *sc = arg1;
5610 	struct sbuf *sb;
5611 	int rc, i, n;
5612 	uint32_t lo, hi, used, alloc;
5613 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5614 	static const char *region[] = {
5615 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5616 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5617 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5618 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5619 		"RQUDP region:", "PBL region:", "TXPBL region:",
5620 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5621 		"On-chip queues:"
5622 	};
5623 	struct mem_desc avail[4];
5624 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5625 	struct mem_desc *md = mem;
5626 
5627 	rc = sysctl_wire_old_buffer(req, 0);
5628 	if (rc != 0)
5629 		return (rc);
5630 
5631 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5632 	if (sb == NULL)
5633 		return (ENOMEM);
5634 
5635 	for (i = 0; i < nitems(mem); i++) {
5636 		mem[i].limit = 0;
5637 		mem[i].idx = i;
5638 	}
5639 
5640 	/* Find and sort the populated memory ranges */
5641 	i = 0;
5642 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5643 	if (lo & F_EDRAM0_ENABLE) {
5644 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5645 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5646 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5647 		avail[i].idx = 0;
5648 		i++;
5649 	}
5650 	if (lo & F_EDRAM1_ENABLE) {
5651 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5652 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5653 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5654 		avail[i].idx = 1;
5655 		i++;
5656 	}
5657 	if (lo & F_EXT_MEM_ENABLE) {
5658 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5659 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5660 		avail[i].limit = avail[i].base +
5661 		    (G_EXT_MEM_SIZE(hi) << 20);
5662 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5663 		i++;
5664 	}
5665 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5666 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5667 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5668 		avail[i].limit = avail[i].base +
5669 		    (G_EXT_MEM1_SIZE(hi) << 20);
5670 		avail[i].idx = 4;
5671 		i++;
5672 	}
5673 	if (!i) {                                  /* no memory available */
5674 		sbuf_delete(sb);	/* don't leak the sbuf on this path */
		return (0);
	}
5675 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5676 
5677 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5678 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5679 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5680 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5681 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5682 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5683 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5684 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5685 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5686 
5687 	/* the next few have explicit upper bounds */
5688 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5689 	md->limit = md->base - 1 +
5690 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5691 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5692 	md++;
5693 
5694 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5695 	md->limit = md->base - 1 +
5696 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5697 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5698 	md++;
5699 
5700 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5701 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5702 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5703 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5704 	} else {
5705 		md->base = 0;
5706 		md->idx = nitems(region);  /* hide it */
5707 	}
5708 	md++;
5709 
5710 #define ulp_region(reg) \
5711 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5712 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5713 
5714 	ulp_region(RX_ISCSI);
5715 	ulp_region(RX_TDDP);
5716 	ulp_region(TX_TPT);
5717 	ulp_region(RX_STAG);
5718 	ulp_region(RX_RQ);
5719 	ulp_region(RX_RQUDP);
5720 	ulp_region(RX_PBL);
5721 	ulp_region(TX_PBL);
5722 #undef ulp_region
5723 
5724 	md->base = 0;
5725 	md->idx = nitems(region);
5726 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5727 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5728 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5729 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5730 	}
5731 	md++;
5732 
5733 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5734 	md->limit = md->base + sc->tids.ntids - 1;
5735 	md++;
5736 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5737 	md->limit = md->base + sc->tids.ntids - 1;
5738 	md++;
5739 
5740 	md->base = sc->vres.ocq.start;
5741 	if (sc->vres.ocq.size)
5742 		md->limit = md->base + sc->vres.ocq.size - 1;
5743 	else
5744 		md->idx = nitems(region);  /* hide it */
5745 	md++;
5746 
5747 	/* Add any address-space holes; there can be up to 3 */
5748 	for (n = 0; n < i - 1; n++)
5749 		if (avail[n].limit < avail[n + 1].base)
5750 			(md++)->base = avail[n].limit;
5751 	if (avail[n].limit)
5752 		(md++)->base = avail[n].limit;
5753 
5754 	n = md - mem;
5755 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5756 
5757 	for (lo = 0; lo < i; lo++)
5758 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5759 				avail[lo].limit - 1);
5760 
5761 	sbuf_printf(sb, "\n");
5762 	for (i = 0; i < n; i++) {
5763 		if (mem[i].idx >= nitems(region))
5764 			continue;                        /* skip holes */
5765 		if (!mem[i].limit)
5766 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5767 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5768 				mem[i].limit);
5769 	}
5770 
5771 	sbuf_printf(sb, "\n");
5772 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5773 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5774 	mem_region_show(sb, "uP RAM:", lo, hi);
5775 
5776 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5777 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5778 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5779 
5780 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5781 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5782 		   G_PMRXMAXPAGE(lo),
5783 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5784 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5785 
5786 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5787 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5788 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5789 		   G_PMTXMAXPAGE(lo),
5790 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5791 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5792 	sbuf_printf(sb, "%u p-structs\n",
5793 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5794 
5795 	for (i = 0; i < 4; i++) {
5796 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5797 		if (is_t4(sc)) {
5798 			used = G_USED(lo);
5799 			alloc = G_ALLOC(lo);
5800 		} else {
5801 			used = G_T5_USED(lo);
5802 			alloc = G_T5_ALLOC(lo);
5803 		}
5804 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5805 			   i, used, alloc);
5806 	}
5807 	for (i = 0; i < 4; i++) {
5808 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5809 		if (is_t4(sc)) {
5810 			used = G_USED(lo);
5811 			alloc = G_ALLOC(lo);
5812 		} else {
5813 			used = G_T5_USED(lo);
5814 			alloc = G_T5_ALLOC(lo);
5815 		}
5816 		sbuf_printf(sb,
5817 			   "\nLoopback %d using %u pages out of %u allocated",
5818 			   i, used, alloc);
5819 	}
5820 
5821 	rc = sbuf_finish(sb);
5822 	sbuf_delete(sb);
5823 
5824 	return (rc);
5825 }
5826 
5827 static inline void
5828 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5829 {
5830 	*mask = x | y;
5831 	y = htobe64(y);
5832 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5833 }
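
/*
 * MPS TCAM entries are ternary, one (x, y) bit pair per address bit:
 *
 *	(0, 0) don't care	(0, 1) match 1
 *	(1, 0) match 0		(1, 1) invalid
 *
 * so tcamxy2valmask() recovers the mask as x | y with y itself as the
 * value, and the handler below skips any entry with (tcamx & tcamy)
 * non-zero as invalid.  The 48-bit Ethernet address occupies the low 6
 * bytes of the 64-bit value, hence the "+ 2" when copying out of the
 * big-endian word.
 */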
5834 
5835 static int
5836 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5837 {
5838 	struct adapter *sc = arg1;
5839 	struct sbuf *sb;
5840 	int rc, i, n;
5841 
5842 	rc = sysctl_wire_old_buffer(req, 0);
5843 	if (rc != 0)
5844 		return (rc);
5845 
5846 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5847 	if (sb == NULL)
5848 		return (ENOMEM);
5849 
5850 	sbuf_printf(sb,
5851 	    "Idx  Ethernet address     Mask     Vld Ports PF"
5852 	    "  VF              Replication             P0 P1 P2 P3  ML");
5853 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5854 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5855 	for (i = 0; i < n; i++) {
5856 		uint64_t tcamx, tcamy, mask;
5857 		uint32_t cls_lo, cls_hi;
5858 		uint8_t addr[ETHER_ADDR_LEN];
5859 
5860 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5861 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5862 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5863 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5864 
5865 		if (tcamx & tcamy)
5866 			continue;
5867 
5868 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5869 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5870 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5871 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5872 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5873 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5874 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5875 
5876 		if (cls_lo & F_REPLICATE) {
5877 			struct fw_ldst_cmd ldst_cmd;
5878 
5879 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5880 			ldst_cmd.op_to_addrspace =
5881 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5882 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5883 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5884 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5885 			ldst_cmd.u.mps.fid_ctl =
5886 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5887 				V_FW_LDST_CMD_CTL(i));
5888 
5889 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5890 			    "t4mps");
5891 			if (rc)
5892 				break;
5893 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5894 			    sizeof(ldst_cmd), &ldst_cmd);
5895 			end_synchronized_op(sc, 0);
5896 
5897 			if (rc != 0) {
5898 				sbuf_printf(sb,
5899 				    " ------------ error %3u ------------", rc);
5900 				rc = 0;
5901 			} else {
5902 				sbuf_printf(sb, " %08x %08x %08x %08x",
5903 				    be32toh(ldst_cmd.u.mps.rplc127_96),
5904 				    be32toh(ldst_cmd.u.mps.rplc95_64),
5905 				    be32toh(ldst_cmd.u.mps.rplc63_32),
5906 				    be32toh(ldst_cmd.u.mps.rplc31_0));
5907 			}
5908 		} else
5909 			sbuf_printf(sb, "%36s", "");
5910 
5911 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5912 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5913 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5914 	}
5915 
5916 	if (rc)
5917 		(void) sbuf_finish(sb);
5918 	else
5919 		rc = sbuf_finish(sb);
5920 	sbuf_delete(sb);
5921 
5922 	return (rc);
5923 }
5924 
5925 static int
5926 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5927 {
5928 	struct adapter *sc = arg1;
5929 	struct sbuf *sb;
5930 	int rc;
5931 	uint16_t mtus[NMTUS];
5932 
5933 	rc = sysctl_wire_old_buffer(req, 0);
5934 	if (rc != 0)
5935 		return (rc);
5936 
5937 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5938 	if (sb == NULL)
5939 		return (ENOMEM);
5940 
5941 	t4_read_mtu_tbl(sc, mtus, NULL);
5942 
5943 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5944 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5945 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5946 	    mtus[14], mtus[15]);
5947 
5948 	rc = sbuf_finish(sb);
5949 	sbuf_delete(sb);
5950 
5951 	return (rc);
5952 }
5953 
5954 static int
5955 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5956 {
5957 	struct adapter *sc = arg1;
5958 	struct sbuf *sb;
5959 	int rc, i;
5960 	uint32_t cnt[PM_NSTATS];
5961 	uint64_t cyc[PM_NSTATS];
5962 	static const char *rx_stats[] = {
5963 		"Read:", "Write bypass:", "Write mem:", "Flush:"
5964 	};
5965 	static const char *tx_stats[] = {
5966 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
5967 	};
5968 
5969 	rc = sysctl_wire_old_buffer(req, 0);
5970 	if (rc != 0)
5971 		return (rc);
5972 
5973 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5974 	if (sb == NULL)
5975 		return (ENOMEM);
5976 
5977 	t4_pmtx_get_stats(sc, cnt, cyc);
5978 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
5979 	for (i = 0; i < nitems(tx_stats); i++)
5980 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
5981 		    cyc[i]);
5982 
5983 	t4_pmrx_get_stats(sc, cnt, cyc);
5984 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
5985 	for (i = 0; i < nitems(rx_stats); i++)
5986 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
5987 		    cyc[i]);
5988 
5989 	rc = sbuf_finish(sb);
5990 	sbuf_delete(sb);
5991 
5992 	return (rc);
5993 }
5994 
5995 static int
5996 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5997 {
5998 	struct adapter *sc = arg1;
5999 	struct sbuf *sb;
6000 	int rc;
6001 	struct tp_rdma_stats stats;
6002 
6003 	rc = sysctl_wire_old_buffer(req, 0);
6004 	if (rc != 0)
6005 		return (rc);
6006 
6007 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6008 	if (sb == NULL)
6009 		return (ENOMEM);
6010 
6011 	t4_tp_get_rdma_stats(sc, &stats);
6012 	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6013 	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6014 
6015 	rc = sbuf_finish(sb);
6016 	sbuf_delete(sb);
6017 
6018 	return (rc);
6019 }
6020 
6021 static int
6022 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6023 {
6024 	struct adapter *sc = arg1;
6025 	struct sbuf *sb;
6026 	int rc;
6027 	struct tp_tcp_stats v4, v6;
6028 
6029 	rc = sysctl_wire_old_buffer(req, 0);
6030 	if (rc != 0)
6031 		return (rc);
6032 
6033 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6034 	if (sb == NULL)
6035 		return (ENOMEM);
6036 
6037 	t4_tp_get_tcp_stats(sc, &v4, &v6);
6038 	sbuf_printf(sb,
6039 	    "                                IP                 IPv6\n");
6040 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6041 	    v4.tcpOutRsts, v6.tcpOutRsts);
6042 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6043 	    v4.tcpInSegs, v6.tcpInSegs);
6044 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6045 	    v4.tcpOutSegs, v6.tcpOutSegs);
6046 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6047 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6048 
6049 	rc = sbuf_finish(sb);
6050 	sbuf_delete(sb);
6051 
6052 	return (rc);
6053 }
6054 
6055 static int
6056 sysctl_tids(SYSCTL_HANDLER_ARGS)
6057 {
6058 	struct adapter *sc = arg1;
6059 	struct sbuf *sb;
6060 	int rc;
6061 	struct tid_info *t = &sc->tids;
6062 
6063 	rc = sysctl_wire_old_buffer(req, 0);
6064 	if (rc != 0)
6065 		return (rc);
6066 
6067 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6068 	if (sb == NULL)
6069 		return (ENOMEM);
6070 
6071 	if (t->natids) {
6072 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6073 		    t->atids_in_use);
6074 	}
6075 
6076 	if (t->ntids) {
6077 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6078 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6079 
6080 			if (b) {
6081 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6082 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6083 				    t->ntids - 1);
6084 			} else {
6085 				sbuf_printf(sb, "TID range: %u-%u",
6086 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6087 				    t->ntids - 1);
6088 			}
6089 		} else
6090 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6091 		sbuf_printf(sb, ", in use: %u\n",
6092 		    atomic_load_acq_int(&t->tids_in_use));
6093 	}
6094 
6095 	if (t->nstids) {
6096 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6097 		    t->stid_base + t->nstids - 1, t->stids_in_use);
6098 	}
6099 
6100 	if (t->nftids) {
6101 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6102 		    t->ftid_base + t->nftids - 1);
6103 	}
6104 
6105 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6106 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6107 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6108 
6109 	rc = sbuf_finish(sb);
6110 	sbuf_delete(sb);
6111 
6112 	return (rc);
6113 }
6114 
6115 static int
6116 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6117 {
6118 	struct adapter *sc = arg1;
6119 	struct sbuf *sb;
6120 	int rc;
6121 	struct tp_err_stats stats;
6122 
6123 	rc = sysctl_wire_old_buffer(req, 0);
6124 	if (rc != 0)
6125 		return (rc);
6126 
6127 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6128 	if (sb == NULL)
6129 		return (ENOMEM);
6130 
6131 	t4_tp_get_err_stats(sc, &stats);
6132 
6133 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6134 		      "channel 3\n");
6135 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6136 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6137 	    stats.macInErrs[3]);
6138 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6139 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6140 	    stats.hdrInErrs[3]);
6141 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6142 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6143 	    stats.tcpInErrs[3]);
6144 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6145 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6146 	    stats.tcp6InErrs[3]);
6147 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6148 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6149 	    stats.tnlCongDrops[3]);
6150 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6151 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6152 	    stats.tnlTxDrops[3]);
6153 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6154 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6155 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6156 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6157 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6158 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6159 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6160 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6161 
6162 	rc = sbuf_finish(sb);
6163 	sbuf_delete(sb);
6164 
6165 	return (rc);
6166 }
6167 
6168 struct field_desc {
6169 	const char *name;
6170 	u_int start;
6171 	u_int width;
6172 };
6173 
6174 static void
6175 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6176 {
6177 	char buf[32];
6178 	int line_size = 0;
6179 
6180 	while (f->name) {
6181 		uint64_t mask = (1ULL << f->width) - 1;
6182 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6183 		    ((uintmax_t)v >> f->start) & mask);
6184 
6185 		if (line_size + len >= 79) {
6186 			line_size = 8;
6187 			sbuf_printf(sb, "\n        ");
6188 		}
6189 		sbuf_printf(sb, "%s ", buf);
6190 		line_size += len + 1;
6191 		f++;
6192 	}
6193 	sbuf_printf(sb, "\n");
6194 }
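
/*
 * field_desc_show() walks a NULL-terminated field_desc array and prints
 * each field as "name: value", wrapping at roughly 79 columns.  Decoding is
 * plain shift-and-mask; e.g. the "Tid" entry of tp_la0 below (start 32,
 * width 10) is extracted as:
 *
 *	uint64_t tid = (v >> 32) & ((1ULL << 10) - 1);
 */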
6195 
6196 static struct field_desc tp_la0[] = {
6197 	{ "RcfOpCodeOut", 60, 4 },
6198 	{ "State", 56, 4 },
6199 	{ "WcfState", 52, 4 },
6200 	{ "RcfOpcSrcOut", 50, 2 },
6201 	{ "CRxError", 49, 1 },
6202 	{ "ERxError", 48, 1 },
6203 	{ "SanityFailed", 47, 1 },
6204 	{ "SpuriousMsg", 46, 1 },
6205 	{ "FlushInputMsg", 45, 1 },
6206 	{ "FlushInputCpl", 44, 1 },
6207 	{ "RssUpBit", 43, 1 },
6208 	{ "RssFilterHit", 42, 1 },
6209 	{ "Tid", 32, 10 },
6210 	{ "InitTcb", 31, 1 },
6211 	{ "LineNumber", 24, 7 },
6212 	{ "Emsg", 23, 1 },
6213 	{ "EdataOut", 22, 1 },
6214 	{ "Cmsg", 21, 1 },
6215 	{ "CdataOut", 20, 1 },
6216 	{ "EreadPdu", 19, 1 },
6217 	{ "CreadPdu", 18, 1 },
6218 	{ "TunnelPkt", 17, 1 },
6219 	{ "RcfPeerFin", 16, 1 },
6220 	{ "RcfReasonOut", 12, 4 },
6221 	{ "TxCchannel", 10, 2 },
6222 	{ "RcfTxChannel", 8, 2 },
6223 	{ "RxEchannel", 6, 2 },
6224 	{ "RcfRxChannel", 5, 1 },
6225 	{ "RcfDataOutSrdy", 4, 1 },
6226 	{ "RxDvld", 3, 1 },
6227 	{ "RxOoDvld", 2, 1 },
6228 	{ "RxCongestion", 1, 1 },
6229 	{ "TxCongestion", 0, 1 },
6230 	{ NULL }
6231 };
6232 
6233 static struct field_desc tp_la1[] = {
6234 	{ "CplCmdIn", 56, 8 },
6235 	{ "CplCmdOut", 48, 8 },
6236 	{ "ESynOut", 47, 1 },
6237 	{ "EAckOut", 46, 1 },
6238 	{ "EFinOut", 45, 1 },
6239 	{ "ERstOut", 44, 1 },
6240 	{ "SynIn", 43, 1 },
6241 	{ "AckIn", 42, 1 },
6242 	{ "FinIn", 41, 1 },
6243 	{ "RstIn", 40, 1 },
6244 	{ "DataIn", 39, 1 },
6245 	{ "DataInVld", 38, 1 },
6246 	{ "PadIn", 37, 1 },
6247 	{ "RxBufEmpty", 36, 1 },
6248 	{ "RxDdp", 35, 1 },
6249 	{ "RxFbCongestion", 34, 1 },
6250 	{ "TxFbCongestion", 33, 1 },
6251 	{ "TxPktSumSrdy", 32, 1 },
6252 	{ "RcfUlpType", 28, 4 },
6253 	{ "Eread", 27, 1 },
6254 	{ "Ebypass", 26, 1 },
6255 	{ "Esave", 25, 1 },
6256 	{ "Static0", 24, 1 },
6257 	{ "Cread", 23, 1 },
6258 	{ "Cbypass", 22, 1 },
6259 	{ "Csave", 21, 1 },
6260 	{ "CPktOut", 20, 1 },
6261 	{ "RxPagePoolFull", 18, 2 },
6262 	{ "RxLpbkPkt", 17, 1 },
6263 	{ "TxLpbkPkt", 16, 1 },
6264 	{ "RxVfValid", 15, 1 },
6265 	{ "SynLearned", 14, 1 },
6266 	{ "SetDelEntry", 13, 1 },
6267 	{ "SetInvEntry", 12, 1 },
6268 	{ "CpcmdDvld", 11, 1 },
6269 	{ "CpcmdSave", 10, 1 },
6270 	{ "RxPstructsFull", 8, 2 },
6271 	{ "EpcmdDvld", 7, 1 },
6272 	{ "EpcmdFlush", 6, 1 },
6273 	{ "EpcmdTrimPrefix", 5, 1 },
6274 	{ "EpcmdTrimPostfix", 4, 1 },
6275 	{ "ERssIp4Pkt", 3, 1 },
6276 	{ "ERssIp6Pkt", 2, 1 },
6277 	{ "ERssTcpUdpPkt", 1, 1 },
6278 	{ "ERssFceFipPkt", 0, 1 },
6279 	{ NULL }
6280 };
6281 
6282 static struct field_desc tp_la2[] = {
6283 	{ "CplCmdIn", 56, 8 },
6284 	{ "MpsVfVld", 55, 1 },
6285 	{ "MpsPf", 52, 3 },
6286 	{ "MpsVf", 44, 8 },
6287 	{ "SynIn", 43, 1 },
6288 	{ "AckIn", 42, 1 },
6289 	{ "FinIn", 41, 1 },
6290 	{ "RstIn", 40, 1 },
6291 	{ "DataIn", 39, 1 },
6292 	{ "DataInVld", 38, 1 },
6293 	{ "PadIn", 37, 1 },
6294 	{ "RxBufEmpty", 36, 1 },
6295 	{ "RxDdp", 35, 1 },
6296 	{ "RxFbCongestion", 34, 1 },
6297 	{ "TxFbCongestion", 33, 1 },
6298 	{ "TxPktSumSrdy", 32, 1 },
6299 	{ "RcfUlpType", 28, 4 },
6300 	{ "Eread", 27, 1 },
6301 	{ "Ebypass", 26, 1 },
6302 	{ "Esave", 25, 1 },
6303 	{ "Static0", 24, 1 },
6304 	{ "Cread", 23, 1 },
6305 	{ "Cbypass", 22, 1 },
6306 	{ "Csave", 21, 1 },
6307 	{ "CPktOut", 20, 1 },
6308 	{ "RxPagePoolFull", 18, 2 },
6309 	{ "RxLpbkPkt", 17, 1 },
6310 	{ "TxLpbkPkt", 16, 1 },
6311 	{ "RxVfValid", 15, 1 },
6312 	{ "SynLearned", 14, 1 },
6313 	{ "SetDelEntry", 13, 1 },
6314 	{ "SetInvEntry", 12, 1 },
6315 	{ "CpcmdDvld", 11, 1 },
6316 	{ "CpcmdSave", 10, 1 },
6317 	{ "RxPstructsFull", 8, 2 },
6318 	{ "EpcmdDvld", 7, 1 },
6319 	{ "EpcmdFlush", 6, 1 },
6320 	{ "EpcmdTrimPrefix", 5, 1 },
6321 	{ "EpcmdTrimPostfix", 4, 1 },
6322 	{ "ERssIp4Pkt", 3, 1 },
6323 	{ "ERssIp6Pkt", 2, 1 },
6324 	{ "ERssTcpUdpPkt", 1, 1 },
6325 	{ "ERssFceFipPkt", 0, 1 },
6326 	{ NULL }
6327 };
6328 
6329 static void
6330 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6331 {
6332 
6333 	field_desc_show(sb, *p, tp_la0);
6334 }
6335 
6336 static void
6337 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6338 {
6339 
6340 	if (idx)
6341 		sbuf_printf(sb, "\n");
6342 	field_desc_show(sb, p[0], tp_la0);
6343 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6344 		field_desc_show(sb, p[1], tp_la0);
6345 }
6346 
6347 static void
6348 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6349 {
6350 
6351 	if (idx)
6352 		sbuf_printf(sb, "\n");
6353 	field_desc_show(sb, p[0], tp_la0);
6354 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6355 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6356 }
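
/*
 * The TP logic-analyzer capture format depends on DBGLAMODE (read from
 * A_TP_DBG_LA_CONFIG in the handler below): modes 2 and 3 consume two
 * 64-bit words per entry, and in mode 3 bit 17 of the first word selects
 * whether the second word is decoded with tp_la2 or tp_la1.  A second word
 * of all ones in the final slot is treated as absent.
 */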
6357 
6358 static int
6359 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6360 {
6361 	struct adapter *sc = arg1;
6362 	struct sbuf *sb;
6363 	uint64_t *buf, *p;
6364 	int rc;
6365 	u_int i, inc;
6366 	void (*show_func)(struct sbuf *, uint64_t *, int);
6367 
6368 	rc = sysctl_wire_old_buffer(req, 0);
6369 	if (rc != 0)
6370 		return (rc);
6371 
6372 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6373 	if (sb == NULL)
6374 		return (ENOMEM);
6375 
6376 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6377 
6378 	t4_tp_read_la(sc, buf, NULL);
6379 	p = buf;
6380 
6381 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6382 	case 2:
6383 		inc = 2;
6384 		show_func = tp_la_show2;
6385 		break;
6386 	case 3:
6387 		inc = 2;
6388 		show_func = tp_la_show3;
6389 		break;
6390 	default:
6391 		inc = 1;
6392 		show_func = tp_la_show;
6393 	}
6394 
6395 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6396 		(*show_func)(sb, p, i);
6397 
6398 	rc = sbuf_finish(sb);
6399 	sbuf_delete(sb);
6400 	free(buf, M_CXGBE);
6401 	return (rc);
6402 }
6403 
6404 static int
6405 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6406 {
6407 	struct adapter *sc = arg1;
6408 	struct sbuf *sb;
6409 	int rc;
6410 	u64 nrate[NCHAN], orate[NCHAN];
6411 
6412 	rc = sysctl_wire_old_buffer(req, 0);
6413 	if (rc != 0)
6414 		return (rc);
6415 
6416 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6417 	if (sb == NULL)
6418 		return (ENOMEM);
6419 
6420 	t4_get_chan_txrate(sc, nrate, orate);
6421 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6422 		 "channel 3\n");
6423 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6424 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6425 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6426 	    orate[0], orate[1], orate[2], orate[3]);
6427 
6428 	rc = sbuf_finish(sb);
6429 	sbuf_delete(sb);
6430 
6431 	return (rc);
6432 }
6433 
6434 static int
6435 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6436 {
6437 	struct adapter *sc = arg1;
6438 	struct sbuf *sb;
6439 	uint32_t *buf, *p;
6440 	int rc, i;
6441 
6442 	rc = sysctl_wire_old_buffer(req, 0);
6443 	if (rc != 0)
6444 		return (rc);
6445 
6446 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6447 	if (sb == NULL)
6448 		return (ENOMEM);
6449 
6450 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6451 	    M_ZERO | M_WAITOK);
6452 
6453 	t4_ulprx_read_la(sc, buf);
6454 	p = buf;
6455 
6456 	sbuf_printf(sb, "      Pcmd        Type   Message"
6457 	    "                Data");
6458 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6459 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6460 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6461 	}
6462 
6463 	rc = sbuf_finish(sb);
6464 	sbuf_delete(sb);
6465 	free(buf, M_CXGBE);
6466 	return (rc);
6467 }
6468 
6469 static int
6470 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6471 {
6472 	struct adapter *sc = arg1;
6473 	struct sbuf *sb;
6474 	int rc, v;
6475 
6476 	rc = sysctl_wire_old_buffer(req, 0);
6477 	if (rc != 0)
6478 		return (rc);
6479 
6480 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6481 	if (sb == NULL)
6482 		return (ENOMEM);
6483 
6484 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6485 	if (G_STATSOURCE_T5(v) == 7) {
6486 		if (G_STATMODE(v) == 0) {
6487 			sbuf_printf(sb, "total %d, incomplete %d",
6488 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6489 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6490 		} else if (G_STATMODE(v) == 1) {
6491 			sbuf_printf(sb, "total %d, data overflow %d",
6492 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6493 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6494 		}
6495 	}
6496 	rc = sbuf_finish(sb);
6497 	sbuf_delete(sb);
6498 
6499 	return (rc);
6500 }
6501 #endif
6502 
6503 static inline void
6504 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6505 {
6506 	struct buf_ring *br;
6507 	struct mbuf *m;
6508 
6509 	TXQ_LOCK_ASSERT_OWNED(txq);
6510 
6511 	br = txq->br;
6512 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6513 	if (m)
6514 		t4_eth_tx(ifp, txq, m);
6515 }
6516 
6517 void
6518 t4_tx_callout(void *arg)
6519 {
6520 	struct sge_eq *eq = arg;
6521 	struct adapter *sc;
6522 
6523 	if (EQ_TRYLOCK(eq) == 0)
6524 		goto reschedule;
6525 
6526 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6527 		EQ_UNLOCK(eq);
6528 reschedule:
6529 		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6530 			callout_schedule(&eq->tx_callout, 1);
6531 		return;
6532 	}
6533 
6534 	EQ_LOCK_ASSERT_OWNED(eq);
6535 
6536 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6537 
6538 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6539 			struct sge_txq *txq = arg;
6540 			struct port_info *pi = txq->ifp->if_softc;
6541 
6542 			sc = pi->adapter;
6543 		} else {
6544 			struct sge_wrq *wrq = arg;
6545 
6546 			sc = wrq->adapter;
6547 		}
6548 
6549 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6550 	}
6551 
6552 	EQ_UNLOCK(eq);
6553 }
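
/*
 * t4_tx_callout() deliberately only trylocks the eq: on lock failure, or
 * while the queue is stalled with no room to resume, it reschedules itself
 * one tick out (unless the eq is being torn down).  Once progress is
 * possible it hands the work to the per-channel taskqueue, where
 * t4_tx_task() drains the queue under the eq lock.
 */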
6554 
6555 void
6556 t4_tx_task(void *arg, int count)
6557 {
6558 	struct sge_eq *eq = arg;
6559 
6560 	EQ_LOCK(eq);
6561 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6562 		struct sge_txq *txq = arg;
6563 		txq_start(txq->ifp, txq);
6564 	} else {
6565 		struct sge_wrq *wrq = arg;
6566 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6567 	}
6568 	EQ_UNLOCK(eq);
6569 }
6570 
6571 static uint32_t
6572 fconf_to_mode(uint32_t fconf)
6573 {
6574 	uint32_t mode;
6575 
6576 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6577 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6578 
6579 	if (fconf & F_FRAGMENTATION)
6580 		mode |= T4_FILTER_IP_FRAGMENT;
6581 
6582 	if (fconf & F_MPSHITTYPE)
6583 		mode |= T4_FILTER_MPS_HIT_TYPE;
6584 
6585 	if (fconf & F_MACMATCH)
6586 		mode |= T4_FILTER_MAC_IDX;
6587 
6588 	if (fconf & F_ETHERTYPE)
6589 		mode |= T4_FILTER_ETH_TYPE;
6590 
6591 	if (fconf & F_PROTOCOL)
6592 		mode |= T4_FILTER_IP_PROTO;
6593 
6594 	if (fconf & F_TOS)
6595 		mode |= T4_FILTER_IP_TOS;
6596 
6597 	if (fconf & F_VLAN)
6598 		mode |= T4_FILTER_VLAN;
6599 
6600 	if (fconf & F_VNIC_ID)
6601 		mode |= T4_FILTER_VNIC;
6602 
6603 	if (fconf & F_PORT)
6604 		mode |= T4_FILTER_PORT;
6605 
6606 	if (fconf & F_FCOE)
6607 		mode |= T4_FILTER_FCoE;
6608 
6609 	return (mode);
6610 }
6611 
6612 static uint32_t
6613 mode_to_fconf(uint32_t mode)
6614 {
6615 	uint32_t fconf = 0;
6616 
6617 	if (mode & T4_FILTER_IP_FRAGMENT)
6618 		fconf |= F_FRAGMENTATION;
6619 
6620 	if (mode & T4_FILTER_MPS_HIT_TYPE)
6621 		fconf |= F_MPSHITTYPE;
6622 
6623 	if (mode & T4_FILTER_MAC_IDX)
6624 		fconf |= F_MACMATCH;
6625 
6626 	if (mode & T4_FILTER_ETH_TYPE)
6627 		fconf |= F_ETHERTYPE;
6628 
6629 	if (mode & T4_FILTER_IP_PROTO)
6630 		fconf |= F_PROTOCOL;
6631 
6632 	if (mode & T4_FILTER_IP_TOS)
6633 		fconf |= F_TOS;
6634 
6635 	if (mode & T4_FILTER_VLAN)
6636 		fconf |= F_VLAN;
6637 
6638 	if (mode & T4_FILTER_VNIC)
6639 		fconf |= F_VNIC_ID;
6640 
6641 	if (mode & T4_FILTER_PORT)
6642 		fconf |= F_PORT;
6643 
6644 	if (mode & T4_FILTER_FCoE)
6645 		fconf |= F_FCOE;
6646 
6647 	return (fconf);
6648 }
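
/*
 * fconf_to_mode() and mode_to_fconf() are inverses over the optional
 * compressed-filter-tuple bits; the address and port matches in
 * T4_FILTER_IP_* are always available and so have no fconf equivalent.
 * For example, a mode requesting VLAN and ingress-port matching maps to
 * F_VLAN | F_PORT and back again.
 */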
6649 
6650 static uint32_t
6651 fspec_to_fconf(struct t4_filter_specification *fs)
6652 {
6653 	uint32_t fconf = 0;
6654 
6655 	if (fs->val.frag || fs->mask.frag)
6656 		fconf |= F_FRAGMENTATION;
6657 
6658 	if (fs->val.matchtype || fs->mask.matchtype)
6659 		fconf |= F_MPSHITTYPE;
6660 
6661 	if (fs->val.macidx || fs->mask.macidx)
6662 		fconf |= F_MACMATCH;
6663 
6664 	if (fs->val.ethtype || fs->mask.ethtype)
6665 		fconf |= F_ETHERTYPE;
6666 
6667 	if (fs->val.proto || fs->mask.proto)
6668 		fconf |= F_PROTOCOL;
6669 
6670 	if (fs->val.tos || fs->mask.tos)
6671 		fconf |= F_TOS;
6672 
6673 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6674 		fconf |= F_VLAN;
6675 
6676 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6677 		fconf |= F_VNIC_ID;
6678 
6679 	if (fs->val.iport || fs->mask.iport)
6680 		fconf |= F_PORT;
6681 
6682 	if (fs->val.fcoe || fs->mask.fcoe)
6683 		fconf |= F_FCOE;
6684 
6685 	return (fconf);
6686 }
6687 
6688 static int
6689 get_filter_mode(struct adapter *sc, uint32_t *mode)
6690 {
6691 	int rc;
6692 	uint32_t fconf;
6693 
6694 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6695 	    "t4getfm");
6696 	if (rc)
6697 		return (rc);
6698 
6699 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6700 	    A_TP_VLAN_PRI_MAP);
6701 
6702 	if (sc->params.tp.vlan_pri_map != fconf) {
6703 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6704 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6705 		    fconf);
6706 		sc->params.tp.vlan_pri_map = fconf;
6707 	}
6708 
6709 	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6710 
6711 	end_synchronized_op(sc, LOCK_HELD);
6712 	return (0);
6713 }
6714 
6715 static int
6716 set_filter_mode(struct adapter *sc, uint32_t mode)
6717 {
6718 	uint32_t fconf;
6719 	int rc;
6720 
6721 	fconf = mode_to_fconf(mode);
6722 
6723 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6724 	    "t4setfm");
6725 	if (rc)
6726 		return (rc);
6727 
6728 	if (sc->tids.ftids_in_use > 0) {
6729 		rc = EBUSY;
6730 		goto done;
6731 	}
6732 
6733 #ifdef TCP_OFFLOAD
6734 	if (sc->offload_map) {
6735 		rc = EBUSY;
6736 		goto done;
6737 	}
6738 #endif
6739 
6740 #ifdef notyet
6741 	rc = -t4_set_filter_mode(sc, fconf);
6742 	if (rc == 0)
6743 		sc->filter_mode = fconf;
6744 #else
6745 	rc = ENOTSUP;
6746 #endif
6747 
6748 done:
6749 	end_synchronized_op(sc, LOCK_HELD);
6750 	return (rc);
6751 }
6752 
6753 static inline uint64_t
6754 get_filter_hits(struct adapter *sc, uint32_t fid)
6755 {
6756 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6757 	uint64_t hits;
6758 
6759 	memwin_info(sc, 0, &mw_base, NULL);
6760 	off = position_memwin(sc, 0,
6761 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6762 	if (is_t4(sc)) {
6763 		hits = t4_read_reg64(sc, mw_base + off + 16);
6764 		hits = be64toh(hits);
6765 	} else {
6766 		hits = t4_read_reg(sc, mw_base + off + 24);
6767 		hits = be32toh(hits);
6768 	}
6769 
6770 	return (hits);
6771 }
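
/*
 * get_filter_hits() reads the hit count straight out of the filter's TCB
 * through PCIe memory window 0: a big-endian 64-bit count at byte offset 16
 * on T4, a 32-bit count at byte offset 24 on T5 and later.  It is called
 * from get_filter() with the adapter synchronized-op held, since
 * repositioning the memory window is not atomic.
 */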
6772 
6773 static int
6774 get_filter(struct adapter *sc, struct t4_filter *t)
6775 {
6776 	int i, rc, nfilters = sc->tids.nftids;
6777 	struct filter_entry *f;
6778 
6779 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6780 	    "t4getf");
6781 	if (rc)
6782 		return (rc);
6783 
6784 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6785 	    t->idx >= nfilters) {
6786 		t->idx = 0xffffffff;
6787 		goto done;
6788 	}
6789 
6790 	f = &sc->tids.ftid_tab[t->idx];
6791 	for (i = t->idx; i < nfilters; i++, f++) {
6792 		if (f->valid) {
6793 			t->idx = i;
6794 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6795 			t->smtidx = f->smtidx;
6796 			if (f->fs.hitcnts)
6797 				t->hits = get_filter_hits(sc, t->idx);
6798 			else
6799 				t->hits = UINT64_MAX;
6800 			t->fs = f->fs;
6801 
6802 			goto done;
6803 		}
6804 	}
6805 
6806 	t->idx = 0xffffffff;
6807 done:
6808 	end_synchronized_op(sc, LOCK_HELD);
6809 	return (0);
6810 }
6811 
6812 static int
6813 set_filter(struct adapter *sc, struct t4_filter *t)
6814 {
6815 	unsigned int nfilters, nports;
6816 	struct filter_entry *f;
6817 	int i, rc;
6818 
6819 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6820 	if (rc)
6821 		return (rc);
6822 
6823 	nfilters = sc->tids.nftids;
6824 	nports = sc->params.nports;
6825 
6826 	if (nfilters == 0) {
6827 		rc = ENOTSUP;
6828 		goto done;
6829 	}
6830 
6831 	if (!(sc->flags & FULL_INIT_DONE)) {
6832 		rc = EAGAIN;
6833 		goto done;
6834 	}
6835 
6836 	if (t->idx >= nfilters) {
6837 		rc = EINVAL;
6838 		goto done;
6839 	}
6840 
6841 	/* Validate against the global filter mode */
6842 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6843 	    sc->params.tp.vlan_pri_map) {
6844 		rc = E2BIG;
6845 		goto done;
6846 	}
6847 
6848 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6849 		rc = EINVAL;
6850 		goto done;
6851 	}
6852 
6853 	if (t->fs.val.iport >= nports) {
6854 		rc = EINVAL;
6855 		goto done;
6856 	}
6857 
6858 	/* Can't specify an iq if not steering to it */
6859 	if (!t->fs.dirsteer && t->fs.iq) {
6860 		rc = EINVAL;
6861 		goto done;
6862 	}
6863 
6864 	/* IPv6 filter idx must be 4 aligned */
6865 	if (t->fs.type == 1 &&
6866 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6867 		rc = EINVAL;
6868 		goto done;
6869 	}
6870 
6871 	if (sc->tids.ftid_tab == NULL) {
6872 		KASSERT(sc->tids.ftids_in_use == 0,
6873 		    ("%s: no memory allocated but filters_in_use > 0",
6874 		    __func__));
6875 
6876 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6877 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6878 		if (sc->tids.ftid_tab == NULL) {
6879 			rc = ENOMEM;
6880 			goto done;
6881 		}
6882 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6883 	}
6884 
6885 	for (i = 0; i < 4; i++) {
6886 		f = &sc->tids.ftid_tab[t->idx + i];
6887 
6888 		if (f->pending || f->valid) {
6889 			rc = EBUSY;
6890 			goto done;
6891 		}
6892 		if (f->locked) {
6893 			rc = EPERM;
6894 			goto done;
6895 		}
6896 
6897 		if (t->fs.type == 0)
6898 			break;
6899 	}
6900 
6901 	f = &sc->tids.ftid_tab[t->idx];
6902 	f->fs = t->fs;
6903 
6904 	rc = set_filter_wr(sc, t->idx);
6905 done:
6906 	end_synchronized_op(sc, 0);
6907 
6908 	if (rc == 0) {
6909 		mtx_lock(&sc->tids.ftid_lock);
6910 		for (;;) {
6911 			if (f->pending == 0) {
6912 				rc = f->valid ? 0 : EIO;
6913 				break;
6914 			}
6915 
6916 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6917 			    PCATCH, "t4setfw", 0)) {
6918 				rc = EINPROGRESS;
6919 				break;
6920 			}
6921 		}
6922 		mtx_unlock(&sc->tids.ftid_lock);
6923 	}
6924 	return (rc);
6925 }
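
/*
 * Filter programming is asynchronous: set_filter() validates the request
 * (the specification may not use tuple fields absent from the global filter
 * mode, and an IPv6 filter occupies four consecutive slots at a 4-aligned
 * index), posts a work request, and then sleeps until t4_filter_rpl() marks
 * the entry valid or failed.  A signal while waiting returns EINPROGRESS
 * with the operation still in flight.
 */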
6926 
6927 static int
6928 del_filter(struct adapter *sc, struct t4_filter *t)
6929 {
6930 	unsigned int nfilters;
6931 	struct filter_entry *f;
6932 	int rc;
6933 
6934 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6935 	if (rc)
6936 		return (rc);
6937 
6938 	nfilters = sc->tids.nftids;
6939 
6940 	if (nfilters == 0) {
6941 		rc = ENOTSUP;
6942 		goto done;
6943 	}
6944 
6945 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6946 	    t->idx >= nfilters) {
6947 		rc = EINVAL;
6948 		goto done;
6949 	}
6950 
6951 	if (!(sc->flags & FULL_INIT_DONE)) {
6952 		rc = EAGAIN;
6953 		goto done;
6954 	}
6955 
6956 	f = &sc->tids.ftid_tab[t->idx];
6957 
6958 	if (f->pending) {
6959 		rc = EBUSY;
6960 		goto done;
6961 	}
6962 	if (f->locked) {
6963 		rc = EPERM;
6964 		goto done;
6965 	}
6966 
6967 	if (f->valid) {
6968 		t->fs = f->fs;	/* extra info for the caller */
6969 		rc = del_filter_wr(sc, t->idx);
6970 	}
6971 
6972 done:
6973 	end_synchronized_op(sc, 0);
6974 
6975 	if (rc == 0) {
6976 		mtx_lock(&sc->tids.ftid_lock);
6977 		for (;;) {
6978 			if (f->pending == 0) {
6979 				rc = f->valid ? EIO : 0;
6980 				break;
6981 			}
6982 
6983 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6984 			    PCATCH, "t4delfw", 0)) {
6985 				rc = EINPROGRESS;
6986 				break;
6987 			}
6988 		}
6989 		mtx_unlock(&sc->tids.ftid_lock);
6990 	}
6991 
6992 	return (rc);
6993 }
6994 
6995 static void
6996 clear_filter(struct filter_entry *f)
6997 {
6998 	if (f->l2t)
6999 		t4_l2t_release(f->l2t);
7000 
7001 	bzero(f, sizeof (*f));
7002 }
7003 
7004 static int
7005 set_filter_wr(struct adapter *sc, int fidx)
7006 {
7007 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7008 	struct wrqe *wr;
7009 	struct fw_filter_wr *fwr;
7010 	unsigned int ftid;
7011 
7012 	ASSERT_SYNCHRONIZED_OP(sc);
7013 
7014 	if (f->fs.newdmac || f->fs.newvlan) {
7015 		/* This filter needs an L2T entry; allocate one. */
7016 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7017 		if (f->l2t == NULL)
7018 			return (EAGAIN);
7019 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7020 		    f->fs.dmac)) {
7021 			t4_l2t_release(f->l2t);
7022 			f->l2t = NULL;
7023 			return (ENOMEM);
7024 		}
7025 	}
7026 
7027 	ftid = sc->tids.ftid_base + fidx;
7028 
7029 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7030 	if (wr == NULL)
7031 		return (ENOMEM);
7032 
7033 	fwr = wrtod(wr);
7034 	bzero(fwr, sizeof (*fwr));
7035 
7036 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7037 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7038 	fwr->tid_to_iq =
7039 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7040 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7041 		V_FW_FILTER_WR_NOREPLY(0) |
7042 		V_FW_FILTER_WR_IQ(f->fs.iq));
7043 	fwr->del_filter_to_l2tix =
7044 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7045 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7046 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7047 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7048 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7049 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7050 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7051 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7052 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7053 		    f->fs.newvlan == VLAN_REWRITE) |
7054 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7055 		    f->fs.newvlan == VLAN_REWRITE) |
7056 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7057 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7058 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7059 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7060 	fwr->ethtype = htobe16(f->fs.val.ethtype);
7061 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7062 	fwr->frag_to_ovlan_vldm =
7063 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7064 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7065 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7066 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7067 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7068 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7069 	fwr->smac_sel = 0;
7070 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7071 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7072 	fwr->maci_to_matchtypem =
7073 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7074 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7075 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7076 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7077 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7078 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7079 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7080 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7081 	fwr->ptcl = f->fs.val.proto;
7082 	fwr->ptclm = f->fs.mask.proto;
7083 	fwr->ttyp = f->fs.val.tos;
7084 	fwr->ttypm = f->fs.mask.tos;
7085 	fwr->ivlan = htobe16(f->fs.val.vlan);
7086 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7087 	fwr->ovlan = htobe16(f->fs.val.vnic);
7088 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7089 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7090 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7091 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7092 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7093 	fwr->lp = htobe16(f->fs.val.dport);
7094 	fwr->lpm = htobe16(f->fs.mask.dport);
7095 	fwr->fp = htobe16(f->fs.val.sport);
7096 	fwr->fpm = htobe16(f->fs.mask.sport);
7097 	if (f->fs.newsmac)
7098 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7099 
7100 	f->pending = 1;
7101 	sc->tids.ftids_in_use++;
7102 
7103 	t4_wrq_tx(sc, wr);
7104 	return (0);
7105 }
7106 
7107 static int
7108 del_filter_wr(struct adapter *sc, int fidx)
7109 {
7110 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7111 	struct wrqe *wr;
7112 	struct fw_filter_wr *fwr;
7113 	unsigned int ftid;
7114 
7115 	ftid = sc->tids.ftid_base + fidx;
7116 
7117 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7118 	if (wr == NULL)
7119 		return (ENOMEM);
7120 	fwr = wrtod(wr);
7121 	bzero(fwr, sizeof (*fwr));
7122 
7123 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7124 
7125 	f->pending = 1;
7126 	t4_wrq_tx(sc, wr);
7127 	return (0);
7128 }
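
/*
 * Both set_filter_wr() and del_filter_wr() complete via a CPL_SET_TCB_RPL
 * that the firmware directs at the fwq; t4_filter_rpl() below maps the TID
 * back to a filter slot, updates its pending/valid state, and wakes any
 * thread sleeping on ftid_tab in set_filter() or del_filter().
 */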
7129 
7130 int
7131 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7132 {
7133 	struct adapter *sc = iq->adapter;
7134 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7135 	unsigned int idx = GET_TID(rpl);
7136 
7137 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7138 	    rss->opcode));
7139 
7140 	if (idx >= sc->tids.ftid_base &&
7141 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7142 		unsigned int rc = G_COOKIE(rpl->cookie);
7143 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7144 
7145 		mtx_lock(&sc->tids.ftid_lock);
7146 		if (rc == FW_FILTER_WR_FLT_ADDED) {
7147 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7148 			    __func__, idx));
7149 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7150 			f->pending = 0;  /* asynchronous setup completed */
7151 			f->valid = 1;
7152 		} else {
7153 			if (rc != FW_FILTER_WR_FLT_DELETED) {
7154 				/* Add or delete failed, display an error */
7155 				log(LOG_ERR,
7156 				    "filter %u setup failed with error %u\n",
7157 				    idx, rc);
7158 			}
7159 
7160 			clear_filter(f);
7161 			sc->tids.ftids_in_use--;
7162 		}
7163 		wakeup(&sc->tids.ftid_tab);
7164 		mtx_unlock(&sc->tids.ftid_lock);
7165 	}
7166 
7167 	return (0);
7168 }
7169 
7170 static int
7171 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7172 {
7173 	int rc;
7174 
7175 	if (cntxt->cid > M_CTXTQID)
7176 		return (EINVAL);
7177 
7178 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7179 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7180 		return (EINVAL);
7181 
7182 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7183 	if (rc)
7184 		return (rc);
7185 
7186 	if (sc->flags & FW_OK) {
7187 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7188 		    &cntxt->data[0]);
7189 		if (rc == 0)
7190 			goto done;
7191 	}
7192 
7193 	/*
7194 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7195 	 * the backdoor.
7196 	 */
7197 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7198 done:
7199 	end_synchronized_op(sc, 0);
7200 	return (rc);
7201 }
7202 
7203 static int
7204 load_fw(struct adapter *sc, struct t4_data *fw)
7205 {
7206 	int rc;
7207 	uint8_t *fw_data;
7208 
7209 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7210 	if (rc)
7211 		return (rc);
7212 
7213 	if (sc->flags & FULL_INIT_DONE) {
7214 		rc = EBUSY;
7215 		goto done;
7216 	}
7217 
7218 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7219 	if (fw_data == NULL) {
7220 		rc = ENOMEM;
7221 		goto done;
7222 	}
7223 
7224 	rc = copyin(fw->data, fw_data, fw->len);
7225 	if (rc == 0)
7226 		rc = -t4_load_fw(sc, fw_data, fw->len);
7227 
7228 	free(fw_data, M_CXGBE);
7229 done:
7230 	end_synchronized_op(sc, 0);
7231 	return (rc);
7232 }
7233 
7234 static int
7235 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7236 {
7237 	uint32_t addr, off, remaining, i, n;
7238 	uint32_t *buf, *b;
7239 	uint32_t mw_base, mw_aperture;
7240 	int rc;
7241 	uint8_t *dst;
7242 
7243 	rc = validate_mem_range(sc, mr->addr, mr->len);
7244 	if (rc != 0)
7245 		return (rc);
7246 
7247 	memwin_info(sc, win, &mw_base, &mw_aperture);
7248 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7249 	addr = mr->addr;
7250 	remaining = mr->len;
7251 	dst = (void *)mr->data;
7252 
7253 	while (remaining) {
7254 		off = position_memwin(sc, win, addr);
7255 
7256 		/* number of bytes that we'll copy in the inner loop */
7257 		n = min(remaining, mw_aperture - off);
7258 		for (i = 0; i < n; i += 4)
7259 			*b++ = t4_read_reg(sc, mw_base + off + i);
7260 
7261 		rc = copyout(buf, dst, n);
7262 		if (rc != 0)
7263 			break;
7264 
7265 		b = buf;
7266 		dst += n;
7267 		remaining -= n;
7268 		addr += n;
7269 	}
7270 
7271 	free(buf, M_CXGBE);
7272 	return (rc);
7273 }
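
/*
 * read_card_mem() streams adapter memory to userland through a PCIe memory
 * window: each pass positions the window at the current address, copies up
 * to one aperture's worth of 32-bit words into a bounce buffer, and
 * copyout()s that chunk before sliding the window forward.  The bounce
 * buffer is sized min(len, aperture), so a large read never ties up more
 * than one aperture of kernel memory at a time.
 */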
7274 
7275 static int
7276 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7277 {
7278 	int rc;
7279 
7280 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7281 		return (EINVAL);
7282 
7283 	if (i2cd->len > 1) {
7284 		/* XXX: need fw support for longer reads in one go */
7285 		return (ENOTSUP);
7286 	}
7287 
7288 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7289 	if (rc)
7290 		return (rc);
7291 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7292 	    i2cd->offset, &i2cd->data[0]);
7293 	end_synchronized_op(sc, 0);
7294 
7295 	return (rc);
7296 }
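
/*
 * Illustrative userland use of CHELSIO_T4_GET_I2C (a sketch: the /dev node
 * name is an assumption, 0xa0 is the usual SFP+ EEPROM address, and len is
 * 1 to match the current firmware limitation noted above):
 *
 *	struct t4_i2c_data i2cd = { .port_id = 0, .dev_addr = 0xa0,
 *	    .offset = 0, .len = 1 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GET_I2C, &i2cd) == 0)
 *		printf("byte 0: %#x\n", i2cd.data[0]);
 */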
7297 
7298 int
7299 t4_os_find_pci_capability(struct adapter *sc, int cap)
7300 {
7301 	int i;
7302 
7303 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7304 }
7305 
7306 int
7307 t4_os_pci_save_state(struct adapter *sc)
7308 {
7309 	device_t dev;
7310 	struct pci_devinfo *dinfo;
7311 
7312 	dev = sc->dev;
7313 	dinfo = device_get_ivars(dev);
7314 
7315 	pci_cfg_save(dev, dinfo, 0);
7316 	return (0);
7317 }
7318 
7319 int
7320 t4_os_pci_restore_state(struct adapter *sc)
7321 {
7322 	device_t dev;
7323 	struct pci_devinfo *dinfo;
7324 
7325 	dev = sc->dev;
7326 	dinfo = device_get_ivars(dev);
7327 
7328 	pci_cfg_restore(dev, dinfo);
7329 	return (0);
7330 }
7331 
7332 void
7333 t4_os_portmod_changed(const struct adapter *sc, int idx)
7334 {
7335 	struct port_info *pi = sc->port[idx];
7336 	static const char *mod_str[] = {
7337 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7338 	};
7339 
7340 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7341 		if_printf(pi->ifp, "transceiver unplugged.\n");
7342 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7343 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7344 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7345 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7346 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7347 		if_printf(pi->ifp, "%s transceiver inserted.\n",
7348 		    mod_str[pi->mod_type]);
7349 	} else {
7350 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7351 		    pi->mod_type);
7352 	}
7353 }
7354 
7355 void
7356 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7357 {
7358 	struct port_info *pi = sc->port[idx];
7359 	struct ifnet *ifp = pi->ifp;
7360 
7361 	if (link_stat) {
7362 		pi->linkdnrc = -1;
7363 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7364 		if_link_state_change(ifp, LINK_STATE_UP);
7365 	} else {
7366 		if (reason >= 0)
7367 			pi->linkdnrc = reason;
7368 		if_link_state_change(ifp, LINK_STATE_DOWN);
7369 	}
7370 }
7371 
7372 void
7373 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7374 {
7375 	struct adapter *sc;
7376 
7377 	sx_slock(&t4_list_lock);
7378 	SLIST_FOREACH(sc, &t4_list, link) {
7379 		/*
7380 		 * func should not make any assumptions about what state sc is
7381 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7382 		 */
7383 		func(sc, arg);
7384 	}
7385 	sx_sunlock(&t4_list_lock);
7386 }
7387 
7388 static int
7389 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7390 {
7391 	return (0);
7392 }
7393 
7394 static int
7395 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7396 {
7397 	return (0);
7398 }
7399 
7400 static int
7401 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7402     struct thread *td)
7403 {
7404 	int rc;
7405 	struct adapter *sc = dev->si_drv1;
7406 
7407 	rc = priv_check(td, PRIV_DRIVER);
7408 	if (rc != 0)
7409 		return (rc);
7410 
7411 	switch (cmd) {
7412 	case CHELSIO_T4_GETREG: {
7413 		struct t4_reg *edata = (struct t4_reg *)data;
7414 
7415 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7416 			return (EFAULT);
7417 
7418 		if (edata->size == 4)
7419 			edata->val = t4_read_reg(sc, edata->addr);
7420 		else if (edata->size == 8)
7421 			edata->val = t4_read_reg64(sc, edata->addr);
7422 		else
7423 			return (EINVAL);
7424 
7425 		break;
7426 	}
7427 	case CHELSIO_T4_SETREG: {
7428 		struct t4_reg *edata = (struct t4_reg *)data;
7429 
7430 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7431 			return (EFAULT);
7432 
7433 		if (edata->size == 4) {
7434 			if (edata->val & 0xffffffff00000000)
7435 				return (EINVAL);
7436 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7437 		} else if (edata->size == 8)
7438 			t4_write_reg64(sc, edata->addr, edata->val);
7439 		else
7440 			return (EINVAL);
7441 		break;
7442 	}
7443 	case CHELSIO_T4_REGDUMP: {
7444 		struct t4_regdump *regs = (struct t4_regdump *)data;
7445 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7446 		uint8_t *buf;
7447 
7448 		if (regs->len < reglen) {
7449 			regs->len = reglen; /* hint to the caller */
7450 			return (ENOBUFS);
7451 		}
7452 
7453 		regs->len = reglen;
7454 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7455 		t4_get_regs(sc, regs, buf);
7456 		rc = copyout(buf, regs->data, reglen);
7457 		free(buf, M_CXGBE);
7458 		break;
7459 	}
7460 	case CHELSIO_T4_GET_FILTER_MODE:
7461 		rc = get_filter_mode(sc, (uint32_t *)data);
7462 		break;
7463 	case CHELSIO_T4_SET_FILTER_MODE:
7464 		rc = set_filter_mode(sc, *(uint32_t *)data);
7465 		break;
7466 	case CHELSIO_T4_GET_FILTER:
7467 		rc = get_filter(sc, (struct t4_filter *)data);
7468 		break;
7469 	case CHELSIO_T4_SET_FILTER:
7470 		rc = set_filter(sc, (struct t4_filter *)data);
7471 		break;
7472 	case CHELSIO_T4_DEL_FILTER:
7473 		rc = del_filter(sc, (struct t4_filter *)data);
7474 		break;
7475 	case CHELSIO_T4_GET_SGE_CONTEXT:
7476 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7477 		break;
7478 	case CHELSIO_T4_LOAD_FW:
7479 		rc = load_fw(sc, (struct t4_data *)data);
7480 		break;
7481 	case CHELSIO_T4_GET_MEM:
7482 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7483 		break;
7484 	case CHELSIO_T4_GET_I2C:
7485 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7486 		break;
7487 	case CHELSIO_T4_CLEAR_STATS: {
7488 		int i;
7489 		u_int port_id = *(uint32_t *)data;
7490 		struct port_info *pi;
7491 
7492 		if (port_id >= sc->params.nports)
7493 			return (EINVAL);
7494 
7495 		/* MAC stats */
7496 		t4_clr_port_stats(sc, port_id);
7497 
7498 		pi = sc->port[port_id];
7499 		if (pi->flags & PORT_INIT_DONE) {
7500 			struct sge_rxq *rxq;
7501 			struct sge_txq *txq;
7502 			struct sge_wrq *wrq;
7503 
7504 			for_each_rxq(pi, i, rxq) {
7505 #if defined(INET) || defined(INET6)
7506 				rxq->lro.lro_queued = 0;
7507 				rxq->lro.lro_flushed = 0;
7508 #endif
7509 				rxq->rxcsum = 0;
7510 				rxq->vlan_extraction = 0;
7511 			}
7512 
7513 			for_each_txq(pi, i, txq) {
7514 				txq->txcsum = 0;
7515 				txq->tso_wrs = 0;
7516 				txq->vlan_insertion = 0;
7517 				txq->imm_wrs = 0;
7518 				txq->sgl_wrs = 0;
7519 				txq->txpkt_wrs = 0;
7520 				txq->txpkts_wrs = 0;
7521 				txq->txpkts_pkts = 0;
7522 				txq->br->br_drops = 0;
7523 				txq->no_dmamap = 0;
7524 				txq->no_desc = 0;
7525 			}
7526 
7527 #ifdef TCP_OFFLOAD
7528 			/* nothing to clear for each ofld_rxq */
7529 
7530 			for_each_ofld_txq(pi, i, wrq) {
7531 				wrq->tx_wrs = 0;
7532 				wrq->no_desc = 0;
7533 			}
7534 #endif
7535 			wrq = &sc->sge.ctrlq[pi->port_id];
7536 			wrq->tx_wrs = 0;
7537 			wrq->no_desc = 0;
7538 		}
7539 		break;
7540 	}
7541 	case CHELSIO_T4_GET_TRACER:
7542 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7543 		break;
7544 	case CHELSIO_T4_SET_TRACER:
7545 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7546 		break;
7547 	default:
7548 		rc = EINVAL;
7549 	}
7550 
7551 	return (rc);
7552 }
7553 
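/*
 * Userland sketch of the register-access ioctls handled above (illustrative
 * only; not part of the driver).  It assumes the control node is /dev/t4nex0
 * and that struct t4_reg and CHELSIO_T4_GETREG come from t4_ioctl.h:
 *
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	struct t4_reg reg = { .addr = 0x1234, .size = 4 };  (addr 4B-aligned)
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("0x%x: 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */
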
7554 #ifdef TCP_OFFLOAD
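/*
 * Enable/disable TOE on the port.  Enabling brings the adapter fully up if
 * needed and activates the TOM ULD the first time through, then marks the
 * port in the adapter's offload_map.  Disabling only clears the port's bit
 * in the map.
 */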
7555 static int
7556 toe_capability(struct port_info *pi, int enable)
7557 {
7558 	int rc;
7559 	struct adapter *sc = pi->adapter;
7560 
7561 	ASSERT_SYNCHRONIZED_OP(sc);
7562 
7563 	if (!is_offload(sc))
7564 		return (ENODEV);
7565 
7566 	if (enable) {
7567 		if (!(sc->flags & FULL_INIT_DONE)) {
7568 			rc = cxgbe_init_synchronized(pi);
7569 			if (rc)
7570 				return (rc);
7571 		}
7572 
7573 		if (isset(&sc->offload_map, pi->port_id))
7574 			return (0);
7575 
7576 		if (!(sc->flags & TOM_INIT_DONE)) {
7577 			rc = t4_activate_uld(sc, ULD_TOM);
7578 			if (rc == EAGAIN) {
7579 				log(LOG_WARNING,
7580 				    "You must kldload t4_tom.ko before trying "
7581 				    "to enable TOE on a cxgbe interface.\n");
7582 			}
7583 			if (rc != 0)
7584 				return (rc);
7585 			KASSERT(sc->tom_softc != NULL,
7586 			    ("%s: TOM activated but softc NULL", __func__));
7587 			KASSERT(sc->flags & TOM_INIT_DONE,
7588 			    ("%s: TOM activated but flag not set", __func__));
7589 		}
7590 
7591 		setbit(&sc->offload_map, pi->port_id);
7592 	} else {
7593 		if (!isset(&sc->offload_map, pi->port_id))
7594 			return (0);
7595 
7596 		KASSERT(sc->flags & TOM_INIT_DONE,
7597 		    ("%s: TOM never initialized?", __func__));
7598 		clrbit(&sc->offload_map, pi->port_id);
7599 	}
7600 
7601 	return (0);
7602 }
7603 
7604 /*
7605  * Add an upper layer driver to the global list.
7606  */
7607 int
7608 t4_register_uld(struct uld_info *ui)
7609 {
7610 	int rc = 0;
7611 	struct uld_info *u;
7612 
7613 	sx_xlock(&t4_uld_list_lock);
7614 	SLIST_FOREACH(u, &t4_uld_list, link) {
7615 		if (u->uld_id == ui->uld_id) {
7616 			rc = EEXIST;
7617 			goto done;
7618 		}
7619 	}
7620 
7621 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7622 	ui->refcount = 0;
7623 done:
7624 	sx_xunlock(&t4_uld_list_lock);
7625 	return (rc);
7626 }
7627 
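/*
 * Remove an upper layer driver from the global list.  Fails with EBUSY if
 * the ULD is still in use and with EINVAL if it was never registered.
 */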
7628 int
7629 t4_unregister_uld(struct uld_info *ui)
7630 {
7631 	int rc = EINVAL;
7632 	struct uld_info *u;
7633 
7634 	sx_xlock(&t4_uld_list_lock);
7635 
7636 	SLIST_FOREACH(u, &t4_uld_list, link) {
7637 		if (u == ui) {
7638 			if (ui->refcount > 0) {
7639 				rc = EBUSY;
7640 				goto done;
7641 			}
7642 
7643 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7644 			rc = 0;
7645 			goto done;
7646 		}
7647 	}
7648 done:
7649 	sx_xunlock(&t4_uld_list_lock);
7650 	return (rc);
7651 }
7652 
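/*
 * Activate the ULD with the given id on this adapter.  Returns EAGAIN if no
 * ULD with a matching id has been registered yet (e.g. t4_tom.ko not
 * loaded), which is exactly the case toe_capability checks for.
 */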
7653 int
7654 t4_activate_uld(struct adapter *sc, int id)
7655 {
7656 	int rc = EAGAIN;
7657 	struct uld_info *ui;
7658 
7659 	ASSERT_SYNCHRONIZED_OP(sc);
7660 
7661 	sx_slock(&t4_uld_list_lock);
7662 
7663 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7664 		if (ui->uld_id == id) {
7665 			rc = ui->activate(sc);
7666 			if (rc == 0)
7667 				ui->refcount++;
7668 			goto done;
7669 		}
7670 	}
7671 done:
7672 	sx_sunlock(&t4_uld_list_lock);
7673 
7674 	return (rc);
7675 }
7676 
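/*
 * Deactivate the ULD with the given id on this adapter, dropping its
 * refcount on success.
 */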
7677 int
7678 t4_deactivate_uld(struct adapter *sc, int id)
7679 {
7680 	int rc = EINVAL;
7681 	struct uld_info *ui;
7682 
7683 	ASSERT_SYNCHRONIZED_OP(sc);
7684 
7685 	sx_slock(&t4_uld_list_lock);
7686 
7687 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7688 		if (ui->uld_id == id) {
7689 			rc = ui->deactivate(sc);
7690 			if (rc == 0)
7691 				ui->refcount--;
7692 			goto done;
7693 		}
7694 	}
7695 done:
7696 	sx_sunlock(&t4_uld_list_lock);
7697 
7698 	return (rc);
7699 }
7700 #endif
7701 
7702 /*
7703  * Come up with reasonable defaults for some of the tunables, provided they're
7704  * not set by the user (in which case we'll use the values as is).
7705  */
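/*
 * For example (tunable names as documented in cxgbe(4)), a loader.conf(5)
 * entry such as hw.cxgbe.ntxq10g="8" would pin t4_ntxq10g and bypass the
 * min(ncpus, NTXQ_10G) default computed below.
 */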
7706 static void
7707 tweak_tunables(void)
7708 {
7709 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7710 
7711 	if (t4_ntxq10g < 1)
7712 		t4_ntxq10g = min(nc, NTXQ_10G);
7713 
7714 	if (t4_ntxq1g < 1)
7715 		t4_ntxq1g = min(nc, NTXQ_1G);
7716 
7717 	if (t4_nrxq10g < 1)
7718 		t4_nrxq10g = min(nc, NRXQ_10G);
7719 
7720 	if (t4_nrxq1g < 1)
7721 		t4_nrxq1g = min(nc, NRXQ_1G);
7722 
7723 #ifdef TCP_OFFLOAD
7724 	if (t4_nofldtxq10g < 1)
7725 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7726 
7727 	if (t4_nofldtxq1g < 1)
7728 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7729 
7730 	if (t4_nofldrxq10g < 1)
7731 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7732 
7733 	if (t4_nofldrxq1g < 1)
7734 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7735 
7736 	if (t4_toecaps_allowed == -1)
7737 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7738 #else
7739 	if (t4_toecaps_allowed == -1)
7740 		t4_toecaps_allowed = 0;
7741 #endif
7742 
7743 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7744 		t4_tmr_idx_10g = TMR_IDX_10G;
7745 
7746 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7747 		t4_pktc_idx_10g = PKTC_IDX_10G;
7748 
7749 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7750 		t4_tmr_idx_1g = TMR_IDX_1G;
7751 
7752 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7753 		t4_pktc_idx_1g = PKTC_IDX_1G;
7754 
7755 	if (t4_qsize_txq < 128)
7756 		t4_qsize_txq = 128;
7757 
7758 	if (t4_qsize_rxq < 128)
7759 		t4_qsize_rxq = 128;
7760 	while (t4_qsize_rxq & 7)
7761 		t4_qsize_rxq++;
7762 
7763 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7764 }
7765 
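/*
 * Module event handler shared by t4nex and t5nex.  The 'loaded' counter
 * ensures the global state (adapter and ULD lists, tracer, tunables) is set
 * up exactly once and torn down only when the last of the two modules goes
 * away; unload is refused with EBUSY while any adapter or ULD is still
 * registered.
 */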
7766 static int
7767 mod_event(module_t mod, int cmd, void *arg)
7768 {
7769 	int rc = 0;
7770 	static int loaded = 0;
7771 
7772 	switch (cmd) {
7773 	case MOD_LOAD:
7774 		if (atomic_fetchadd_int(&loaded, 1))
7775 			break;
7776 		t4_sge_modload();
7777 		sx_init(&t4_list_lock, "T4/T5 adapters");
7778 		SLIST_INIT(&t4_list);
7779 #ifdef TCP_OFFLOAD
7780 		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
7781 		SLIST_INIT(&t4_uld_list);
7782 #endif
7783 		t4_tracer_modload();
7784 		tweak_tunables();
7785 		break;
7786 
7787 	case MOD_UNLOAD:
7788 		if (atomic_fetchadd_int(&loaded, -1) > 1)
7789 			break;
7790 		t4_tracer_modunload();
7791 #ifdef TCP_OFFLOAD
7792 		sx_slock(&t4_uld_list_lock);
7793 		if (!SLIST_EMPTY(&t4_uld_list)) {
7794 			rc = EBUSY;
7795 			sx_sunlock(&t4_uld_list_lock);
7796 			break;
7797 		}
7798 		sx_sunlock(&t4_uld_list_lock);
7799 		sx_destroy(&t4_uld_list_lock);
7800 #endif
7801 		sx_slock(&t4_list_lock);
7802 		if (!SLIST_EMPTY(&t4_list)) {
7803 			rc = EBUSY;
7804 			sx_sunlock(&t4_list_lock);
7805 			break;
7806 		}
7807 		sx_sunlock(&t4_list_lock);
7808 		sx_destroy(&t4_list_lock);
7809 		break;
7810 	}
7811 
7812 	return (rc);
7813 }
7814 
7815 static devclass_t t4_devclass, t5_devclass;
7816 static devclass_t cxgbe_devclass, cxl_devclass;
7817 
7818 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7819 MODULE_VERSION(t4nex, 1);
7820 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7821 
7822 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7823 MODULE_VERSION(t5nex, 1);
7824 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7825 
7826 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7827 MODULE_VERSION(cxgbe, 1);
7828 
7829 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7830 MODULE_VERSION(cxl, 1);
7831