xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 3b1f7d9e5d6f44b50ff07fde6fd0e1135f213762)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
70 /* T4 bus driver interface */
71 static int t4_probe(device_t);
72 static int t4_attach(device_t);
73 static int t4_detach(device_t);
74 static device_method_t t4_methods[] = {
75 	DEVMETHOD(device_probe,		t4_probe),
76 	DEVMETHOD(device_attach,	t4_attach),
77 	DEVMETHOD(device_detach,	t4_detach),
78 
79 	DEVMETHOD_END
80 };
81 static driver_t t4_driver = {
82 	"t4nex",
83 	t4_methods,
84 	sizeof(struct adapter)
85 };
86 
87 
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 	DEVMETHOD(device_probe,		cxgbe_probe),
94 	DEVMETHOD(device_attach,	cxgbe_attach),
95 	DEVMETHOD(device_detach,	cxgbe_detach),
96 	DEVMETHOD_END
97 };
98 static driver_t cxgbe_driver = {
99 	"cxgbe",
100 	cxgbe_methods,
101 	sizeof(struct port_info)
102 };
103 
104 static d_ioctl_t t4_ioctl;
105 static d_open_t t4_open;
106 static d_close_t t4_close;
107 
108 static struct cdevsw t4_cdevsw = {
109 	.d_version = D_VERSION,
110 	.d_flags = 0,
111 	.d_open = t4_open,
112 	.d_close = t4_close,
113 	.d_ioctl = t4_ioctl,
114 	.d_name = "t4nex",
115 };
116 
117 /* T5 bus driver interface */
118 static int t5_probe(device_t);
119 static device_method_t t5_methods[] = {
120 	DEVMETHOD(device_probe,		t5_probe),
121 	DEVMETHOD(device_attach,	t4_attach),
122 	DEVMETHOD(device_detach,	t4_detach),
123 
124 	DEVMETHOD_END
125 };
126 static driver_t t5_driver = {
127 	"t5nex",
128 	t5_methods,
129 	sizeof(struct adapter)
130 };
131 
132 
133 /* T5 port (cxl) interface */
134 static driver_t cxl_driver = {
135 	"cxl",
136 	cxgbe_methods,
137 	sizeof(struct port_info)
138 };
139 
140 static struct cdevsw t5_cdevsw = {
141 	.d_version = D_VERSION,
142 	.d_flags = 0,
143 	.d_open = t4_open,
144 	.d_close = t4_close,
145 	.d_ioctl = t4_ioctl,
146 	.d_name = "t5nex",
147 };
148 
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156 
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158 
159 /*
160  * The correct order in which to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock (see the sketch below).
162  */
163 static struct mtx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct mtx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
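
/*
 * Illustrative sketch (not driver code): taking all three locks in the
 * documented order.  The function name is hypothetical.
 */
#ifdef notyet
static void
example_lock_order(struct adapter *sc)
{

	mtx_lock(&t4_list_lock);	/* 1st: adapter list lock */
	ADAPTER_LOCK(sc);		/* 2nd: per-adapter lock */
#ifdef TCP_OFFLOAD
	mtx_lock(&t4_uld_list_lock);	/* 3rd: ULD list lock */
	mtx_unlock(&t4_uld_list_lock);
#endif
	ADAPTER_UNLOCK(sc);
	mtx_unlock(&t4_list_lock);	/* release in reverse order */
}
#endif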
169 
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
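
/*
 * Example loader.conf(5) settings for some of the tunables below (the
 * values are illustrative only):
 *
 *	hw.cxgbe.ntxq10g="8"		# NIC txq's per 10G port
 *	hw.cxgbe.config_file="uwire"	# use the uwire configuration file
 *	hw.cxgbe.fw_install="2"		# encourage firmware auto-install
 */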
180 
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187 
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191 
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195 
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199 
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204 
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208 
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212 
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217 
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224 
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228 
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232 
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236 
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242 
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245 
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
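
/*
 * For example (illustrative), hw.cxgbe.interrupt_types=2 in loader.conf
 * restricts the driver to MSI: bit 0 (1) allows INTx, bit 1 (2) allows
 * MSI, bit 2 (4) allows MSI-X, and the default of 7 allows all three.
 */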
251 
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF	"default"
256 #define FLASH_CF	"flash"
257 #define UWIRE_CF	"uwire"
258 #define FPGA_CF		"fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261 
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268 
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so
271  * that chip resources aren't wasted on unused features.
272  */
273 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275 
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278 
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281 
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284 
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287 
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290 
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293 
294 struct intrs_and_queues {
295 	int intr_type;		/* INTx, MSI, or MSI-X */
296 	int nirq;		/* Number of vectors */
297 	int intr_flags;
298 	int ntxq10g;		/* # of NIC txq's for each 10G port */
299 	int nrxq10g;		/* # of NIC rxq's for each 10G port */
300 	int ntxq1g;		/* # of NIC txq's for each 1G port */
301 	int nrxq1g;		/* # of NIC rxq's for each 1G port */
302 #ifdef TCP_OFFLOAD
303 	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
304 	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
305 	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
306 	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
307 #endif
308 };
309 
310 struct filter_entry {
311 	uint32_t valid:1;	/* filter allocated and valid */
312 	uint32_t locked:1;	/* filter is administratively locked */
313 	uint32_t pending:1;	/* filter action is pending firmware reply */
314 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
315 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
316 
317 	struct t4_filter_specification fs;
318 };
319 
320 enum {
321 	XGMAC_MTU	= (1 << 0),
322 	XGMAC_PROMISC	= (1 << 1),
323 	XGMAC_ALLMULTI	= (1 << 2),
324 	XGMAC_VLANEX	= (1 << 3),
325 	XGMAC_UCADDR	= (1 << 4),
326 	XGMAC_MCADDRS	= (1 << 5),
327 
328 	XGMAC_ALL	= 0xffff
329 };
330 
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336     uint32_t *);
337 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338 static uint32_t position_memwin(struct adapter *, int, uint32_t);
339 static int cfg_itype_and_nqueues(struct adapter *, int, int,
340     struct intrs_and_queues *);
341 static int prep_firmware(struct adapter *);
342 static int partition_resources(struct adapter *, const struct firmware *,
343     const char *);
344 static int get_params__pre_init(struct adapter *);
345 static int get_params__post_init(struct adapter *);
346 static int set_params__post_init(struct adapter *);
347 static void t4_set_desc(struct adapter *);
348 static void build_medialist(struct port_info *);
349 static int update_mac_settings(struct port_info *, int);
350 static int cxgbe_init_synchronized(struct port_info *);
351 static int cxgbe_uninit_synchronized(struct port_info *);
352 static int setup_intr_handlers(struct adapter *);
353 static int adapter_full_init(struct adapter *);
354 static int adapter_full_uninit(struct adapter *);
355 static int port_full_init(struct port_info *);
356 static int port_full_uninit(struct port_info *);
357 static void quiesce_eq(struct adapter *, struct sge_eq *);
358 static void quiesce_iq(struct adapter *, struct sge_iq *);
359 static void quiesce_fl(struct adapter *, struct sge_fl *);
360 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361     driver_intr_t *, void *, char *);
362 static int t4_free_irq(struct adapter *, struct irq *);
363 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364     unsigned int);
365 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366 static void cxgbe_tick(void *);
367 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369     struct mbuf *);
370 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371 static int fw_msg_not_handled(struct adapter *, const __be64 *);
372 static int t4_sysctls(struct adapter *);
373 static int cxgbe_sysctls(struct port_info *);
374 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
377 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
378 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
379 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
380 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
381 #ifdef SBUF_DRAIN
382 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
383 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
384 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
385 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
389 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
390 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
391 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
393 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
394 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
395 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
396 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
397 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
398 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
399 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
400 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
401 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
404 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
405 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
406 #endif
407 static inline void txq_start(struct ifnet *, struct sge_txq *);
408 static uint32_t fconf_to_mode(uint32_t);
409 static uint32_t mode_to_fconf(uint32_t);
410 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
411 static int get_filter_mode(struct adapter *, uint32_t *);
412 static int set_filter_mode(struct adapter *, uint32_t);
413 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
414 static int get_filter(struct adapter *, struct t4_filter *);
415 static int set_filter(struct adapter *, struct t4_filter *);
416 static int del_filter(struct adapter *, struct t4_filter *);
417 static void clear_filter(struct filter_entry *);
418 static int set_filter_wr(struct adapter *, int);
419 static int del_filter_wr(struct adapter *, int);
420 static int get_sge_context(struct adapter *, struct t4_sge_context *);
421 static int load_fw(struct adapter *, struct t4_data *);
422 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
423 static int read_i2c(struct adapter *, struct t4_i2c_data *);
424 #ifdef TCP_OFFLOAD
425 static int toe_capability(struct port_info *, int);
426 #endif
427 static int mod_event(module_t, int, void *);
428 
429 struct {
430 	uint16_t device;
431 	char *desc;
432 } t4_pciids[] = {
433 	{0xa000, "Chelsio Terminator 4 FPGA"},
434 	{0x4400, "Chelsio T440-dbg"},
435 	{0x4401, "Chelsio T420-CR"},
436 	{0x4402, "Chelsio T422-CR"},
437 	{0x4403, "Chelsio T440-CR"},
438 	{0x4404, "Chelsio T420-BCH"},
439 	{0x4405, "Chelsio T440-BCH"},
440 	{0x4406, "Chelsio T440-CH"},
441 	{0x4407, "Chelsio T420-SO"},
442 	{0x4408, "Chelsio T420-CX"},
443 	{0x4409, "Chelsio T420-BT"},
444 	{0x440a, "Chelsio T404-BT"},
445 	{0x440e, "Chelsio T440-LP-CR"},
446 }, t5_pciids[] = {
447 	{0xb000, "Chelsio Terminator 5 FPGA"},
448 	{0x5400, "Chelsio T580-dbg"},
449 	{0x5401, "Chelsio T520-CR"},
450 	{0x5407, "Chelsio T520-SO"},
451 	{0x5408, "Chelsio T520-CX"},
452 	{0x5410, "Chelsio T580-LP-CR"},	/* 2 x 40G */
453 	{0x5411, "Chelsio T520-LL-CR"},
454 #ifdef notyet
455 	{0x5402, "Chelsio T522-CR"},
456 	{0x5403, "Chelsio T540-CR"},
457 	{0x5404, "Chelsio T520-BCH"},
458 	{0x5405, "Chelsio T540-BCH"},
459 	{0x5406, "Chelsio T540-CH"},
460 	{0x5409, "Chelsio T520-BT"},
461 	{0x540a, "Chelsio T504-BT"},
462 	{0x540b, "Chelsio B520-SR"},
463 	{0x540c, "Chelsio B504-BT"},
464 	{0x540d, "Chelsio T580-CR"},
465 	{0x540e, "Chelsio T540-LP-CR"},
466 	{0x540f, "Chelsio Amsterdam"},
467 	{0x5412, "Chelsio T560-CR"},
468 	{0x5413, "Chelsio T580-CR"},
469 #endif
470 };
471 
472 #ifdef TCP_OFFLOAD
473 /*
474  * service_iq() operates on an iq and needs the fl.  The offset of the fl
475  * from the iq must be exactly the same for both rxq and ofld_rxq.
476  */
477 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
478 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
479 #endif
480 
481 /* No easy way to include t4_msg.h before adapter.h so we check this way */
482 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
483 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
484 
485 static int
486 t4_probe(device_t dev)
487 {
488 	int i;
489 	uint16_t v = pci_get_vendor(dev);
490 	uint16_t d = pci_get_device(dev);
491 	uint8_t f = pci_get_function(dev);
492 
493 	if (v != PCI_VENDOR_ID_CHELSIO)
494 		return (ENXIO);
495 
496 	/* Attach only to PF0 of the FPGA */
497 	if (d == 0xa000 && f != 0)
498 		return (ENXIO);
499 
500 	for (i = 0; i < nitems(t4_pciids); i++) {
501 		if (d == t4_pciids[i].device) {
502 			device_set_desc(dev, t4_pciids[i].desc);
503 			return (BUS_PROBE_DEFAULT);
504 		}
505 	}
506 
507 	return (ENXIO);
508 }
509 
510 static int
511 t5_probe(device_t dev)
512 {
513 	int i;
514 	uint16_t v = pci_get_vendor(dev);
515 	uint16_t d = pci_get_device(dev);
516 	uint8_t f = pci_get_function(dev);
517 
518 	if (v != PCI_VENDOR_ID_CHELSIO)
519 		return (ENXIO);
520 
521 	/* Attach only to PF0 of the FPGA */
522 	if (d == 0xb000 && f != 0)
523 		return (ENXIO);
524 
525 	for (i = 0; i < nitems(t5_pciids); i++) {
526 		if (d == t5_pciids[i].device) {
527 			device_set_desc(dev, t5_pciids[i].desc);
528 			return (BUS_PROBE_DEFAULT);
529 		}
530 	}
531 
532 	return (ENXIO);
533 }
534 
535 static int
536 t4_attach(device_t dev)
537 {
538 	struct adapter *sc;
539 	int rc = 0, i, n10g, n1g, rqidx, tqidx;
540 	struct intrs_and_queues iaq;
541 	struct sge *s;
542 #ifdef TCP_OFFLOAD
543 	int ofld_rqidx, ofld_tqidx;
544 #endif
545 
546 	sc = device_get_softc(dev);
547 	sc->dev = dev;
548 
549 	pci_enable_busmaster(dev);
550 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
551 		uint32_t v;
552 
553 		pci_set_max_read_req(dev, 4096);
554 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
555 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
556 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
557 	}
558 
559 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
560 	    device_get_nameunit(dev));
561 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
562 	mtx_lock(&t4_list_lock);
563 	SLIST_INSERT_HEAD(&t4_list, sc, link);
564 	mtx_unlock(&t4_list_lock);
565 
566 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
567 	TAILQ_INIT(&sc->sfl);
568 	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
569 
570 	rc = map_bars_0_and_4(sc);
571 	if (rc != 0)
572 		goto done; /* error message displayed already */
573 
574 	/*
575 	 * This is the real PF# to which we're attaching.  Works from within PCI
576 	 * passthrough environments too, where pci_get_function() could return a
577 	 * different PF# depending on the passthrough configuration.  We need to
578 	 * use the real PF# in all our communication with the firmware.
579 	 */
580 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
581 	sc->mbox = sc->pf;
582 
583 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
584 	sc->an_handler = an_not_handled;
585 	for (i = 0; i < nitems(sc->cpl_handler); i++)
586 		sc->cpl_handler[i] = cpl_not_handled;
587 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
588 		sc->fw_msg_handler[i] = fw_msg_not_handled;
589 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
590 	t4_init_sge_cpl_handlers(sc);
591 
592 	/* Prepare the adapter for operation */
593 	rc = -t4_prep_adapter(sc);
594 	if (rc != 0) {
595 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
596 		goto done;
597 	}
598 
599 	/*
600 	 * Do this really early: set up the memory windows before even the
601 	 * character device exists.  The userland tool's register i/o and mem
602 	 * read will then work even in "recovery mode".
603 	 */
604 	setup_memwin(sc);
605 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
606 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
607 	    device_get_nameunit(dev));
608 	if (sc->cdev == NULL)
609 		device_printf(dev, "failed to create nexus char device.\n");
610 	else
611 		sc->cdev->si_drv1 = sc;
612 
613 	/* Go no further if recovery mode has been requested. */
614 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
615 		device_printf(dev, "recovery mode.\n");
616 		goto done;
617 	}
618 
619 	/* Prepare the firmware for operation */
620 	rc = prep_firmware(sc);
621 	if (rc != 0)
622 		goto done; /* error message displayed already */
623 
624 	rc = get_params__post_init(sc);
625 	if (rc != 0)
626 		goto done; /* error message displayed already */
627 
628 	rc = set_params__post_init(sc);
629 	if (rc != 0)
630 		goto done; /* error message displayed already */
631 
632 	rc = map_bar_2(sc);
633 	if (rc != 0)
634 		goto done; /* error message displayed already */
635 
636 	for (i = 0; i < NCHAN; i++)
637 		sc->params.tp.tx_modq[i] = i;
638 
639 	rc = t4_create_dma_tag(sc);
640 	if (rc != 0)
641 		goto done; /* error message displayed already */
642 
643 	/*
644 	 * First pass over all the ports - allocate VIs and initialize some
645 	 * basic parameters like mac address, port type, etc.  We also figure
646 	 * out whether a port is 10G or 1G and use that information when
647 	 * calculating how many interrupts to attempt to allocate.
648 	 */
649 	n10g = n1g = 0;
650 	for_each_port(sc, i) {
651 		struct port_info *pi;
652 
653 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
654 		sc->port[i] = pi;
655 
656 		/* These must be set before t4_port_init */
657 		pi->adapter = sc;
658 		pi->port_id = i;
659 
660 		/* Allocate the vi and initialize parameters like mac addr */
661 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
662 		if (rc != 0) {
663 			device_printf(dev, "unable to initialize port %d: %d\n",
664 			    i, rc);
665 			free(pi, M_CXGBE);
666 			sc->port[i] = NULL;
667 			goto done;
668 		}
669 
670 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
671 		    device_get_nameunit(dev), i);
672 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
673 
674 		if (is_10G_port(pi) || is_40G_port(pi)) {
675 			n10g++;
676 			pi->tmr_idx = t4_tmr_idx_10g;
677 			pi->pktc_idx = t4_pktc_idx_10g;
678 		} else {
679 			n1g++;
680 			pi->tmr_idx = t4_tmr_idx_1g;
681 			pi->pktc_idx = t4_pktc_idx_1g;
682 		}
683 
684 		pi->xact_addr_filt = -1;
685 
686 		pi->qsize_rxq = t4_qsize_rxq;
687 		pi->qsize_txq = t4_qsize_txq;
688 
689 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
690 		if (pi->dev == NULL) {
691 			device_printf(dev,
692 			    "failed to add device for port %d.\n", i);
693 			rc = ENXIO;
694 			goto done;
695 		}
696 		device_set_softc(pi->dev, pi);
697 	}
698 
699 	/*
700 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
701 	 */
702 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
703 	if (rc != 0)
704 		goto done; /* error message displayed already */
705 
706 	sc->intr_type = iaq.intr_type;
707 	sc->intr_count = iaq.nirq;
708 	sc->flags |= iaq.intr_flags;
709 
710 	s = &sc->sge;
711 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
712 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
713 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
714 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
715 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
716 
717 #ifdef TCP_OFFLOAD
718 	if (is_offload(sc)) {
719 
720 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
721 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
722 		s->neq += s->nofldtxq + s->nofldrxq;
723 		s->niq += s->nofldrxq;
724 
725 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
726 		    M_CXGBE, M_ZERO | M_WAITOK);
727 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
728 		    M_CXGBE, M_ZERO | M_WAITOK);
729 	}
730 #endif
731 
732 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
733 	    M_ZERO | M_WAITOK);
734 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
735 	    M_ZERO | M_WAITOK);
736 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
737 	    M_ZERO | M_WAITOK);
738 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
739 	    M_ZERO | M_WAITOK);
740 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
741 	    M_ZERO | M_WAITOK);
742 
743 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
744 	    M_ZERO | M_WAITOK);
745 
746 	t4_init_l2t(sc, M_WAITOK);
747 
748 	/*
749 	 * Second pass over the ports.  This time we know the number of rx and
750 	 * tx queues that each port should get.
751 	 */
752 	rqidx = tqidx = 0;
753 #ifdef TCP_OFFLOAD
754 	ofld_rqidx = ofld_tqidx = 0;
755 #endif
756 	for_each_port(sc, i) {
757 		struct port_info *pi = sc->port[i];
758 
759 		if (pi == NULL)
760 			continue;
761 
762 		pi->first_rxq = rqidx;
763 		pi->first_txq = tqidx;
764 		if (is_10G_port(pi) || is_40G_port(pi)) {
765 			pi->nrxq = iaq.nrxq10g;
766 			pi->ntxq = iaq.ntxq10g;
767 		} else {
768 			pi->nrxq = iaq.nrxq1g;
769 			pi->ntxq = iaq.ntxq1g;
770 		}
771 
772 		rqidx += pi->nrxq;
773 		tqidx += pi->ntxq;
774 
775 #ifdef TCP_OFFLOAD
776 		if (is_offload(sc)) {
777 			pi->first_ofld_rxq = ofld_rqidx;
778 			pi->first_ofld_txq = ofld_tqidx;
779 			if (is_10G_port(pi) || is_40G_port(pi)) {
780 				pi->nofldrxq = iaq.nofldrxq10g;
781 				pi->nofldtxq = iaq.nofldtxq10g;
782 			} else {
783 				pi->nofldrxq = iaq.nofldrxq1g;
784 				pi->nofldtxq = iaq.nofldtxq1g;
785 			}
786 			ofld_rqidx += pi->nofldrxq;
787 			ofld_tqidx += pi->nofldtxq;
788 		}
789 #endif
790 	}
791 
792 	rc = setup_intr_handlers(sc);
793 	if (rc != 0) {
794 		device_printf(dev,
795 		    "failed to setup interrupt handlers: %d\n", rc);
796 		goto done;
797 	}
798 
799 	rc = bus_generic_attach(dev);
800 	if (rc != 0) {
801 		device_printf(dev,
802 		    "failed to attach all child ports: %d\n", rc);
803 		goto done;
804 	}
805 
806 	device_printf(dev,
807 	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
808 	    sc->params.pci.width, sc->params.nports, sc->intr_count,
809 	    sc->intr_type == INTR_MSIX ? "MSI-X" :
810 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
811 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
812 
813 	t4_set_desc(sc);
814 
815 done:
816 	if (rc != 0 && sc->cdev) {
817 		/* cdev was created and so cxgbetool works; recover that way. */
818 		device_printf(dev,
819 		    "error during attach, adapter is now in recovery mode.\n");
820 		rc = 0;
821 	}
822 
823 	if (rc != 0)
824 		t4_detach(dev);
825 	else
826 		t4_sysctls(sc);
827 
828 	return (rc);
829 }
830 
831 /*
832  * Idempotent
833  */
834 static int
835 t4_detach(device_t dev)
836 {
837 	struct adapter *sc;
838 	struct port_info *pi;
839 	int i, rc;
840 
841 	sc = device_get_softc(dev);
842 
843 	if (sc->flags & FULL_INIT_DONE)
844 		t4_intr_disable(sc);
845 
846 	if (sc->cdev) {
847 		destroy_dev(sc->cdev);
848 		sc->cdev = NULL;
849 	}
850 
851 	rc = bus_generic_detach(dev);
852 	if (rc) {
853 		device_printf(dev,
854 		    "failed to detach child devices: %d\n", rc);
855 		return (rc);
856 	}
857 
858 	for (i = 0; i < sc->intr_count; i++)
859 		t4_free_irq(sc, &sc->irq[i]);
860 
861 	for (i = 0; i < MAX_NPORTS; i++) {
862 		pi = sc->port[i];
863 		if (pi) {
864 			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
865 			if (pi->dev)
866 				device_delete_child(dev, pi->dev);
867 
868 			mtx_destroy(&pi->pi_lock);
869 			free(pi, M_CXGBE);
870 		}
871 	}
872 
873 	if (sc->flags & FULL_INIT_DONE)
874 		adapter_full_uninit(sc);
875 
876 	if (sc->flags & FW_OK)
877 		t4_fw_bye(sc, sc->mbox);
878 
879 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
880 		pci_release_msi(dev);
881 
882 	if (sc->regs_res)
883 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
884 		    sc->regs_res);
885 
886 	if (sc->udbs_res)
887 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
888 		    sc->udbs_res);
889 
890 	if (sc->msix_res)
891 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
892 		    sc->msix_res);
893 
894 	if (sc->l2t)
895 		t4_free_l2t(sc->l2t);
896 
897 #ifdef TCP_OFFLOAD
898 	free(sc->sge.ofld_rxq, M_CXGBE);
899 	free(sc->sge.ofld_txq, M_CXGBE);
900 #endif
901 	free(sc->irq, M_CXGBE);
902 	free(sc->sge.rxq, M_CXGBE);
903 	free(sc->sge.txq, M_CXGBE);
904 	free(sc->sge.ctrlq, M_CXGBE);
905 	free(sc->sge.iqmap, M_CXGBE);
906 	free(sc->sge.eqmap, M_CXGBE);
907 	free(sc->tids.ftid_tab, M_CXGBE);
908 	t4_destroy_dma_tag(sc);
909 	if (mtx_initialized(&sc->sc_lock)) {
910 		mtx_lock(&t4_list_lock);
911 		SLIST_REMOVE(&t4_list, sc, adapter, link);
912 		mtx_unlock(&t4_list_lock);
913 		mtx_destroy(&sc->sc_lock);
914 	}
915 
916 	if (mtx_initialized(&sc->tids.ftid_lock))
917 		mtx_destroy(&sc->tids.ftid_lock);
918 	if (mtx_initialized(&sc->sfl_lock))
919 		mtx_destroy(&sc->sfl_lock);
920 
921 	bzero(sc, sizeof(*sc));
922 
923 	return (0);
924 }
925 
926 
927 static int
928 cxgbe_probe(device_t dev)
929 {
930 	char buf[128];
931 	struct port_info *pi = device_get_softc(dev);
932 
933 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
934 	device_set_desc_copy(dev, buf);
935 
936 	return (BUS_PROBE_DEFAULT);
937 }
938 
939 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
940     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
941     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
942 #define T4_CAP_ENABLE (T4_CAP)
943 
944 static int
945 cxgbe_attach(device_t dev)
946 {
947 	struct port_info *pi = device_get_softc(dev);
948 	struct ifnet *ifp;
949 
950 	/* Allocate an ifnet and set it up */
951 	ifp = if_alloc(IFT_ETHER);
952 	if (ifp == NULL) {
953 		device_printf(dev, "cannot allocate ifnet.\n");
954 		return (ENOMEM);
955 	}
956 	pi->ifp = ifp;
957 	ifp->if_softc = pi;
958 
959 	callout_init(&pi->tick, CALLOUT_MPSAFE);
960 
961 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
962 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
963 
964 	ifp->if_init = cxgbe_init;
965 	ifp->if_ioctl = cxgbe_ioctl;
966 	ifp->if_transmit = cxgbe_transmit;
967 	ifp->if_qflush = cxgbe_qflush;
968 
969 	ifp->if_capabilities = T4_CAP;
970 #ifdef TCP_OFFLOAD
971 	if (is_offload(pi->adapter))
972 		ifp->if_capabilities |= IFCAP_TOE;
973 #endif
974 	ifp->if_capenable = T4_CAP_ENABLE;
975 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
976 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
977 
978 	/* Initialize ifmedia for this port */
979 	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
980 	    cxgbe_media_status);
981 	build_medialist(pi);
982 
983 	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
984 	    EVENTHANDLER_PRI_ANY);
985 
986 	ether_ifattach(ifp, pi->hw_addr);
987 
988 #ifdef TCP_OFFLOAD
989 	if (is_offload(pi->adapter)) {
990 		device_printf(dev,
991 		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
992 		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
993 	} else
994 #endif
995 		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
996 
997 	cxgbe_sysctls(pi);
998 
999 	return (0);
1000 }
1001 
1002 static int
1003 cxgbe_detach(device_t dev)
1004 {
1005 	struct port_info *pi = device_get_softc(dev);
1006 	struct adapter *sc = pi->adapter;
1007 	struct ifnet *ifp = pi->ifp;
1008 
1009 	/* Tell if_ioctl and if_init that the port is going away */
1010 	ADAPTER_LOCK(sc);
1011 	SET_DOOMED(pi);
1012 	wakeup(&sc->flags);
1013 	while (IS_BUSY(sc))
1014 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1015 	SET_BUSY(sc);
1016 #ifdef INVARIANTS
1017 	sc->last_op = "t4detach";
1018 	sc->last_op_thr = curthread;
1019 #endif
1020 	ADAPTER_UNLOCK(sc);
1021 
1022 	if (pi->vlan_c)
1023 		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1024 
1025 	PORT_LOCK(pi);
1026 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1027 	callout_stop(&pi->tick);
1028 	PORT_UNLOCK(pi);
1029 	callout_drain(&pi->tick);
1030 
1031 	/* Let detach proceed even if these fail. */
1032 	cxgbe_uninit_synchronized(pi);
1033 	port_full_uninit(pi);
1034 
1035 	ifmedia_removeall(&pi->media);
1036 	ether_ifdetach(pi->ifp);
1037 	if_free(pi->ifp);
1038 
1039 	ADAPTER_LOCK(sc);
1040 	CLR_BUSY(sc);
1041 	wakeup(&sc->flags);
1042 	ADAPTER_UNLOCK(sc);
1043 
1044 	return (0);
1045 }
1046 
1047 static void
1048 cxgbe_init(void *arg)
1049 {
1050 	struct port_info *pi = arg;
1051 	struct adapter *sc = pi->adapter;
1052 
1053 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1054 		return;
1055 	cxgbe_init_synchronized(pi);
1056 	end_synchronized_op(sc, 0);
1057 }
1058 
1059 static int
1060 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1061 {
1062 	int rc = 0, mtu, flags;
1063 	struct port_info *pi = ifp->if_softc;
1064 	struct adapter *sc = pi->adapter;
1065 	struct ifreq *ifr = (struct ifreq *)data;
1066 	uint32_t mask;
1067 
1068 	switch (cmd) {
1069 	case SIOCSIFMTU:
1070 		mtu = ifr->ifr_mtu;
1071 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1072 			return (EINVAL);
1073 
1074 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1075 		if (rc)
1076 			return (rc);
1077 		ifp->if_mtu = mtu;
1078 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1079 			t4_update_fl_bufsize(ifp);
1080 			rc = update_mac_settings(pi, XGMAC_MTU);
1081 		}
1082 		end_synchronized_op(sc, 0);
1083 		break;
1084 
1085 	case SIOCSIFFLAGS:
1086 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1087 		if (rc)
1088 			return (rc);
1089 
1090 		if (ifp->if_flags & IFF_UP) {
1091 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1092 				flags = pi->if_flags;
1093 				if ((ifp->if_flags ^ flags) &
1094 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1095 					rc = update_mac_settings(pi,
1096 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1097 				}
1098 			} else
1099 				rc = cxgbe_init_synchronized(pi);
1100 			pi->if_flags = ifp->if_flags;
1101 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1102 			rc = cxgbe_uninit_synchronized(pi);
1103 		end_synchronized_op(sc, 0);
1104 		break;
1105 
1106 	case SIOCADDMULTI:
1107 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1108 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1109 		if (rc)
1110 			return (rc);
1111 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1112 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
1113 		end_synchronized_op(sc, LOCK_HELD);
1114 		break;
1115 
1116 	case SIOCSIFCAP:
1117 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1118 		if (rc)
1119 			return (rc);
1120 
1121 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1122 		if (mask & IFCAP_TXCSUM) {
1123 			ifp->if_capenable ^= IFCAP_TXCSUM;
1124 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1125 
1126 			if (IFCAP_TSO4 & ifp->if_capenable &&
1127 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1128 				ifp->if_capenable &= ~IFCAP_TSO4;
1129 				if_printf(ifp,
1130 				    "tso4 disabled due to -txcsum.\n");
1131 			}
1132 		}
1133 		if (mask & IFCAP_TXCSUM_IPV6) {
1134 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1135 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1136 
1137 			if (IFCAP_TSO6 & ifp->if_capenable &&
1138 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1139 				ifp->if_capenable &= ~IFCAP_TSO6;
1140 				if_printf(ifp,
1141 				    "tso6 disabled due to -txcsum6.\n");
1142 			}
1143 		}
1144 		if (mask & IFCAP_RXCSUM)
1145 			ifp->if_capenable ^= IFCAP_RXCSUM;
1146 		if (mask & IFCAP_RXCSUM_IPV6)
1147 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1148 
1149 		/*
1150 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1151 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1152 		 * sending a TSO request our way, so it's sufficient to toggle
1153 		 * IFCAP_TSOx only.
1154 		 */
1155 		if (mask & IFCAP_TSO4) {
1156 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1157 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1158 				if_printf(ifp, "enable txcsum first.\n");
1159 				rc = EAGAIN;
1160 				goto fail;
1161 			}
1162 			ifp->if_capenable ^= IFCAP_TSO4;
1163 		}
1164 		if (mask & IFCAP_TSO6) {
1165 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1166 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1167 				if_printf(ifp, "enable txcsum6 first.\n");
1168 				rc = EAGAIN;
1169 				goto fail;
1170 			}
1171 			ifp->if_capenable ^= IFCAP_TSO6;
1172 		}
1173 		if (mask & IFCAP_LRO) {
1174 #if defined(INET) || defined(INET6)
1175 			int i;
1176 			struct sge_rxq *rxq;
1177 
1178 			ifp->if_capenable ^= IFCAP_LRO;
1179 			for_each_rxq(pi, i, rxq) {
1180 				if (ifp->if_capenable & IFCAP_LRO)
1181 					rxq->iq.flags |= IQ_LRO_ENABLED;
1182 				else
1183 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1184 			}
1185 #endif
1186 		}
1187 #ifdef TCP_OFFLOAD
1188 		if (mask & IFCAP_TOE) {
1189 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1190 
1191 			rc = toe_capability(pi, enable);
1192 			if (rc != 0)
1193 				goto fail;
1194 
1195 			ifp->if_capenable ^= mask;
1196 		}
1197 #endif
1198 		if (mask & IFCAP_VLAN_HWTAGGING) {
1199 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1200 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1201 				rc = update_mac_settings(pi, XGMAC_VLANEX);
1202 		}
1203 		if (mask & IFCAP_VLAN_MTU) {
1204 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1205 
1206 			/* Need to find out how to disable auto-mtu-inflation */
1207 		}
1208 		if (mask & IFCAP_VLAN_HWTSO)
1209 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1210 		if (mask & IFCAP_VLAN_HWCSUM)
1211 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1212 
1213 #ifdef VLAN_CAPABILITIES
1214 		VLAN_CAPABILITIES(ifp);
1215 #endif
1216 fail:
1217 		end_synchronized_op(sc, 0);
1218 		break;
1219 
1220 	case SIOCSIFMEDIA:
1221 	case SIOCGIFMEDIA:
1222 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1223 		break;
1224 
1225 	default:
1226 		rc = ether_ioctl(ifp, cmd, data);
1227 	}
1228 
1229 	return (rc);
1230 }
1231 
1232 static int
1233 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1234 {
1235 	struct port_info *pi = ifp->if_softc;
1236 	struct adapter *sc = pi->adapter;
1237 	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1238 	struct buf_ring *br;
1239 	int rc;
1240 
1241 	M_ASSERTPKTHDR(m);
1242 
1243 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1244 		m_freem(m);
1245 		return (ENETDOWN);
1246 	}
1247 
1248 	if (m->m_flags & M_FLOWID)
1249 		txq += (m->m_pkthdr.flowid % pi->ntxq);
1250 	br = txq->br;
1251 
1252 	if (TXQ_TRYLOCK(txq) == 0) {
1253 		struct sge_eq *eq = &txq->eq;
1254 
1255 		/*
1256 		 * It is possible that t4_eth_tx finishes up and releases the
1257 		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
1258 		 * need to make sure that this mbuf doesn't just sit there in
1259 		 * the drbr.
1260 		 */
1261 
1262 		rc = drbr_enqueue(ifp, br, m);
1263 		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1264 		    !(eq->flags & EQ_DOOMED))
1265 			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1266 		return (rc);
1267 	}
1268 
1269 	/*
1270 	 * txq->m is an mbuf held up by a temporary shortage of resources; it
1271 	 * should be put on the wire first, then whatever is in the drbr, and
1272 	 * finally the mbuf that was just passed in to us.
1273 	 *
1274 	 * The return code should indicate the fate of the mbuf that was passed
1275 	 * in this time.
1276 	 */
1277 
1278 	TXQ_LOCK_ASSERT_OWNED(txq);
1279 	if (drbr_needs_enqueue(ifp, br) || txq->m) {
1280 
1281 		/* Queued for transmission. */
1282 
1283 		rc = drbr_enqueue(ifp, br, m);
1284 		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1285 		(void) t4_eth_tx(ifp, txq, m);
1286 		TXQ_UNLOCK(txq);
1287 		return (rc);
1288 	}
1289 
1290 	/* Direct transmission. */
1291 	rc = t4_eth_tx(ifp, txq, m);
1292 	if (rc != 0 && txq->m)
1293 		rc = 0;	/* held, will be transmitted soon (hopefully) */
1294 
1295 	TXQ_UNLOCK(txq);
1296 	return (rc);
1297 }
1298 
1299 static void
1300 cxgbe_qflush(struct ifnet *ifp)
1301 {
1302 	struct port_info *pi = ifp->if_softc;
1303 	struct sge_txq *txq;
1304 	int i;
1305 	struct mbuf *m;
1306 
1307 	/* queues do not exist if !PORT_INIT_DONE. */
1308 	if (pi->flags & PORT_INIT_DONE) {
1309 		for_each_txq(pi, i, txq) {
1310 			TXQ_LOCK(txq);
1311 			m_freem(txq->m);
1312 			txq->m = NULL;
1313 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1314 				m_freem(m);
1315 			TXQ_UNLOCK(txq);
1316 		}
1317 	}
1318 	if_qflush(ifp);
1319 }
1320 
1321 static int
1322 cxgbe_media_change(struct ifnet *ifp)
1323 {
1324 	struct port_info *pi = ifp->if_softc;
1325 
1326 	device_printf(pi->dev, "%s unimplemented.\n", __func__);
1327 
1328 	return (EOPNOTSUPP);
1329 }
1330 
1331 static void
1332 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1333 {
1334 	struct port_info *pi = ifp->if_softc;
1335 	struct ifmedia_entry *cur = pi->media.ifm_cur;
1336 	int speed = pi->link_cfg.speed;
1337 	int data = (pi->port_type << 8) | pi->mod_type;
1338 
1339 	if (cur->ifm_data != data) {
1340 		build_medialist(pi);
1341 		cur = pi->media.ifm_cur;
1342 	}
1343 
1344 	ifmr->ifm_status = IFM_AVALID;
1345 	if (!pi->link_cfg.link_ok)
1346 		return;
1347 
1348 	ifmr->ifm_status |= IFM_ACTIVE;
1349 
1350 	/* active and current will differ iff current media is autoselect. */
1351 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1352 		return;
1353 
1354 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1355 	if (speed == SPEED_10000)
1356 		ifmr->ifm_active |= IFM_10G_T;
1357 	else if (speed == SPEED_1000)
1358 		ifmr->ifm_active |= IFM_1000_T;
1359 	else if (speed == SPEED_100)
1360 		ifmr->ifm_active |= IFM_100_TX;
1361 	else if (speed == SPEED_10)
1362 		ifmr->ifm_active |= IFM_10_T;
1363 	else
1364 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1365 			    speed));
1366 }
1367 
1368 void
1369 t4_fatal_err(struct adapter *sc)
1370 {
1371 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1372 	t4_intr_disable(sc);
1373 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1374 	    device_get_nameunit(sc->dev));
1375 }
1376 
1377 static int
1378 map_bars_0_and_4(struct adapter *sc)
1379 {
1380 	sc->regs_rid = PCIR_BAR(0);
1381 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1382 	    &sc->regs_rid, RF_ACTIVE);
1383 	if (sc->regs_res == NULL) {
1384 		device_printf(sc->dev, "cannot map registers.\n");
1385 		return (ENXIO);
1386 	}
1387 	sc->bt = rman_get_bustag(sc->regs_res);
1388 	sc->bh = rman_get_bushandle(sc->regs_res);
1389 	sc->mmio_len = rman_get_size(sc->regs_res);
1390 	setbit(&sc->doorbells, DOORBELL_KDB);
1391 
1392 	sc->msix_rid = PCIR_BAR(4);
1393 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1394 	    &sc->msix_rid, RF_ACTIVE);
1395 	if (sc->msix_res == NULL) {
1396 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1397 		return (ENXIO);
1398 	}
1399 
1400 	return (0);
1401 }
1402 
1403 static int
1404 map_bar_2(struct adapter *sc)
1405 {
1406 
1407 	/*
1408 	 * T4: only the iWARP driver uses the userspace doorbells.  There is
1409 	 * no need to map the BAR if RDMA is disabled.
1410 	 */
1411 	if (is_t4(sc) && sc->rdmacaps == 0)
1412 		return (0);
1413 
1414 	sc->udbs_rid = PCIR_BAR(2);
1415 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1416 	    &sc->udbs_rid, RF_ACTIVE);
1417 	if (sc->udbs_res == NULL) {
1418 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
1419 		return (ENXIO);
1420 	}
1421 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
1422 
1423 	if (is_t5(sc)) {
1424 		setbit(&sc->doorbells, DOORBELL_UDB);
1425 #if defined(__i386__) || defined(__amd64__)
1426 		if (t5_write_combine) {
1427 			int rc;
1428 
1429 			/*
1430 			 * Enable write combining on BAR2.  This is the
1431 			 * userspace doorbell BAR and is split into 128B
1432 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
1433 			 * with an egress queue.  The first 64B has the doorbell
1434 			 * and the second 64B can be used to submit a tx work
1435 			 * request with an implicit doorbell.
1436 			 */
1437 
1438 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1439 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1440 			if (rc == 0) {
1441 				clrbit(&sc->doorbells, DOORBELL_UDB);
1442 				setbit(&sc->doorbells, DOORBELL_WCWR);
1443 				setbit(&sc->doorbells, DOORBELL_UDBWC);
1444 			} else {
1445 				device_printf(sc->dev,
1446 				    "couldn't enable write combining: %d\n",
1447 				    rc);
1448 			}
1449 
1450 			t4_write_reg(sc, A_SGE_STAT_CFG,
1451 			    V_STATSOURCE_T5(7) | V_STATMODE(0));
1452 		}
1453 #endif
1454 	}
1455 
1456 	return (0);
1457 }
1458 
1459 static const struct memwin t4_memwin[] = {
1460 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1461 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1462 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1463 };
1464 
1465 static const struct memwin t5_memwin[] = {
1466 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1467 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1468 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1469 };
1470 
1471 static void
1472 setup_memwin(struct adapter *sc)
1473 {
1474 	const struct memwin *mw;
1475 	int i, n;
1476 	uint32_t bar0;
1477 
1478 	if (is_t4(sc)) {
1479 		/*
1480 		 * Read low 32b of bar0 indirectly via the hardware backdoor
1481 		 * mechanism.  Works from within PCI passthrough environments
1482 		 * too, where rman_get_start() can return a different value.  We
1483 		 * need to program the T4 memory window decoders with the actual
1484 		 * addresses that will be coming across the PCIe link.
1485 		 */
1486 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1487 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1488 
1489 		mw = &t4_memwin[0];
1490 		n = nitems(t4_memwin);
1491 	} else {
1492 		/* T5 uses the relative offset inside the PCIe BAR */
1493 		bar0 = 0;
1494 
1495 		mw = &t5_memwin[0];
1496 		n = nitems(t5_memwin);
1497 	}
1498 
1499 	for (i = 0; i < n; i++, mw++) {
1500 		t4_write_reg(sc,
1501 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1502 		    (mw->base + bar0) | V_BIR(0) |
1503 		    V_WINDOW(ilog2(mw->aperture) - 10));
1504 	}
1505 
1506 	/* flush */
1507 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1508 }
1509 
1510 /*
1511  * Verify that the memory range specified by the addr/len pair is valid and lies
1512  * entirely within a single region (EDCx or MCx).
1513  */
1514 static int
1515 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1516 {
1517 	uint32_t em, addr_len, maddr, mlen;
1518 
1519 	/* Memory can only be accessed in naturally aligned 4 byte units */
1520 	if (addr & 3 || len & 3 || len == 0)
1521 		return (EINVAL);
1522 
1523 	/* Enabled memories */
1524 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1525 	if (em & F_EDRAM0_ENABLE) {
1526 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1527 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1528 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1529 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1530 		    addr + len <= maddr + mlen)
1531 			return (0);
1532 	}
1533 	if (em & F_EDRAM1_ENABLE) {
1534 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1535 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1536 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1537 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1538 		    addr + len <= maddr + mlen)
1539 			return (0);
1540 	}
1541 	if (em & F_EXT_MEM_ENABLE) {
1542 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1543 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1544 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1545 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1546 		    addr + len <= maddr + mlen)
1547 			return (0);
1548 	}
1549 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1550 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1551 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1552 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1553 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1554 		    addr + len <= maddr + mlen)
1555 			return (0);
1556 	}
1557 
1558 	return (EFAULT);
1559 }
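
/*
 * Note on the << 20 shifts above: the MA BAR registers report base and
 * size in MB units.  E.g. an EDRAM0 size field of 0x40 works out to
 * 0x40 << 20 == 0x4000000 bytes, i.e. a 64MB region.
 */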
1560 
1561 /*
1562  * Verify that the memory range specified by the memtype/offset/len pair is
1563  * valid and lies entirely within the memtype specified.  The global address of
1564  * the start of the range is returned in addr.
1565  */
1566 static int
1567 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1568     uint32_t *addr)
1569 {
1570 	uint32_t em, addr_len, maddr, mlen;
1571 
1572 	/* Memory can only be accessed in naturally aligned 4 byte units */
1573 	if (off & 3 || len & 3 || len == 0)
1574 		return (EINVAL);
1575 
1576 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1577 	switch (mtype) {
1578 	case MEM_EDC0:
1579 		if (!(em & F_EDRAM0_ENABLE))
1580 			return (EINVAL);
1581 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1582 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1583 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1584 		break;
1585 	case MEM_EDC1:
1586 		if (!(em & F_EDRAM1_ENABLE))
1587 			return (EINVAL);
1588 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1589 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1590 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1591 		break;
1592 	case MEM_MC:
1593 		if (!(em & F_EXT_MEM_ENABLE))
1594 			return (EINVAL);
1595 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1596 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1597 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1598 		break;
1599 	case MEM_MC1:
1600 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1601 			return (EINVAL);
1602 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1603 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1604 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1605 		break;
1606 	default:
1607 		return (EINVAL);
1608 	}
1609 
1610 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1611 		*addr = maddr + off;	/* global address */
1612 		return (0);
1613 	}
1614 
1615 	return (EFAULT);
1616 }
1617 
1618 static void
1619 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1620 {
1621 	const struct memwin *mw;
1622 
1623 	if (is_t4(sc)) {
1624 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1625 		    ("%s: incorrect memwin# (%d)", __func__, win));
1626 		mw = &t4_memwin[win];
1627 	} else {
1628 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1629 		    ("%s: incorrect memwin# (%d)", __func__, win));
1630 		mw = &t5_memwin[win];
1631 	}
1632 
1633 	if (base != NULL)
1634 		*base = mw->base;
1635 	if (aperture != NULL)
1636 		*aperture = mw->aperture;
1637 }
1638 
1639 /*
1640  * Positions the memory window such that it can be used to access the specified
1641  * address in the chip's address space.  The return value is the offset of addr
1642  * from the start of the window.
1643  */
1644 static uint32_t
1645 position_memwin(struct adapter *sc, int n, uint32_t addr)
1646 {
1647 	uint32_t start, pf;
1648 	uint32_t reg;
1649 
1650 	KASSERT(n >= 0 && n <= 3,
1651 	    ("%s: invalid window %d.", __func__, n));
1652 	KASSERT((addr & 3) == 0,
1653 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1654 
1655 	if (is_t4(sc)) {
1656 		pf = 0;
1657 		start = addr & ~0xf;	/* start must be 16B aligned */
1658 	} else {
1659 		pf = V_PFNUM(sc->pf);
1660 		start = addr & ~0x7f;	/* start must be 128B aligned */
1661 	}
1662 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1663 
1664 	t4_write_reg(sc, reg, start | pf);
1665 	t4_read_reg(sc, reg);
1666 
1667 	return (addr - start);
1668 }
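
/*
 * Illustrative sketch (not driver code): a 4B read of adapter memory
 * through memory window 0 using the two helpers above.  Assumes addr is
 * 4B aligned and that the read lies within the window's aperture; the
 * function name is hypothetical.
 */
#ifdef notyet
static uint32_t
example_memwin_read4(struct adapter *sc, uint32_t addr)
{
	uint32_t base, off;

	memwin_info(sc, 0, &base, NULL);	/* window 0's offset in BAR0 */
	off = position_memwin(sc, 0, addr);	/* aim the window at addr */

	return (t4_read_reg(sc, base + off));	/* read through the window */
}
#endif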
1669 
1670 static int
1671 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1672     struct intrs_and_queues *iaq)
1673 {
1674 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1675 	int nofldrxq10g = 0, nofldrxq1g = 0;
1676 
1677 	bzero(iaq, sizeof(*iaq));
1678 
1679 	iaq->ntxq10g = t4_ntxq10g;
1680 	iaq->ntxq1g = t4_ntxq1g;
1681 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1682 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1683 #ifdef TCP_OFFLOAD
1684 	if (is_offload(sc)) {
1685 		iaq->nofldtxq10g = t4_nofldtxq10g;
1686 		iaq->nofldtxq1g = t4_nofldtxq1g;
1687 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1688 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1689 	}
1690 #endif
1691 
1692 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1693 
1694 		if ((itype & t4_intr_types) == 0)
1695 			continue;	/* not allowed */
1696 
1697 		if (itype == INTR_MSIX)
1698 			navail = pci_msix_count(sc->dev);
1699 		else if (itype == INTR_MSI)
1700 			navail = pci_msi_count(sc->dev);
1701 		else
1702 			navail = 1;
1703 restart:
1704 		if (navail == 0)
1705 			continue;
1706 
1707 		iaq->intr_type = itype;
1708 		iaq->intr_flags = 0;
1709 
1710 		/*
1711 		 * Best option: an interrupt vector for errors, one for the
1712 		 * firmware event queue, and one for each rxq (NIC as well as
1713 		 * offload).
1714 		 */
1715 		iaq->nirq = T4_EXTRA_INTR;
1716 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1717 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1718 		if (iaq->nirq <= navail &&
1719 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1720 			iaq->intr_flags |= INTR_DIRECT;
1721 			goto allocate;
1722 		}
1723 
1724 		/*
1725 		 * Second best option: an interrupt vector for errors, one for
1726 		 * the firmware event queue, and one each for either NIC or
1727 		 * offload rxq's.
1728 		 */
1729 		iaq->nirq = T4_EXTRA_INTR;
1730 		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1731 		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1732 		if (iaq->nirq <= navail &&
1733 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
1734 			goto allocate;
1735 
1736 		/*
1737 		 * Next best option: an interrupt vector for errors, one for the
1738 		 * firmware event queue, and at least one per port.  At this
1739 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
1740 		 * what's available to us.
1741 		 */
1742 		iaq->nirq = T4_EXTRA_INTR;
1743 		iaq->nirq += n10g + n1g;
1744 		if (iaq->nirq <= navail) {
1745 			int leftover = navail - iaq->nirq;
1746 
1747 			if (n10g > 0) {
1748 				int target = max(nrxq10g, nofldrxq10g);
1749 
1750 				n = 1;
1751 				while (n < target && leftover >= n10g) {
1752 					leftover -= n10g;
1753 					iaq->nirq += n10g;
1754 					n++;
1755 				}
1756 				iaq->nrxq10g = min(n, nrxq10g);
1757 #ifdef TCP_OFFLOAD
1758 				if (is_offload(sc))
1759 					iaq->nofldrxq10g = min(n, nofldrxq10g);
1760 #endif
1761 			}
1762 
1763 			if (n1g > 0) {
1764 				int target = max(nrxq1g, nofldrxq1g);
1765 
1766 				n = 1;
1767 				while (n < target && leftover >= n1g) {
1768 					leftover -= n1g;
1769 					iaq->nirq += n1g;
1770 					n++;
1771 				}
1772 				iaq->nrxq1g = min(n, nrxq1g);
1773 #ifdef TCP_OFFLOAD
1774 				if (is_offload(sc))
1775 					iaq->nofldrxq1g = min(n, nofldrxq1g);
1776 #endif
1777 			}
1778 
1779 			if (itype != INTR_MSI || powerof2(iaq->nirq))
1780 				goto allocate;
1781 		}
1782 
1783 		/*
1784 		 * Least desirable option: one interrupt vector for everything.
1785 		 */
1786 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1787 #ifdef TCP_OFFLOAD
1788 		if (is_offload(sc))
1789 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1790 #endif
1791 
1792 allocate:
1793 		navail = iaq->nirq;
1794 		rc = 0;
1795 		if (itype == INTR_MSIX)
1796 			rc = pci_alloc_msix(sc->dev, &navail);
1797 		else if (itype == INTR_MSI)
1798 			rc = pci_alloc_msi(sc->dev, &navail);
1799 
1800 		if (rc == 0) {
1801 			if (navail == iaq->nirq)
1802 				return (0);
1803 
1804 			/*
1805 			 * Didn't get the number requested.  Use whatever number
1806 			 * the kernel is willing to allocate (it's in navail).
1807 			 */
1808 			device_printf(sc->dev, "fewer vectors than requested, "
1809 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1810 			    itype, iaq->nirq, navail);
1811 			pci_release_msi(sc->dev);
1812 			goto restart;
1813 		}
1814 
1815 		device_printf(sc->dev,
1816 		    "failed to allocate vectors: %d, type=%d, req=%d, rcvd=%d\n",
1817 		    rc, itype, iaq->nirq, navail);
1818 	}
1819 
1820 	device_printf(sc->dev,
1821 	    "failed to find a usable interrupt type.  "
1822 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1823 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1824 
1825 	return (ENXIO);
1826 }
1827 
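/*
 * Pack the four firmware version components into a single 32-bit word in the
 * same format as the fw_ver field of struct fw_hdr.
 */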
1828 #define FW_VERSION(chip) ( \
1829     V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR_##chip) | \
1830     V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR_##chip) | \
1831     V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO_##chip) | \
1832     V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD_##chip))
1833 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
1834 
1835 struct fw_info {
1836 	uint8_t chip;
1837 	char *kld_name;
1838 	char *fw_mod_name;
1839 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1840 } fw_info[] = {
1841 	{
1842 		.chip = CHELSIO_T4,
1843 		.kld_name = "t4fw_cfg",
1844 		.fw_mod_name = "t4fw",
1845 		.fw_hdr = {
1846 			.chip = FW_HDR_CHIP_T4,
1847 			.fw_ver = htobe32_const(FW_VERSION(T4)),
1848 			.intfver_nic = FW_INTFVER(T4, NIC),
1849 			.intfver_vnic = FW_INTFVER(T4, VNIC),
1850 			.intfver_ofld = FW_INTFVER(T4, OFLD),
1851 			.intfver_ri = FW_INTFVER(T4, RI),
1852 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1853 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1854 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1855 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1856 		},
1857 	}, {
1858 		.chip = CHELSIO_T5,
1859 		.kld_name = "t5fw_cfg",
1860 		.fw_mod_name = "t5fw",
1861 		.fw_hdr = {
1862 			.chip = FW_HDR_CHIP_T5,
1863 			.fw_ver = htobe32_const(FW_VERSION(T5)),
1864 			.intfver_nic = FW_INTFVER(T5, NIC),
1865 			.intfver_vnic = FW_INTFVER(T5, VNIC),
1866 			.intfver_ofld = FW_INTFVER(T5, OFLD),
1867 			.intfver_ri = FW_INTFVER(T5, RI),
1868 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1869 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1870 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1871 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1872 		},
1873 	}
1874 };
1875 
1876 static struct fw_info *
1877 find_fw_info(int chip)
1878 {
1879 	int i;
1880 
1881 	for (i = 0; i < nitems(fw_info); i++) {
1882 		if (fw_info[i].chip == chip)
1883 			return (&fw_info[i]);
1884 	}
1885 	return (NULL);
1886 }
1887 
1888 /*
1889  * Is the given firmware API compatible with the one the driver was compiled
1890  * with?
1891  */
1892 static int
1893 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1894 {
1895 
1896 	/* short circuit if it's the exact same firmware version */
1897 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1898 		return (1);
1899 
1900 	/*
1901 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1902 	 * features that are supported in the driver.
1903 	 */
1904 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1905 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1906 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1907 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1908 		return (1);
1909 #undef SAME_INTF
1910 
1911 	return (0);
1912 }
1913 
1914 /*
1915  * The firmware in the KLD is usable, but should it be installed?  This routine
1916  * explains itself in detail if it indicates the KLD firmware should be
1917  * installed.
1918  */
1919 static int
1920 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1921 {
1922 	const char *reason;
1923 
1924 	if (!card_fw_usable) {
1925 		reason = "incompatible or unusable";
1926 		goto install;
1927 	}
1928 
1929 	if (k > c) {
1930 		reason = "older than the version bundled with this driver";
1931 		goto install;
1932 	}
1933 
1934 	if (t4_fw_install == 2 && k != c) {
1935 		reason = "different than the version bundled with this driver";
1936 		goto install;
1937 	}
1938 
1939 	return (0);
1940 
1941 install:
1942 	if (t4_fw_install == 0) {
1943 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1944 		    "but the driver is prohibited from installing a different "
1945 		    "firmware on the card.\n",
1946 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1947 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1948 
1949 		return (0);
1950 	}
1951 
1952 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1953 	    "installing firmware %u.%u.%u.%u on card.\n",
1954 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1955 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1956 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1957 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1958 
1959 	return (1);
1960 }

1961 /*
1962  * Establish contact with the firmware and determine if we are the master
1963  * driver or not, and whether we are responsible for chip initialization.
1964  */
1965 static int
1966 prep_firmware(struct adapter *sc)
1967 {
1968 	const struct firmware *fw = NULL, *default_cfg;
1969 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1970 	enum dev_state state;
1971 	struct fw_info *fw_info;
1972 	struct fw_hdr *card_fw;		/* fw on the card */
1973 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
1974 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
1975 					   against */
1976 
1977 	/* Contact firmware. */
1978 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1979 	if (rc < 0 || state == DEV_STATE_ERR) {
1980 		rc = -rc;
1981 		rc = rc < 0 ? -rc : EIO;	/* device in error state */
1982 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
1983 		return (rc);
1984 	}
1985 	pf = rc;
1986 	if (pf == sc->mbox)
1987 		sc->flags |= MASTER_PF;
1988 	else if (state == DEV_STATE_UNINIT) {
1989 		/*
1990 		 * We didn't get to be the master so we definitely won't be
1991 		 * configuring the chip.  It's a bug if someone else hasn't
1992 		 * configured it already.
1993 		 */
1994 		device_printf(sc->dev, "couldn't be master(%d), "
1995 		    "device not already initialized either(%d).\n", rc, state);
1996 		return (EDOOFUS);
1997 	}
1998 
1999 	/* This is the firmware whose headers the driver was compiled against */
2000 	fw_info = find_fw_info(chip_id(sc));
2001 	if (fw_info == NULL) {
2002 		device_printf(sc->dev,
2003 		    "unable to look up firmware information for chip %d.\n",
2004 		    chip_id(sc));
2005 		return (EINVAL);
2006 	}
2007 	drv_fw = &fw_info->fw_hdr;
2008 
2009 	/*
2010 	 * The firmware KLD contains many modules.  The KLD name is also the
2011 	 * name of the module that contains the default config file.
2012 	 */
2013 	default_cfg = firmware_get(fw_info->kld_name);
2014 
2015 	/* Read the header of the firmware on the card */
2016 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2017 	rc = -t4_read_flash(sc, FLASH_FW_START,
2018 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2019 	if (rc == 0)
2020 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2021 	else {
2022 		device_printf(sc->dev,
2023 		    "Unable to read card's firmware header: %d\n", rc);
2024 		card_fw_usable = 0;
2025 	}
2026 
2027 	/* This is the firmware in the KLD */
2028 	fw = firmware_get(fw_info->fw_mod_name);
2029 	if (fw != NULL) {
2030 		kld_fw = (const void *)fw->data;
2031 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2032 	} else {
2033 		kld_fw = NULL;
2034 		kld_fw_usable = 0;
2035 	}
2036 
2037 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2038 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2039 		/*
2040 		 * Common case: the firmware on the card is an exact match and
2041 		 * the KLD is an exact match too, or the KLD is
2042 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2043 		 * here -- use cxgbetool loadfw if you want to reinstall the
2044 		 * same firmware as the one on the card.
2045 		 */
2046 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2047 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2048 	    be32toh(card_fw->fw_ver))) {
2049 
2050 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2051 		if (rc != 0) {
2052 			device_printf(sc->dev,
2053 			    "failed to install firmware: %d\n", rc);
2054 			goto done;
2055 		}
2056 
2057 		/* Installed successfully, update the cached header too. */
2058 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2059 		card_fw_usable = 1;
2060 		need_fw_reset = 0;	/* already reset as part of load_fw */
2061 	}
2062 
2063 	if (!card_fw_usable) {
2064 		uint32_t d, c, k;
2065 
2066 		d = ntohl(drv_fw->fw_ver);
2067 		c = ntohl(card_fw->fw_ver);
2068 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2069 
2070 		device_printf(sc->dev, "Cannot find a usable firmware: "
2071 		    "fw_install %d, chip state %d, "
2072 		    "driver compiled with %d.%d.%d.%d, "
2073 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2074 		    t4_fw_install, state,
2075 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2076 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2077 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2078 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2079 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2080 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2081 		rc = EINVAL;
2082 		goto done;
2083 	}
2084 
2085 	/* We're using whatever's on the card and it's known to be good. */
2086 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2087 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2088 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2089 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2090 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2091 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2092 
2093 	/* Reset device */
2094 	if (need_fw_reset &&
2095 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2096 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2097 		if (rc != ETIMEDOUT && rc != EIO)
2098 			t4_fw_bye(sc, sc->mbox);
2099 		goto done;
2100 	}
2101 	sc->flags |= FW_OK;
2102 
2103 	rc = get_params__pre_init(sc);
2104 	if (rc != 0)
2105 		goto done; /* error message displayed already */
2106 
2107 	/* Partition adapter resources as specified in the config file. */
2108 	if (state == DEV_STATE_UNINIT) {
2109 
2110 		KASSERT(sc->flags & MASTER_PF,
2111 		    ("%s: trying to change chip settings when not master.",
2112 		    __func__));
2113 
2114 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2115 		if (rc != 0)
2116 			goto done;	/* error message displayed already */
2117 
2118 		t4_tweak_chip_settings(sc);
2119 
2120 		/* get basic stuff going */
2121 		rc = -t4_fw_initialize(sc, sc->mbox);
2122 		if (rc != 0) {
2123 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2124 			goto done;
2125 		}
2126 	} else {
2127 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2128 		sc->cfcsum = 0;
2129 	}
2130 
2131 done:
2132 	free(card_fw, M_CXGBE);
2133 	if (fw != NULL)
2134 		firmware_put(fw, FIRMWARE_UNLOAD);
2135 	if (default_cfg != NULL)
2136 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2137 
2138 	return (rc);
2139 }
2140 
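/*
 * Build the 32-bit identifiers for device-wide and per-PF/VF firmware
 * parameters, as used with t4_query_params() and t4_set_params().
 */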
2141 #define FW_PARAM_DEV(param) \
2142 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2143 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2144 #define FW_PARAM_PFVF(param) \
2145 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2146 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2147 
2148 /*
2149  * Partition chip resources for use between various PFs, VFs, etc.
2150  */
2151 static int
2152 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2153     const char *name_prefix)
2154 {
2155 	const struct firmware *cfg = NULL;
2156 	int rc = 0;
2157 	struct fw_caps_config_cmd caps;
2158 	uint32_t mtype, moff, finicsum, cfcsum;
2159 
2160 	/*
2161 	 * Figure out what configuration file to use.  Pick the default config
2162 	 * file for the card if the user hasn't specified one explicitly.
2163 	 */
2164 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2165 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2166 		/* Card specific overrides go here. */
2167 		if (pci_get_device(sc->dev) == 0x440a)
2168 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2169 		if (is_fpga(sc))
2170 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2171 	}
2172 
2173 	/*
2174 	 * We need to load another module if the profile is anything except
2175 	 * "default" or "flash".
2176 	 */
2177 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2178 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2179 		char s[32];
2180 
2181 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2182 		cfg = firmware_get(s);
2183 		if (cfg == NULL) {
2184 			if (default_cfg != NULL) {
2185 				device_printf(sc->dev,
2186 				    "unable to load module \"%s\" for "
2187 				    "configuration profile \"%s\", will use "
2188 				    "the default config file instead.\n",
2189 				    s, sc->cfg_file);
2190 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2191 				    "%s", DEFAULT_CF);
2192 			} else {
2193 				device_printf(sc->dev,
2194 				    "unable to load module \"%s\" for "
2195 				    "configuration profile \"%s\", will use "
2196 				    "the config file on the card's flash "
2197 				    "instead.\n", s, sc->cfg_file);
2198 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2199 				    "%s", FLASH_CF);
2200 			}
2201 		}
2202 	}
2203 
2204 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2205 	    default_cfg == NULL) {
2206 		device_printf(sc->dev,
2207 		    "default config file not available, will use the config "
2208 		    "file on the card's flash instead.\n");
2209 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2210 	}
2211 
2212 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2213 		u_int cflen, i, n;
2214 		const uint32_t *cfdata;
2215 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2216 
2217 		KASSERT(cfg != NULL || default_cfg != NULL,
2218 		    ("%s: no config to upload", __func__));
2219 
2220 		/*
2221 		 * Ask the firmware where it wants us to upload the config file.
2222 		 */
2223 		param = FW_PARAM_DEV(CF);
2224 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2225 		if (rc != 0) {
2226 			/* No support for config file?  Shouldn't happen. */
2227 			device_printf(sc->dev,
2228 			    "failed to query config file location: %d.\n", rc);
2229 			goto done;
2230 		}
2231 		mtype = G_FW_PARAMS_PARAM_Y(val);
2232 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2233 
2234 		/*
2235 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2236 		 * useless stuffing/comments at the end of the config file so
2237 		 * it's ok to simply throw away the last remaining bytes when
2238 		 * the config file is not an exact multiple of 4.  This also
2239 		 * helps with the validate_mt_off_len check.
2240 		 */
2241 		if (cfg != NULL) {
2242 			cflen = cfg->datasize & ~3;
2243 			cfdata = cfg->data;
2244 		} else {
2245 			cflen = default_cfg->datasize & ~3;
2246 			cfdata = default_cfg->data;
2247 		}
2248 
2249 		if (cflen > FLASH_CFG_MAX_SIZE) {
2250 			device_printf(sc->dev,
2251 			    "config file too long (%d, max allowed is %d).  "
2252 			    "Will try to use the config on the card, if any.\n",
2253 			    cflen, FLASH_CFG_MAX_SIZE);
2254 			goto use_config_on_flash;
2255 		}
2256 
2257 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2258 		if (rc != 0) {
2259 			device_printf(sc->dev,
2260 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2261 			    "Will try to use the config on the card, if any.\n",
2262 			    __func__, mtype, moff, cflen, rc);
2263 			goto use_config_on_flash;
2264 		}
2265 
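		/*
		 * Copy the config file into card memory through PCIe memory
		 * window 2, one window-sized chunk at a time.
		 */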
2266 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2267 		while (cflen) {
2268 			off = position_memwin(sc, 2, addr);
2269 			n = min(cflen, mw_aperture - off);
2270 			for (i = 0; i < n; i += 4)
2271 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2272 			cflen -= n;
2273 			addr += n;
2274 		}
2275 	} else {
2276 use_config_on_flash:
2277 		mtype = FW_MEMTYPE_CF_FLASH;
2278 		moff = t4_flash_cfg_addr(sc);
2279 	}
2280 
2281 	bzero(&caps, sizeof(caps));
2282 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2283 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2284 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2285 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2286 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2287 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2288 	if (rc != 0) {
2289 		device_printf(sc->dev,
2290 		    "failed to pre-process config file: %d "
2291 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2292 		goto done;
2293 	}
2294 
2295 	finicsum = be32toh(caps.finicsum);
2296 	cfcsum = be32toh(caps.cfcsum);
2297 	if (finicsum != cfcsum) {
2298 		device_printf(sc->dev,
2299 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2300 		    finicsum, cfcsum);
2301 	}
2302 	sc->cfcsum = cfcsum;
2303 
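/*
 * Mask each capability reported by the firmware with the corresponding
 * t4_*_allowed tunable and record the result in the softc.  caps.x is
 * big-endian, hence the byte swaps.
 */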
2304 #define LIMIT_CAPS(x) do { \
2305 	caps.x &= htobe16(t4_##x##_allowed); \
2306 	sc->x = be16toh(caps.x); \
2307 } while (0)
2308 
2309 	/*
2310 	 * Let the firmware know what features will (not) be used so it can tune
2311 	 * things accordingly.
2312 	 */
2313 	LIMIT_CAPS(linkcaps);
2314 	LIMIT_CAPS(niccaps);
2315 	LIMIT_CAPS(toecaps);
2316 	LIMIT_CAPS(rdmacaps);
2317 	LIMIT_CAPS(iscsicaps);
2318 	LIMIT_CAPS(fcoecaps);
2319 #undef LIMIT_CAPS
2320 
2321 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2322 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2323 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2324 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2325 	if (rc != 0) {
2326 		device_printf(sc->dev,
2327 		    "failed to process config file: %d.\n", rc);
2328 	}
2329 done:
2330 	if (cfg != NULL)
2331 		firmware_put(cfg, FIRMWARE_UNLOAD);
2332 	return (rc);
2333 }
2334 
2335 /*
2336  * Retrieve parameters that are needed (or nice to have) very early.
2337  */
2338 static int
2339 get_params__pre_init(struct adapter *sc)
2340 {
2341 	int rc;
2342 	uint32_t param[2], val[2];
2343 	struct fw_devlog_cmd cmd;
2344 	struct devlog_params *dlog = &sc->params.devlog;
2345 
2346 	param[0] = FW_PARAM_DEV(PORTVEC);
2347 	param[1] = FW_PARAM_DEV(CCLK);
2348 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2349 	if (rc != 0) {
2350 		device_printf(sc->dev,
2351 		    "failed to query parameters (pre_init): %d.\n", rc);
2352 		return (rc);
2353 	}
2354 
2355 	sc->params.portvec = val[0];
2356 	sc->params.nports = bitcount32(val[0]);
2357 	sc->params.vpd.cclk = val[1];
2358 
2359 	/* Read device log parameters. */
2360 	bzero(&cmd, sizeof(cmd));
2361 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2362 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2363 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2364 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2365 	if (rc != 0) {
2366 		device_printf(sc->dev,
2367 		    "failed to get devlog parameters: %d.\n", rc);
2368 		bzero(dlog, sizeof (*dlog));
2369 		rc = 0;	/* devlog isn't critical for device operation */
2370 	} else {
2371 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2372 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2373 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2374 		dlog->size = be32toh(cmd.memsize_devlog);
2375 	}
2376 
2377 	return (rc);
2378 }
2379 
2380 /*
2381  * Retrieve various parameters that are of interest to the driver.  The device
2382  * has been initialized by the firmware at this point.
2383  */
2384 static int
2385 get_params__post_init(struct adapter *sc)
2386 {
2387 	int rc;
2388 	uint32_t param[7], val[7];
2389 	struct fw_caps_config_cmd caps;
2390 
2391 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2392 	param[1] = FW_PARAM_PFVF(EQ_START);
2393 	param[2] = FW_PARAM_PFVF(FILTER_START);
2394 	param[3] = FW_PARAM_PFVF(FILTER_END);
2395 	param[4] = FW_PARAM_PFVF(L2T_START);
2396 	param[5] = FW_PARAM_PFVF(L2T_END);
2397 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2398 	if (rc != 0) {
2399 		device_printf(sc->dev,
2400 		    "failed to query parameters (post_init): %d.\n", rc);
2401 		return (rc);
2402 	}
2403 
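	/* The *_START/*_END parameters are inclusive bounds, hence the +1s. */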
2404 	sc->sge.iq_start = val[0];
2405 	sc->sge.eq_start = val[1];
2406 	sc->tids.ftid_base = val[2];
2407 	sc->tids.nftids = val[3] - val[2] + 1;
2408 	sc->vres.l2t.start = val[4];
2409 	sc->vres.l2t.size = val[5] - val[4] + 1;
2410 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2411 	    ("%s: L2 table size (%u) larger than expected (%u)",
2412 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2413 
2414 	/* get capabilities */
2415 	bzero(&caps, sizeof(caps));
2416 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2417 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2418 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2419 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2420 	if (rc != 0) {
2421 		device_printf(sc->dev,
2422 		    "failed to get card capabilities: %d.\n", rc);
2423 		return (rc);
2424 	}
2425 
2426 	if (caps.toecaps) {
2427 		/* query offload-related parameters */
2428 		param[0] = FW_PARAM_DEV(NTID);
2429 		param[1] = FW_PARAM_PFVF(SERVER_START);
2430 		param[2] = FW_PARAM_PFVF(SERVER_END);
2431 		param[3] = FW_PARAM_PFVF(TDDP_START);
2432 		param[4] = FW_PARAM_PFVF(TDDP_END);
2433 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2434 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2435 		if (rc != 0) {
2436 			device_printf(sc->dev,
2437 			    "failed to query TOE parameters: %d.\n", rc);
2438 			return (rc);
2439 		}
2440 		sc->tids.ntids = val[0];
2441 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2442 		sc->tids.stid_base = val[1];
2443 		sc->tids.nstids = val[2] - val[1] + 1;
2444 		sc->vres.ddp.start = val[3];
2445 		sc->vres.ddp.size = val[4] - val[3] + 1;
2446 		sc->params.ofldq_wr_cred = val[5];
2447 		sc->params.offload = 1;
2448 	}
2449 	if (caps.rdmacaps) {
2450 		param[0] = FW_PARAM_PFVF(STAG_START);
2451 		param[1] = FW_PARAM_PFVF(STAG_END);
2452 		param[2] = FW_PARAM_PFVF(RQ_START);
2453 		param[3] = FW_PARAM_PFVF(RQ_END);
2454 		param[4] = FW_PARAM_PFVF(PBL_START);
2455 		param[5] = FW_PARAM_PFVF(PBL_END);
2456 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2457 		if (rc != 0) {
2458 			device_printf(sc->dev,
2459 			    "failed to query RDMA parameters(1): %d.\n", rc);
2460 			return (rc);
2461 		}
2462 		sc->vres.stag.start = val[0];
2463 		sc->vres.stag.size = val[1] - val[0] + 1;
2464 		sc->vres.rq.start = val[2];
2465 		sc->vres.rq.size = val[3] - val[2] + 1;
2466 		sc->vres.pbl.start = val[4];
2467 		sc->vres.pbl.size = val[5] - val[4] + 1;
2468 
2469 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2470 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2471 		param[2] = FW_PARAM_PFVF(CQ_START);
2472 		param[3] = FW_PARAM_PFVF(CQ_END);
2473 		param[4] = FW_PARAM_PFVF(OCQ_START);
2474 		param[5] = FW_PARAM_PFVF(OCQ_END);
2475 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2476 		if (rc != 0) {
2477 			device_printf(sc->dev,
2478 			    "failed to query RDMA parameters(2): %d.\n", rc);
2479 			return (rc);
2480 		}
2481 		sc->vres.qp.start = val[0];
2482 		sc->vres.qp.size = val[1] - val[0] + 1;
2483 		sc->vres.cq.start = val[2];
2484 		sc->vres.cq.size = val[3] - val[2] + 1;
2485 		sc->vres.ocq.start = val[4];
2486 		sc->vres.ocq.size = val[5] - val[4] + 1;
2487 	}
2488 	if (caps.iscsicaps) {
2489 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2490 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2491 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2492 		if (rc != 0) {
2493 			device_printf(sc->dev,
2494 			    "failed to query iSCSI parameters: %d.\n", rc);
2495 			return (rc);
2496 		}
2497 		sc->vres.iscsi.start = val[0];
2498 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2499 	}
2500 
2501 	/*
2502 	 * We've got the params we wanted to query via the firmware.  Now grab
2503 	 * some others directly from the chip.
2504 	 */
2505 	rc = t4_read_chip_settings(sc);
2506 
2507 	return (rc);
2508 }
2509 
2510 static int
2511 set_params__post_init(struct adapter *sc)
2512 {
2513 	uint32_t param, val;
2514 
2515 	/* ask for encapsulated CPLs */
2516 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2517 	val = 1;
2518 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2519 
2520 	return (0);
2521 }
2522 
2523 #undef FW_PARAM_PFVF
2524 #undef FW_PARAM_DEV
2525 
2526 static void
2527 t4_set_desc(struct adapter *sc)
2528 {
2529 	char buf[128];
2530 	struct adapter_params *p = &sc->params;
2531 
2532 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2533 	    p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2534 	    p->vpd.ec);
2535 
2536 	device_set_desc_copy(sc->dev, buf);
2537 }
2538 
2539 static void
2540 build_medialist(struct port_info *pi)
2541 {
2542 	struct ifmedia *media = &pi->media;
2543 	int data, m;
2544 
2545 	PORT_LOCK(pi);
2546 
2547 	ifmedia_removeall(media);
2548 
2549 	m = IFM_ETHER | IFM_FDX;
2550 	data = (pi->port_type << 8) | pi->mod_type;
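	/* Stash the port and module types in each media entry's data field. */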
2551 
2552 	switch (pi->port_type) {
2553 	case FW_PORT_TYPE_BT_XFI:
2554 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		ifmedia_set(media, m | IFM_10G_T);
2555 		break;
2556 
2557 	case FW_PORT_TYPE_BT_XAUI:
2558 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2559 		/* fall through */
2560 
2561 	case FW_PORT_TYPE_BT_SGMII:
2562 		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2563 		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2564 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2565 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2566 		break;
2567 
2568 	case FW_PORT_TYPE_CX4:
2569 		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2570 		ifmedia_set(media, m | IFM_10G_CX4);
2571 		break;
2572 
2573 	case FW_PORT_TYPE_SFP:
2574 	case FW_PORT_TYPE_FIBER_XFI:
2575 	case FW_PORT_TYPE_FIBER_XAUI:
2576 		switch (pi->mod_type) {
2577 
2578 		case FW_PORT_MOD_TYPE_LR:
2579 			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2580 			ifmedia_set(media, m | IFM_10G_LR);
2581 			break;
2582 
2583 		case FW_PORT_MOD_TYPE_SR:
2584 			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2585 			ifmedia_set(media, m | IFM_10G_SR);
2586 			break;
2587 
2588 		case FW_PORT_MOD_TYPE_LRM:
2589 			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2590 			ifmedia_set(media, m | IFM_10G_LRM);
2591 			break;
2592 
2593 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2594 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2595 			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2596 			ifmedia_set(media, m | IFM_10G_TWINAX);
2597 			break;
2598 
2599 		case FW_PORT_MOD_TYPE_NONE:
2600 			m &= ~IFM_FDX;
2601 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2602 			ifmedia_set(media, m | IFM_NONE);
2603 			break;
2604 
2605 		case FW_PORT_MOD_TYPE_NA:
2606 		case FW_PORT_MOD_TYPE_ER:
2607 		default:
2608 			device_printf(pi->dev,
2609 			    "unknown port_type (%d), mod_type (%d)\n",
2610 			    pi->port_type, pi->mod_type);
2611 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2612 			ifmedia_set(media, m | IFM_UNKNOWN);
2613 			break;
2614 		}
2615 		break;
2616 
2617 	case FW_PORT_TYPE_QSFP:
2618 		switch (pi->mod_type) {
2619 
2620 		case FW_PORT_MOD_TYPE_LR:
2621 			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2622 			ifmedia_set(media, m | IFM_40G_LR4);
2623 			break;
2624 
2625 		case FW_PORT_MOD_TYPE_SR:
2626 			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2627 			ifmedia_set(media, m | IFM_40G_SR4);
2628 			break;
2629 
2630 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2631 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2632 			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2633 			ifmedia_set(media, m | IFM_40G_CR4);
2634 			break;
2635 
2636 		case FW_PORT_MOD_TYPE_NONE:
2637 			m &= ~IFM_FDX;
2638 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2639 			ifmedia_set(media, m | IFM_NONE);
2640 			break;
2641 
2642 		default:
2643 			device_printf(pi->dev,
2644 			    "unknown port_type (%d), mod_type (%d)\n",
2645 			    pi->port_type, pi->mod_type);
2646 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2647 			ifmedia_set(media, m | IFM_UNKNOWN);
2648 			break;
2649 		}
2650 		break;
2651 
2652 	default:
2653 		device_printf(pi->dev,
2654 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2655 		    pi->mod_type);
2656 		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2657 		ifmedia_set(media, m | IFM_UNKNOWN);
2658 		break;
2659 	}
2660 
2661 	PORT_UNLOCK(pi);
2662 }
2663 
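/* Number of exact-match MAC addresses to program per firmware request. */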
2664 #define FW_MAC_EXACT_CHUNK	7
2665 
2666 /*
2667  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2668  * indicates which parameters should be programmed (the rest are left alone).
2669  */
2670 static int
2671 update_mac_settings(struct port_info *pi, int flags)
2672 {
2673 	int rc;
2674 	struct ifnet *ifp = pi->ifp;
2675 	struct adapter *sc = pi->adapter;
2676 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2677 
2678 	ASSERT_SYNCHRONIZED_OP(sc);
2679 	KASSERT(flags, ("%s: not told what to update.", __func__));
2680 
2681 	if (flags & XGMAC_MTU)
2682 		mtu = ifp->if_mtu;
2683 
2684 	if (flags & XGMAC_PROMISC)
2685 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2686 
2687 	if (flags & XGMAC_ALLMULTI)
2688 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2689 
2690 	if (flags & XGMAC_VLANEX)
2691 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2692 
2693 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2694 	    vlanex, false);
2695 	if (rc) {
2696 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2697 		return (rc);
2698 	}
2699 
2700 	if (flags & XGMAC_UCADDR) {
2701 		uint8_t ucaddr[ETHER_ADDR_LEN];
2702 
2703 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2704 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2705 		    ucaddr, true, true);
2706 		if (rc < 0) {
2707 			rc = -rc;
2708 			if_printf(ifp, "change_mac failed: %d\n", rc);
2709 			return (rc);
2710 		} else {
2711 			pi->xact_addr_filt = rc;
2712 			rc = 0;
2713 		}
2714 	}
2715 
2716 	if (flags & XGMAC_MCADDRS) {
2717 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2718 		int del = 1;
2719 		uint64_t hash = 0;
2720 		struct ifmultiaddr *ifma;
2721 		int i = 0, j;
2722 
2723 		if_maddr_rlock(ifp);
2724 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2725 			if (ifma->ifma_addr->sa_family != AF_LINK)
2726 				continue;
2727 			mcaddr[i++] =
2728 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2729 
2730 			if (i == FW_MAC_EXACT_CHUNK) {
2731 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2732 				    del, i, mcaddr, NULL, &hash, 0);
2733 				if (rc < 0) {
2734 					rc = -rc;
2735 					for (j = 0; j < i; j++) {
2736 						if_printf(ifp,
2737 						    "failed to add mc address"
2738 						    " %02x:%02x:%02x:"
2739 						    "%02x:%02x:%02x rc=%d\n",
2740 						    mcaddr[j][0], mcaddr[j][1],
2741 						    mcaddr[j][2], mcaddr[j][3],
2742 						    mcaddr[j][4], mcaddr[j][5],
2743 						    rc);
2744 					}
2745 					goto mcfail;
2746 				}
2747 				del = 0;
2748 				i = 0;
2749 			}
2750 		}
2751 		if (i > 0) {
2752 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2753 			    del, i, mcaddr, NULL, &hash, 0);
2754 			if (rc < 0) {
2755 				rc = -rc;
2756 				for (j = 0; j < i; j++) {
2757 					if_printf(ifp,
2758 					    "failed to add mc address"
2759 					    " %02x:%02x:%02x:"
2760 					    "%02x:%02x:%02x rc=%d\n",
2761 					    mcaddr[j][0], mcaddr[j][1],
2762 					    mcaddr[j][2], mcaddr[j][3],
2763 					    mcaddr[j][4], mcaddr[j][5],
2764 					    rc);
2765 				}
2766 				goto mcfail;
2767 			}
2768 		}
2769 
2770 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2771 		if (rc != 0)
2772 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2773 mcfail:
2774 		if_maddr_runlock(ifp);
2775 	}
2776 
2777 	return (rc);
2778 }
2779 
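/*
 * Mark the adapter busy on behalf of a synchronized operation, sleeping for
 * the busy marker if allowed.  Fails with ENXIO if pi is provided and the
 * port is doomed, and with EBUSY if the adapter is busy and sleeping is not
 * allowed.  Typical usage (a sketch; the wait message is a placeholder):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4wmsg");
 *	if (rc != 0)
 *		return (rc);
 *	...	(operate on the adapter/port)
 *	end_synchronized_op(sc, 0);
 */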
2780 int
2781 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2782     char *wmesg)
2783 {
2784 	int rc, pri;
2785 
2786 #ifdef WITNESS
2787 	/* the caller thinks it's ok to sleep, but is it really? */
2788 	if (flags & SLEEP_OK)
2789 		pause("t4slptst", 1);
2790 #endif
2791 
2792 	if (flags & INTR_OK)
2793 		pri = PCATCH;
2794 	else
2795 		pri = 0;
2796 
2797 	ADAPTER_LOCK(sc);
2798 	for (;;) {
2799 
2800 		if (pi && IS_DOOMED(pi)) {
2801 			rc = ENXIO;
2802 			goto done;
2803 		}
2804 
2805 		if (!IS_BUSY(sc)) {
2806 			rc = 0;
2807 			break;
2808 		}
2809 
2810 		if (!(flags & SLEEP_OK)) {
2811 			rc = EBUSY;
2812 			goto done;
2813 		}
2814 
2815 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2816 			rc = EINTR;
2817 			goto done;
2818 		}
2819 	}
2820 
2821 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2822 	SET_BUSY(sc);
2823 #ifdef INVARIANTS
2824 	sc->last_op = wmesg;
2825 	sc->last_op_thr = curthread;
2826 #endif
2827 
2828 done:
2829 	if (!(flags & HOLD_LOCK) || rc)
2830 		ADAPTER_UNLOCK(sc);
2831 
2832 	return (rc);
2833 }
2834 
2835 void
2836 end_synchronized_op(struct adapter *sc, int flags)
2837 {
2838 
2839 	if (flags & LOCK_HELD)
2840 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2841 	else
2842 		ADAPTER_LOCK(sc);
2843 
2844 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2845 	CLR_BUSY(sc);
2846 	wakeup(&sc->flags);
2847 	ADAPTER_UNLOCK(sc);
2848 }
2849 
2850 static int
2851 cxgbe_init_synchronized(struct port_info *pi)
2852 {
2853 	struct adapter *sc = pi->adapter;
2854 	struct ifnet *ifp = pi->ifp;
2855 	int rc = 0;
2856 
2857 	ASSERT_SYNCHRONIZED_OP(sc);
2858 
2859 	if (isset(&sc->open_device_map, pi->port_id)) {
2860 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2861 		    ("mismatch between open_device_map and if_drv_flags"));
2862 		return (0);	/* already running */
2863 	}
2864 
2865 	if (!(sc->flags & FULL_INIT_DONE) &&
2866 	    ((rc = adapter_full_init(sc)) != 0))
2867 		return (rc);	/* error message displayed already */
2868 
2869 	if (!(pi->flags & PORT_INIT_DONE) &&
2870 	    ((rc = port_full_init(pi)) != 0))
2871 		return (rc); /* error message displayed already */
2872 
2873 	rc = update_mac_settings(pi, XGMAC_ALL);
2874 	if (rc)
2875 		goto done;	/* error message displayed already */
2876 
2877 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2878 	if (rc != 0) {
2879 		if_printf(ifp, "start_link failed: %d\n", rc);
2880 		goto done;
2881 	}
2882 
2883 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2884 	if (rc != 0) {
2885 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2886 		goto done;
2887 	}
2888 
2889 	/* all ok */
2890 	setbit(&sc->open_device_map, pi->port_id);
2891 	PORT_LOCK(pi);
2892 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2893 	PORT_UNLOCK(pi);
2894 
2895 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2896 done:
2897 	if (rc != 0)
2898 		cxgbe_uninit_synchronized(pi);
2899 
2900 	return (rc);
2901 }
2902 
2903 /*
2904  * Idempotent.
2905  */
2906 static int
2907 cxgbe_uninit_synchronized(struct port_info *pi)
2908 {
2909 	struct adapter *sc = pi->adapter;
2910 	struct ifnet *ifp = pi->ifp;
2911 	int rc;
2912 
2913 	ASSERT_SYNCHRONIZED_OP(sc);
2914 
2915 	/*
2916 	 * Disable the VI so that all its data in either direction is discarded
2917 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2918 	 * tick) intact as the TP can deliver negative advice or data that it's
2919 	 * holding in its RAM (for an offloaded connection) even after the VI is
2920 	 * disabled.
2921 	 */
2922 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2923 	if (rc) {
2924 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2925 		return (rc);
2926 	}
2927 
2928 	clrbit(&sc->open_device_map, pi->port_id);
2929 	PORT_LOCK(pi);
2930 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2931 	PORT_UNLOCK(pi);
2932 
2933 	pi->link_cfg.link_ok = 0;
2934 	pi->link_cfg.speed = 0;
2935 	t4_os_link_changed(sc, pi->port_id, 0);
2936 
2937 	return (0);
2938 }
2939 
2940 /*
2941  * It is ok for this function to fail midway and return right away.  t4_detach
2942  * will walk the entire sc->irq list and clean up whatever is valid.
2943  */
2944 static int
2945 setup_intr_handlers(struct adapter *sc)
2946 {
2947 	int rc, rid, p, q;
2948 	char s[8];
2949 	struct irq *irq;
2950 	struct port_info *pi;
2951 	struct sge_rxq *rxq;
2952 #ifdef TCP_OFFLOAD
2953 	struct sge_ofld_rxq *ofld_rxq;
2954 #endif
2955 
2956 	/*
2957 	 * Setup interrupts.
2958 	 */
2959 	irq = &sc->irq[0];
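	/*
	 * SYS_RES_IRQ rid 0 is the legacy INTx line; MSI and MSI-X vectors
	 * use rids starting at 1.
	 */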
2960 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
2961 	if (sc->intr_count == 1) {
2962 		KASSERT(!(sc->flags & INTR_DIRECT),
2963 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
2964 
2965 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2966 		if (rc != 0)
2967 			return (rc);
2968 	} else {
2969 		/* Multiple interrupts. */
2970 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2971 		    ("%s: too few intr.", __func__));
2972 
2973 		/* The first one is always error intr */
2974 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
2975 		if (rc != 0)
2976 			return (rc);
2977 		irq++;
2978 		rid++;
2979 
2980 		/* The second one is always the firmware event queue */
2981 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2982 		    "evt");
2983 		if (rc != 0)
2984 			return (rc);
2985 		irq++;
2986 		rid++;
2987 
2988 		/*
2989 		 * Note that if INTR_DIRECT is not set then exactly one of the
2990 		 * NIC rx queues or the TOE rx queues (never both) will be
2991 		 * taking direct interrupts.
2992 		 *
2993 		 * There is no need to check for is_offload(sc) as nofldrxq
2994 		 * will be 0 if offload is disabled.
2995 		 */
2996 		for_each_port(sc, p) {
2997 			pi = sc->port[p];
2998 
2999 #ifdef TCP_OFFLOAD
3000 			/*
3001 			 * Skip over the NIC queues if they aren't taking direct
3002 			 * interrupts.
3003 			 */
3004 			if (!(sc->flags & INTR_DIRECT) &&
3005 			    pi->nofldrxq > pi->nrxq)
3006 				goto ofld_queues;
3007 #endif
3008 			rxq = &sc->sge.rxq[pi->first_rxq];
3009 			for (q = 0; q < pi->nrxq; q++, rxq++) {
3010 				snprintf(s, sizeof(s), "%d.%d", p, q);
3011 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3012 				    s);
3013 				if (rc != 0)
3014 					return (rc);
3015 				irq++;
3016 				rid++;
3017 			}
3018 
3019 #ifdef TCP_OFFLOAD
3020 			/*
3021 			 * Skip over the offload queues if they aren't taking
3022 			 * direct interrupts.
3023 			 */
3024 			if (!(sc->flags & INTR_DIRECT))
3025 				continue;
3026 ofld_queues:
3027 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3028 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3029 				snprintf(s, sizeof(s), "%d,%d", p, q);
3030 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3031 				    ofld_rxq, s);
3032 				if (rc != 0)
3033 					return (rc);
3034 				irq++;
3035 				rid++;
3036 			}
3037 #endif
3038 		}
3039 	}
3040 
3041 	return (0);
3042 }
3043 
3044 static int
3045 adapter_full_init(struct adapter *sc)
3046 {
3047 	int rc, i;
3048 
3049 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3050 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3051 	    ("%s: FULL_INIT_DONE already", __func__));
3052 
3053 	/*
3054 	 * queues that belong to the adapter (not any particular port).
3055 	 */
3056 	rc = t4_setup_adapter_queues(sc);
3057 	if (rc != 0)
3058 		goto done;
3059 
3060 	for (i = 0; i < nitems(sc->tq); i++) {
3061 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3062 		    taskqueue_thread_enqueue, &sc->tq[i]);
3063 		if (sc->tq[i] == NULL) {
3064 			device_printf(sc->dev,
3065 			    "failed to allocate task queue %d\n", i);
3066 			rc = ENOMEM;
3067 			goto done;
3068 		}
3069 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3070 		    device_get_nameunit(sc->dev), i);
3071 	}
3072 
3073 	t4_intr_enable(sc);
3074 	sc->flags |= FULL_INIT_DONE;
3075 done:
3076 	if (rc != 0)
3077 		adapter_full_uninit(sc);
3078 
3079 	return (rc);
3080 }
3081 
3082 static int
3083 adapter_full_uninit(struct adapter *sc)
3084 {
3085 	int i;
3086 
3087 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3088 
3089 	t4_teardown_adapter_queues(sc);
3090 
3091 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3092 		taskqueue_free(sc->tq[i]);
3093 		sc->tq[i] = NULL;
3094 	}
3095 
3096 	sc->flags &= ~FULL_INIT_DONE;
3097 
3098 	return (0);
3099 }
3100 
3101 static int
3102 port_full_init(struct port_info *pi)
3103 {
3104 	struct adapter *sc = pi->adapter;
3105 	struct ifnet *ifp = pi->ifp;
3106 	uint16_t *rss;
3107 	struct sge_rxq *rxq;
3108 	int rc, i;
3109 
3110 	ASSERT_SYNCHRONIZED_OP(sc);
3111 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3112 	    ("%s: PORT_INIT_DONE already", __func__));
3113 
3114 	sysctl_ctx_init(&pi->ctx);
3115 	pi->flags |= PORT_SYSCTL_CTX;
3116 
3117 	/*
3118 	 * Allocate tx/rx/fl queues for this port.
3119 	 */
3120 	rc = t4_setup_port_queues(pi);
3121 	if (rc != 0)
3122 		goto done;	/* error message displayed already */
3123 
3124 	/*
3125 	 * Setup RSS for this port.
3126 	 */
3127 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3128 	    M_ZERO | M_WAITOK);
3129 	for_each_rxq(pi, i, rxq) {
3130 		rss[i] = rxq->iq.abs_id;
3131 	}
3132 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3133 	    pi->rss_size, rss, pi->nrxq);
3134 	free(rss, M_CXGBE);
3135 	if (rc != 0) {
3136 		if_printf(ifp, "rss_config failed: %d\n", rc);
3137 		goto done;
3138 	}
3139 
3140 	pi->flags |= PORT_INIT_DONE;
3141 done:
3142 	if (rc != 0)
3143 		port_full_uninit(pi);
3144 
3145 	return (rc);
3146 }
3147 
3148 /*
3149  * Idempotent.
3150  */
3151 static int
3152 port_full_uninit(struct port_info *pi)
3153 {
3154 	struct adapter *sc = pi->adapter;
3155 	int i;
3156 	struct sge_rxq *rxq;
3157 	struct sge_txq *txq;
3158 #ifdef TCP_OFFLOAD
3159 	struct sge_ofld_rxq *ofld_rxq;
3160 	struct sge_wrq *ofld_txq;
3161 #endif
3162 
3163 	if (pi->flags & PORT_INIT_DONE) {
3164 
3165 		/* Need to quiesce queues.  XXX: ctrl queues? */
3166 
3167 		for_each_txq(pi, i, txq) {
3168 			quiesce_eq(sc, &txq->eq);
3169 		}
3170 
3171 #ifdef TCP_OFFLOAD
3172 		for_each_ofld_txq(pi, i, ofld_txq) {
3173 			quiesce_eq(sc, &ofld_txq->eq);
3174 		}
3175 #endif
3176 
3177 		for_each_rxq(pi, i, rxq) {
3178 			quiesce_iq(sc, &rxq->iq);
3179 			quiesce_fl(sc, &rxq->fl);
3180 		}
3181 
3182 #ifdef TCP_OFFLOAD
3183 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3184 			quiesce_iq(sc, &ofld_rxq->iq);
3185 			quiesce_fl(sc, &ofld_rxq->fl);
3186 		}
3187 #endif
3188 	}
3189 
3190 	t4_teardown_port_queues(pi);
3191 	pi->flags &= ~PORT_INIT_DONE;
3192 
3193 	return (0);
3194 }
3195 
3196 static void
3197 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3198 {
3199 	EQ_LOCK(eq);
3200 	eq->flags |= EQ_DOOMED;
3201 
3202 	/*
3203 	 * Wait for the response to a credit flush if one's
3204 	 * pending.
3205 	 */
3206 	while (eq->flags & EQ_CRFLUSHED)
3207 		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3208 	EQ_UNLOCK(eq);
3209 
3210 	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3211 	pause("callout", 10);		/* Still iffy */
3212 
3213 	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3214 }
3215 
3216 static void
3217 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3218 {
3219 	(void) sc;	/* unused */
3220 
3221 	/* Synchronize with the interrupt handler */
3222 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3223 		pause("iqfree", 1);
3224 }
3225 
3226 static void
3227 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3228 {
3229 	mtx_lock(&sc->sfl_lock);
3230 	FL_LOCK(fl);
3231 	fl->flags |= FL_DOOMED;
3232 	FL_UNLOCK(fl);
3233 	mtx_unlock(&sc->sfl_lock);
3234 
3235 	callout_drain(&sc->sfl_callout);
3236 	KASSERT((fl->flags & FL_STARVING) == 0,
3237 	    ("%s: still starving", __func__));
3238 }
3239 
3240 static int
3241 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3242     driver_intr_t *handler, void *arg, char *name)
3243 {
3244 	int rc;
3245 
3246 	irq->rid = rid;
3247 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3248 	    RF_SHAREABLE | RF_ACTIVE);
3249 	if (irq->res == NULL) {
3250 		device_printf(sc->dev,
3251 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3252 		return (ENOMEM);
3253 	}
3254 
3255 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3256 	    NULL, handler, arg, &irq->tag);
3257 	if (rc != 0) {
3258 		device_printf(sc->dev,
3259 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3260 		    rid, name, rc);
3261 	} else if (name)
3262 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3263 
3264 	return (rc);
3265 }
3266 
3267 static int
3268 t4_free_irq(struct adapter *sc, struct irq *irq)
3269 {
3270 	if (irq->tag)
3271 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3272 	if (irq->res)
3273 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3274 
3275 	bzero(irq, sizeof(*irq));
3276 
3277 	return (0);
3278 }
3279 
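/* Read registers start..end (both inclusive) into buf at matching offsets. */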
3280 static void
3281 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3282     unsigned int end)
3283 {
3284 	uint32_t *p = (uint32_t *)(buf + start);
3285 
3286 	for ( ; start <= end; start += sizeof(uint32_t))
3287 		*p++ = t4_read_reg(sc, start);
3288 }
3289 
3290 static void
3291 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3292 {
3293 	int i, n;
3294 	const unsigned int *reg_ranges;
3295 	static const unsigned int t4_reg_ranges[] = {
3296 		0x1008, 0x1108,
3297 		0x1180, 0x11b4,
3298 		0x11fc, 0x123c,
3299 		0x1300, 0x173c,
3300 		0x1800, 0x18fc,
3301 		0x3000, 0x30d8,
3302 		0x30e0, 0x5924,
3303 		0x5960, 0x59d4,
3304 		0x5a00, 0x5af8,
3305 		0x6000, 0x6098,
3306 		0x6100, 0x6150,
3307 		0x6200, 0x6208,
3308 		0x6240, 0x6248,
3309 		0x6280, 0x6338,
3310 		0x6370, 0x638c,
3311 		0x6400, 0x643c,
3312 		0x6500, 0x6524,
3313 		0x6a00, 0x6a38,
3314 		0x6a60, 0x6a78,
3315 		0x6b00, 0x6b84,
3316 		0x6bf0, 0x6c84,
3317 		0x6cf0, 0x6d84,
3318 		0x6df0, 0x6e84,
3319 		0x6ef0, 0x6f84,
3320 		0x6ff0, 0x7084,
3321 		0x70f0, 0x7184,
3322 		0x71f0, 0x7284,
3323 		0x72f0, 0x7384,
3324 		0x73f0, 0x7450,
3325 		0x7500, 0x7530,
3326 		0x7600, 0x761c,
3327 		0x7680, 0x76cc,
3328 		0x7700, 0x7798,
3329 		0x77c0, 0x77fc,
3330 		0x7900, 0x79fc,
3331 		0x7b00, 0x7c38,
3332 		0x7d00, 0x7efc,
3333 		0x8dc0, 0x8e1c,
3334 		0x8e30, 0x8e78,
3335 		0x8ea0, 0x8f6c,
3336 		0x8fc0, 0x9074,
3337 		0x90fc, 0x90fc,
3338 		0x9400, 0x9458,
3339 		0x9600, 0x96bc,
3340 		0x9800, 0x9808,
3341 		0x9820, 0x983c,
3342 		0x9850, 0x9864,
3343 		0x9c00, 0x9c6c,
3344 		0x9c80, 0x9cec,
3345 		0x9d00, 0x9d6c,
3346 		0x9d80, 0x9dec,
3347 		0x9e00, 0x9e6c,
3348 		0x9e80, 0x9eec,
3349 		0x9f00, 0x9f6c,
3350 		0x9f80, 0x9fec,
3351 		0xd004, 0xd03c,
3352 		0xdfc0, 0xdfe0,
3353 		0xe000, 0xea7c,
3354 		0xf000, 0x11190,
3355 		0x19040, 0x1906c,
3356 		0x19078, 0x19080,
3357 		0x1908c, 0x19124,
3358 		0x19150, 0x191b0,
3359 		0x191d0, 0x191e8,
3360 		0x19238, 0x1924c,
3361 		0x193f8, 0x19474,
3362 		0x19490, 0x194f8,
3363 		0x19800, 0x19f30,
3364 		0x1a000, 0x1a06c,
3365 		0x1a0b0, 0x1a120,
3366 		0x1a128, 0x1a138,
3367 		0x1a190, 0x1a1c4,
3368 		0x1a1fc, 0x1a1fc,
3369 		0x1e040, 0x1e04c,
3370 		0x1e284, 0x1e28c,
3371 		0x1e2c0, 0x1e2c0,
3372 		0x1e2e0, 0x1e2e0,
3373 		0x1e300, 0x1e384,
3374 		0x1e3c0, 0x1e3c8,
3375 		0x1e440, 0x1e44c,
3376 		0x1e684, 0x1e68c,
3377 		0x1e6c0, 0x1e6c0,
3378 		0x1e6e0, 0x1e6e0,
3379 		0x1e700, 0x1e784,
3380 		0x1e7c0, 0x1e7c8,
3381 		0x1e840, 0x1e84c,
3382 		0x1ea84, 0x1ea8c,
3383 		0x1eac0, 0x1eac0,
3384 		0x1eae0, 0x1eae0,
3385 		0x1eb00, 0x1eb84,
3386 		0x1ebc0, 0x1ebc8,
3387 		0x1ec40, 0x1ec4c,
3388 		0x1ee84, 0x1ee8c,
3389 		0x1eec0, 0x1eec0,
3390 		0x1eee0, 0x1eee0,
3391 		0x1ef00, 0x1ef84,
3392 		0x1efc0, 0x1efc8,
3393 		0x1f040, 0x1f04c,
3394 		0x1f284, 0x1f28c,
3395 		0x1f2c0, 0x1f2c0,
3396 		0x1f2e0, 0x1f2e0,
3397 		0x1f300, 0x1f384,
3398 		0x1f3c0, 0x1f3c8,
3399 		0x1f440, 0x1f44c,
3400 		0x1f684, 0x1f68c,
3401 		0x1f6c0, 0x1f6c0,
3402 		0x1f6e0, 0x1f6e0,
3403 		0x1f700, 0x1f784,
3404 		0x1f7c0, 0x1f7c8,
3405 		0x1f840, 0x1f84c,
3406 		0x1fa84, 0x1fa8c,
3407 		0x1fac0, 0x1fac0,
3408 		0x1fae0, 0x1fae0,
3409 		0x1fb00, 0x1fb84,
3410 		0x1fbc0, 0x1fbc8,
3411 		0x1fc40, 0x1fc4c,
3412 		0x1fe84, 0x1fe8c,
3413 		0x1fec0, 0x1fec0,
3414 		0x1fee0, 0x1fee0,
3415 		0x1ff00, 0x1ff84,
3416 		0x1ffc0, 0x1ffc8,
3417 		0x20000, 0x2002c,
3418 		0x20100, 0x2013c,
3419 		0x20190, 0x201c8,
3420 		0x20200, 0x20318,
3421 		0x20400, 0x20528,
3422 		0x20540, 0x20614,
3423 		0x21000, 0x21040,
3424 		0x2104c, 0x21060,
3425 		0x210c0, 0x210ec,
3426 		0x21200, 0x21268,
3427 		0x21270, 0x21284,
3428 		0x212fc, 0x21388,
3429 		0x21400, 0x21404,
3430 		0x21500, 0x21518,
3431 		0x2152c, 0x2153c,
3432 		0x21550, 0x21554,
3433 		0x21600, 0x21600,
3434 		0x21608, 0x21628,
3435 		0x21630, 0x2163c,
3436 		0x21700, 0x2171c,
3437 		0x21780, 0x2178c,
3438 		0x21800, 0x21c38,
3439 		0x21c80, 0x21d7c,
3440 		0x21e00, 0x21e04,
3441 		0x22000, 0x2202c,
3442 		0x22100, 0x2213c,
3443 		0x22190, 0x221c8,
3444 		0x22200, 0x22318,
3445 		0x22400, 0x22528,
3446 		0x22540, 0x22614,
3447 		0x23000, 0x23040,
3448 		0x2304c, 0x23060,
3449 		0x230c0, 0x230ec,
3450 		0x23200, 0x23268,
3451 		0x23270, 0x23284,
3452 		0x232fc, 0x23388,
3453 		0x23400, 0x23404,
3454 		0x23500, 0x23518,
3455 		0x2352c, 0x2353c,
3456 		0x23550, 0x23554,
3457 		0x23600, 0x23600,
3458 		0x23608, 0x23628,
3459 		0x23630, 0x2363c,
3460 		0x23700, 0x2371c,
3461 		0x23780, 0x2378c,
3462 		0x23800, 0x23c38,
3463 		0x23c80, 0x23d7c,
3464 		0x23e00, 0x23e04,
3465 		0x24000, 0x2402c,
3466 		0x24100, 0x2413c,
3467 		0x24190, 0x241c8,
3468 		0x24200, 0x24318,
3469 		0x24400, 0x24528,
3470 		0x24540, 0x24614,
3471 		0x25000, 0x25040,
3472 		0x2504c, 0x25060,
3473 		0x250c0, 0x250ec,
3474 		0x25200, 0x25268,
3475 		0x25270, 0x25284,
3476 		0x252fc, 0x25388,
3477 		0x25400, 0x25404,
3478 		0x25500, 0x25518,
3479 		0x2552c, 0x2553c,
3480 		0x25550, 0x25554,
3481 		0x25600, 0x25600,
3482 		0x25608, 0x25628,
3483 		0x25630, 0x2563c,
3484 		0x25700, 0x2571c,
3485 		0x25780, 0x2578c,
3486 		0x25800, 0x25c38,
3487 		0x25c80, 0x25d7c,
3488 		0x25e00, 0x25e04,
3489 		0x26000, 0x2602c,
3490 		0x26100, 0x2613c,
3491 		0x26190, 0x261c8,
3492 		0x26200, 0x26318,
3493 		0x26400, 0x26528,
3494 		0x26540, 0x26614,
3495 		0x27000, 0x27040,
3496 		0x2704c, 0x27060,
3497 		0x270c0, 0x270ec,
3498 		0x27200, 0x27268,
3499 		0x27270, 0x27284,
3500 		0x272fc, 0x27388,
3501 		0x27400, 0x27404,
3502 		0x27500, 0x27518,
3503 		0x2752c, 0x2753c,
3504 		0x27550, 0x27554,
3505 		0x27600, 0x27600,
3506 		0x27608, 0x27628,
3507 		0x27630, 0x2763c,
3508 		0x27700, 0x2771c,
3509 		0x27780, 0x2778c,
3510 		0x27800, 0x27c38,
3511 		0x27c80, 0x27d7c,
3512 		0x27e00, 0x27e04
3513 	};
3514 	static const unsigned int t5_reg_ranges[] = {
3515 		0x1008, 0x1148,
3516 		0x1180, 0x11b4,
3517 		0x11fc, 0x123c,
3518 		0x1280, 0x173c,
3519 		0x1800, 0x18fc,
3520 		0x3000, 0x3028,
3521 		0x3060, 0x30d8,
3522 		0x30e0, 0x30fc,
3523 		0x3140, 0x357c,
3524 		0x35a8, 0x35cc,
3525 		0x35ec, 0x35ec,
3526 		0x3600, 0x5624,
3527 		0x56cc, 0x575c,
3528 		0x580c, 0x5814,
3529 		0x5890, 0x58bc,
3530 		0x5940, 0x59dc,
3531 		0x59fc, 0x5a18,
3532 		0x5a60, 0x5a9c,
3533 		0x5b94, 0x5bfc,
3534 		0x6000, 0x6040,
3535 		0x6058, 0x614c,
3536 		0x7700, 0x7798,
3537 		0x77c0, 0x78fc,
3538 		0x7b00, 0x7c54,
3539 		0x7d00, 0x7efc,
3540 		0x8dc0, 0x8de0,
3541 		0x8df8, 0x8e84,
3542 		0x8ea0, 0x8f84,
3543 		0x8fc0, 0x90f8,
3544 		0x9400, 0x9470,
3545 		0x9600, 0x96f4,
3546 		0x9800, 0x9808,
3547 		0x9820, 0x983c,
3548 		0x9850, 0x9864,
3549 		0x9c00, 0x9c6c,
3550 		0x9c80, 0x9cec,
3551 		0x9d00, 0x9d6c,
3552 		0x9d80, 0x9dec,
3553 		0x9e00, 0x9e6c,
3554 		0x9e80, 0x9eec,
3555 		0x9f00, 0x9f6c,
3556 		0x9f80, 0xa020,
3557 		0xd004, 0xd03c,
3558 		0xdfc0, 0xdfe0,
3559 		0xe000, 0x11088,
3560 		0x1109c, 0x1117c,
3561 		0x11190, 0x11204,
3562 		0x19040, 0x1906c,
3563 		0x19078, 0x19080,
3564 		0x1908c, 0x19124,
3565 		0x19150, 0x191b0,
3566 		0x191d0, 0x191e8,
3567 		0x19238, 0x19290,
3568 		0x193f8, 0x19474,
3569 		0x19490, 0x194cc,
3570 		0x194f0, 0x194f8,
3571 		0x19c00, 0x19c60,
3572 		0x19c94, 0x19e10,
3573 		0x19e50, 0x19f34,
3574 		0x19f40, 0x19f50,
3575 		0x19f90, 0x19fe4,
3576 		0x1a000, 0x1a06c,
3577 		0x1a0b0, 0x1a120,
3578 		0x1a128, 0x1a138,
3579 		0x1a190, 0x1a1c4,
3580 		0x1a1fc, 0x1a1fc,
3581 		0x1e008, 0x1e00c,
3582 		0x1e040, 0x1e04c,
3583 		0x1e284, 0x1e290,
3584 		0x1e2c0, 0x1e2c0,
3585 		0x1e2e0, 0x1e2e0,
3586 		0x1e300, 0x1e384,
3587 		0x1e3c0, 0x1e3c8,
3588 		0x1e408, 0x1e40c,
3589 		0x1e440, 0x1e44c,
3590 		0x1e684, 0x1e690,
3591 		0x1e6c0, 0x1e6c0,
3592 		0x1e6e0, 0x1e6e0,
3593 		0x1e700, 0x1e784,
3594 		0x1e7c0, 0x1e7c8,
3595 		0x1e808, 0x1e80c,
3596 		0x1e840, 0x1e84c,
3597 		0x1ea84, 0x1ea90,
3598 		0x1eac0, 0x1eac0,
3599 		0x1eae0, 0x1eae0,
3600 		0x1eb00, 0x1eb84,
3601 		0x1ebc0, 0x1ebc8,
3602 		0x1ec08, 0x1ec0c,
3603 		0x1ec40, 0x1ec4c,
3604 		0x1ee84, 0x1ee90,
3605 		0x1eec0, 0x1eec0,
3606 		0x1eee0, 0x1eee0,
3607 		0x1ef00, 0x1ef84,
3608 		0x1efc0, 0x1efc8,
3609 		0x1f008, 0x1f00c,
3610 		0x1f040, 0x1f04c,
3611 		0x1f284, 0x1f290,
3612 		0x1f2c0, 0x1f2c0,
3613 		0x1f2e0, 0x1f2e0,
3614 		0x1f300, 0x1f384,
3615 		0x1f3c0, 0x1f3c8,
3616 		0x1f408, 0x1f40c,
3617 		0x1f440, 0x1f44c,
3618 		0x1f684, 0x1f690,
3619 		0x1f6c0, 0x1f6c0,
3620 		0x1f6e0, 0x1f6e0,
3621 		0x1f700, 0x1f784,
3622 		0x1f7c0, 0x1f7c8,
3623 		0x1f808, 0x1f80c,
3624 		0x1f840, 0x1f84c,
3625 		0x1fa84, 0x1fa90,
3626 		0x1fac0, 0x1fac0,
3627 		0x1fae0, 0x1fae0,
3628 		0x1fb00, 0x1fb84,
3629 		0x1fbc0, 0x1fbc8,
3630 		0x1fc08, 0x1fc0c,
3631 		0x1fc40, 0x1fc4c,
3632 		0x1fe84, 0x1fe90,
3633 		0x1fec0, 0x1fec0,
3634 		0x1fee0, 0x1fee0,
3635 		0x1ff00, 0x1ff84,
3636 		0x1ffc0, 0x1ffc8,
3637 		0x30000, 0x30040,
3638 		0x30100, 0x30144,
3639 		0x30190, 0x301d0,
3640 		0x30200, 0x30318,
3641 		0x30400, 0x3052c,
3642 		0x30540, 0x3061c,
3643 		0x30800, 0x30834,
3644 		0x308c0, 0x30908,
3645 		0x30910, 0x309ac,
3646 		0x30a00, 0x30a04,
3647 		0x30a0c, 0x30a2c,
3648 		0x30a44, 0x30a50,
3649 		0x30a74, 0x30c24,
3650 		0x30d08, 0x30d14,
3651 		0x30d1c, 0x30d20,
3652 		0x30d3c, 0x30d50,
3653 		0x31200, 0x3120c,
3654 		0x31220, 0x31220,
3655 		0x31240, 0x31240,
3656 		0x31600, 0x31600,
3657 		0x31608, 0x3160c,
3658 		0x31a00, 0x31a1c,
3659 		0x31e04, 0x31e20,
3660 		0x31e38, 0x31e3c,
3661 		0x31e80, 0x31e80,
3662 		0x31e88, 0x31ea8,
3663 		0x31eb0, 0x31eb4,
3664 		0x31ec8, 0x31ed4,
3665 		0x31fb8, 0x32004,
3666 		0x32208, 0x3223c,
3667 		0x32248, 0x3227c,
3668 		0x32288, 0x322bc,
3669 		0x322c8, 0x322fc,
3670 		0x32600, 0x32630,
3671 		0x32a00, 0x32abc,
3672 		0x32b00, 0x32b70,
3673 		0x33000, 0x33048,
3674 		0x33060, 0x3309c,
3675 		0x330f0, 0x33148,
3676 		0x33160, 0x3319c,
3677 		0x331f0, 0x332e4,
3678 		0x332f8, 0x333e4,
3679 		0x333f8, 0x33448,
3680 		0x33460, 0x3349c,
3681 		0x334f0, 0x33548,
3682 		0x33560, 0x3359c,
3683 		0x335f0, 0x336e4,
3684 		0x336f8, 0x337e4,
3685 		0x337f8, 0x337fc,
3686 		0x33814, 0x33814,
3687 		0x3382c, 0x3382c,
3688 		0x33880, 0x3388c,
3689 		0x338e8, 0x338ec,
3690 		0x33900, 0x33948,
3691 		0x33960, 0x3399c,
3692 		0x339f0, 0x33ae4,
3693 		0x33af8, 0x33b10,
3694 		0x33b28, 0x33b28,
3695 		0x33b3c, 0x33b50,
3696 		0x33bf0, 0x33c10,
3697 		0x33c28, 0x33c28,
3698 		0x33c3c, 0x33c50,
3699 		0x33cf0, 0x33cfc,
3700 		0x34000, 0x34040,
3701 		0x34100, 0x34144,
3702 		0x34190, 0x341d0,
3703 		0x34200, 0x34318,
3704 		0x34400, 0x3452c,
3705 		0x34540, 0x3461c,
3706 		0x34800, 0x34834,
3707 		0x348c0, 0x34908,
3708 		0x34910, 0x349ac,
3709 		0x34a00, 0x34a04,
3710 		0x34a0c, 0x34a2c,
3711 		0x34a44, 0x34a50,
3712 		0x34a74, 0x34c24,
3713 		0x34d08, 0x34d14,
3714 		0x34d1c, 0x34d20,
3715 		0x34d3c, 0x34d50,
3716 		0x35200, 0x3520c,
3717 		0x35220, 0x35220,
3718 		0x35240, 0x35240,
3719 		0x35600, 0x35600,
3720 		0x35608, 0x3560c,
3721 		0x35a00, 0x35a1c,
3722 		0x35e04, 0x35e20,
3723 		0x35e38, 0x35e3c,
3724 		0x35e80, 0x35e80,
3725 		0x35e88, 0x35ea8,
3726 		0x35eb0, 0x35eb4,
3727 		0x35ec8, 0x35ed4,
3728 		0x35fb8, 0x36004,
3729 		0x36208, 0x3623c,
3730 		0x36248, 0x3627c,
3731 		0x36288, 0x362bc,
3732 		0x362c8, 0x362fc,
3733 		0x36600, 0x36630,
3734 		0x36a00, 0x36abc,
3735 		0x36b00, 0x36b70,
3736 		0x37000, 0x37048,
3737 		0x37060, 0x3709c,
3738 		0x370f0, 0x37148,
3739 		0x37160, 0x3719c,
3740 		0x371f0, 0x372e4,
3741 		0x372f8, 0x373e4,
3742 		0x373f8, 0x37448,
3743 		0x37460, 0x3749c,
3744 		0x374f0, 0x37548,
3745 		0x37560, 0x3759c,
3746 		0x375f0, 0x376e4,
3747 		0x376f8, 0x377e4,
3748 		0x377f8, 0x377fc,
3749 		0x37814, 0x37814,
3750 		0x3782c, 0x3782c,
3751 		0x37880, 0x3788c,
3752 		0x378e8, 0x378ec,
3753 		0x37900, 0x37948,
3754 		0x37960, 0x3799c,
3755 		0x379f0, 0x37ae4,
3756 		0x37af8, 0x37b10,
3757 		0x37b28, 0x37b28,
3758 		0x37b3c, 0x37b50,
3759 		0x37bf0, 0x37c10,
3760 		0x37c28, 0x37c28,
3761 		0x37c3c, 0x37c50,
3762 		0x37cf0, 0x37cfc,
3763 		0x38000, 0x38040,
3764 		0x38100, 0x38144,
3765 		0x38190, 0x381d0,
3766 		0x38200, 0x38318,
3767 		0x38400, 0x3852c,
3768 		0x38540, 0x3861c,
3769 		0x38800, 0x38834,
3770 		0x388c0, 0x38908,
3771 		0x38910, 0x389ac,
3772 		0x38a00, 0x38a04,
3773 		0x38a0c, 0x38a2c,
3774 		0x38a44, 0x38a50,
3775 		0x38a74, 0x38c24,
3776 		0x38d08, 0x38d14,
3777 		0x38d1c, 0x38d20,
3778 		0x38d3c, 0x38d50,
3779 		0x39200, 0x3920c,
3780 		0x39220, 0x39220,
3781 		0x39240, 0x39240,
3782 		0x39600, 0x39600,
3783 		0x39608, 0x3960c,
3784 		0x39a00, 0x39a1c,
3785 		0x39e04, 0x39e20,
3786 		0x39e38, 0x39e3c,
3787 		0x39e80, 0x39e80,
3788 		0x39e88, 0x39ea8,
3789 		0x39eb0, 0x39eb4,
3790 		0x39ec8, 0x39ed4,
3791 		0x39fb8, 0x3a004,
3792 		0x3a208, 0x3a23c,
3793 		0x3a248, 0x3a27c,
3794 		0x3a288, 0x3a2bc,
3795 		0x3a2c8, 0x3a2fc,
3796 		0x3a600, 0x3a630,
3797 		0x3aa00, 0x3aabc,
3798 		0x3ab00, 0x3ab70,
3799 		0x3b000, 0x3b048,
3800 		0x3b060, 0x3b09c,
3801 		0x3b0f0, 0x3b148,
3802 		0x3b160, 0x3b19c,
3803 		0x3b1f0, 0x3b2e4,
3804 		0x3b2f8, 0x3b3e4,
3805 		0x3b3f8, 0x3b448,
3806 		0x3b460, 0x3b49c,
3807 		0x3b4f0, 0x3b548,
3808 		0x3b560, 0x3b59c,
3809 		0x3b5f0, 0x3b6e4,
3810 		0x3b6f8, 0x3b7e4,
3811 		0x3b7f8, 0x3b7fc,
3812 		0x3b814, 0x3b814,
3813 		0x3b82c, 0x3b82c,
3814 		0x3b880, 0x3b88c,
3815 		0x3b8e8, 0x3b8ec,
3816 		0x3b900, 0x3b948,
3817 		0x3b960, 0x3b99c,
3818 		0x3b9f0, 0x3bae4,
3819 		0x3baf8, 0x3bb10,
3820 		0x3bb28, 0x3bb28,
3821 		0x3bb3c, 0x3bb50,
3822 		0x3bbf0, 0x3bc10,
3823 		0x3bc28, 0x3bc28,
3824 		0x3bc3c, 0x3bc50,
3825 		0x3bcf0, 0x3bcfc,
3826 		0x3c000, 0x3c040,
3827 		0x3c100, 0x3c144,
3828 		0x3c190, 0x3c1d0,
3829 		0x3c200, 0x3c318,
3830 		0x3c400, 0x3c52c,
3831 		0x3c540, 0x3c61c,
3832 		0x3c800, 0x3c834,
3833 		0x3c8c0, 0x3c908,
3834 		0x3c910, 0x3c9ac,
3835 		0x3ca00, 0x3ca04,
3836 		0x3ca0c, 0x3ca2c,
3837 		0x3ca44, 0x3ca50,
3838 		0x3ca74, 0x3cc24,
3839 		0x3cd08, 0x3cd14,
3840 		0x3cd1c, 0x3cd20,
3841 		0x3cd3c, 0x3cd50,
3842 		0x3d200, 0x3d20c,
3843 		0x3d220, 0x3d220,
3844 		0x3d240, 0x3d240,
3845 		0x3d600, 0x3d600,
3846 		0x3d608, 0x3d60c,
3847 		0x3da00, 0x3da1c,
3848 		0x3de04, 0x3de20,
3849 		0x3de38, 0x3de3c,
3850 		0x3de80, 0x3de80,
3851 		0x3de88, 0x3dea8,
3852 		0x3deb0, 0x3deb4,
3853 		0x3dec8, 0x3ded4,
3854 		0x3dfb8, 0x3e004,
3855 		0x3e208, 0x3e23c,
3856 		0x3e248, 0x3e27c,
3857 		0x3e288, 0x3e2bc,
3858 		0x3e2c8, 0x3e2fc,
3859 		0x3e600, 0x3e630,
3860 		0x3ea00, 0x3eabc,
3861 		0x3eb00, 0x3eb70,
3862 		0x3f000, 0x3f048,
3863 		0x3f060, 0x3f09c,
3864 		0x3f0f0, 0x3f148,
3865 		0x3f160, 0x3f19c,
3866 		0x3f1f0, 0x3f2e4,
3867 		0x3f2f8, 0x3f3e4,
3868 		0x3f3f8, 0x3f448,
3869 		0x3f460, 0x3f49c,
3870 		0x3f4f0, 0x3f548,
3871 		0x3f560, 0x3f59c,
3872 		0x3f5f0, 0x3f6e4,
3873 		0x3f6f8, 0x3f7e4,
3874 		0x3f7f8, 0x3f7fc,
3875 		0x3f814, 0x3f814,
3876 		0x3f82c, 0x3f82c,
3877 		0x3f880, 0x3f88c,
3878 		0x3f8e8, 0x3f8ec,
3879 		0x3f900, 0x3f948,
3880 		0x3f960, 0x3f99c,
3881 		0x3f9f0, 0x3fae4,
3882 		0x3faf8, 0x3fb10,
3883 		0x3fb28, 0x3fb28,
3884 		0x3fb3c, 0x3fb50,
3885 		0x3fbf0, 0x3fc10,
3886 		0x3fc28, 0x3fc28,
3887 		0x3fc3c, 0x3fc50,
3888 		0x3fcf0, 0x3fcfc,
3889 		0x40000, 0x4000c,
3890 		0x40040, 0x40068,
3891 		0x4007c, 0x40144,
3892 		0x40180, 0x4018c,
3893 		0x40200, 0x40298,
3894 		0x402ac, 0x4033c,
3895 		0x403f8, 0x403fc,
3896 		0x41300, 0x413c4,
3897 		0x41400, 0x4141c,
3898 		0x41480, 0x414d0,
3899 		0x44000, 0x44078,
3900 		0x440c0, 0x44278,
3901 		0x442c0, 0x44478,
3902 		0x444c0, 0x44678,
3903 		0x446c0, 0x44878,
3904 		0x448c0, 0x449fc,
3905 		0x45000, 0x45068,
3906 		0x45080, 0x45084,
3907 		0x450a0, 0x450b0,
3908 		0x45200, 0x45268,
3909 		0x45280, 0x45284,
3910 		0x452a0, 0x452b0,
3911 		0x460c0, 0x460e4,
3912 		0x47000, 0x4708c,
3913 		0x47200, 0x47250,
3914 		0x47400, 0x47420,
3915 		0x47600, 0x47618,
3916 		0x47800, 0x47814,
3917 		0x48000, 0x4800c,
3918 		0x48040, 0x48068,
3919 		0x4807c, 0x48144,
3920 		0x48180, 0x4818c,
3921 		0x48200, 0x48298,
3922 		0x482ac, 0x4833c,
3923 		0x483f8, 0x483fc,
3924 		0x49300, 0x493c4,
3925 		0x49400, 0x4941c,
3926 		0x49480, 0x494d0,
3927 		0x4c000, 0x4c078,
3928 		0x4c0c0, 0x4c278,
3929 		0x4c2c0, 0x4c478,
3930 		0x4c4c0, 0x4c678,
3931 		0x4c6c0, 0x4c878,
3932 		0x4c8c0, 0x4c9fc,
3933 		0x4d000, 0x4d068,
3934 		0x4d080, 0x4d084,
3935 		0x4d0a0, 0x4d0b0,
3936 		0x4d200, 0x4d268,
3937 		0x4d280, 0x4d284,
3938 		0x4d2a0, 0x4d2b0,
3939 		0x4e0c0, 0x4e0e4,
3940 		0x4f000, 0x4f08c,
3941 		0x4f200, 0x4f250,
3942 		0x4f400, 0x4f420,
3943 		0x4f600, 0x4f618,
3944 		0x4f800, 0x4f814,
3945 		0x50000, 0x500cc,
3946 		0x50400, 0x50400,
3947 		0x50800, 0x508cc,
3948 		0x50c00, 0x50c00,
3949 		0x51000, 0x5101c,
3950 		0x51300, 0x51308,
3951 	};
3952 
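	/*
	 * Each table above is a flat list of inclusive [first, last]
	 * register ranges; the dump loop below consumes the entries in
	 * pairs.
	 */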
3953 	if (is_t4(sc)) {
3954 		reg_ranges = &t4_reg_ranges[0];
3955 		n = nitems(t4_reg_ranges);
3956 	} else {
3957 		reg_ranges = &t5_reg_ranges[0];
3958 		n = nitems(t5_reg_ranges);
3959 	}
3960 
3961 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
3962 	for (i = 0; i < n; i += 2)
3963 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3964 }
3965 
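/*
 * Periodic callout that refreshes the ifnet counters from the MAC's port
 * statistics.  Pause frames are generated and consumed by the MACs and never
 * reach the stack, so they (and their 64 octets apiece) are backed out of the
 * frame and octet totals; they are sent to a multicast address, which is why
 * the multicast counters are adjusted as well.
 */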
3966 static void
3967 cxgbe_tick(void *arg)
3968 {
3969 	struct port_info *pi = arg;
3970 	struct ifnet *ifp = pi->ifp;
3971 	struct sge_txq *txq;
3972 	int i, drops;
3973 	struct port_stats *s = &pi->stats;
3974 
3975 	PORT_LOCK(pi);
3976 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3977 		PORT_UNLOCK(pi);
3978 		return;	/* without scheduling another callout */
3979 	}
3980 
3981 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
3982 
3983 	ifp->if_opackets = s->tx_frames - s->tx_pause;
3984 	ifp->if_ipackets = s->rx_frames - s->rx_pause;
3985 	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
3986 	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
3987 	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
3988 	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
3989 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3990 	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3991 	    s->rx_trunc3;
3992 
3993 	drops = s->tx_drop;
3994 	for_each_txq(pi, i, txq)
3995 		drops += txq->br->br_drops;
3996 	ifp->if_snd.ifq_drops = drops;
3997 
3998 	ifp->if_oerrors = s->tx_error_frames;
3999 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4000 	    s->rx_fcs_err + s->rx_len_err;
4001 
4002 	callout_schedule(&pi->tick, hz);
4003 	PORT_UNLOCK(pi);
4004 }
4005 
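/*
 * Invoked when a vlan(4) interface is configured on top of a cxgbe port (via
 * the vlan_config event handler registered at attach time); records the
 * parent ifnet as the vlan interface's cookie.
 */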
4006 static void
4007 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4008 {
4009 	struct ifnet *vlan;
4010 
4011 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4012 		return;
4013 
4014 	vlan = VLAN_DEVAT(ifp, vid);
4015 	VLAN_SETCOOKIE(vlan, ifp);
4016 }
4017 
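/*
 * Default handler for CPL messages that no one has claimed: panic under
 * INVARIANTS, otherwise log the stray opcode and drop the payload.  EDOOFUS
 * marks this as a programming error rather than a runtime failure.
 */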
4018 static int
4019 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4020 {
4021 
4022 #ifdef INVARIANTS
4023 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4024 	    __func__, rss->opcode, iq, m);
4025 #else
4026 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4027 	    __func__, rss->opcode, iq, m);
4028 	m_freem(m);
4029 #endif
4030 	return (EDOOFUS);
4031 }
4032 
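/*
 * Install a handler for a CPL opcode (h == NULL restores the default
 * cpl_not_handled).  The release-ordered store lets the rx path read the
 * dispatch table locklessly and still see a fully set up handler.
 */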
4033 int
4034 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4035 {
4036 	uintptr_t *loc, new;
4037 
4038 	if (opcode >= nitems(sc->cpl_handler))
4039 		return (EINVAL);
4040 
4041 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4042 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4043 	atomic_store_rel_ptr(loc, new);
4044 
4045 	return (0);
4046 }
4047 
4048 static int
4049 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4050 {
4051 
4052 #ifdef INVARIANTS
4053 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4054 #else
4055 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4056 	    __func__, iq, ctrl);
4057 #endif
4058 	return (EDOOFUS);
4059 }
4060 
4061 int
4062 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4063 {
4064 	uintptr_t *loc, new;
4065 
4066 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4067 	loc = (uintptr_t *) &sc->an_handler;
4068 	atomic_store_rel_ptr(loc, new);
4069 
4070 	return (0);
4071 }
4072 
4073 static int
4074 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4075 {
4076 	const struct cpl_fw6_msg *cpl =
4077 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4078 
4079 #ifdef INVARIANTS
4080 	panic("%s: fw_msg type %d", __func__, cpl->type);
4081 #else
4082 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4083 #endif
4084 	return (EDOOFUS);
4085 }
4086 
4087 int
4088 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4089 {
4090 	uintptr_t *loc, new;
4091 
4092 	if (type >= nitems(sc->fw_msg_handler))
4093 		return (EINVAL);
4094 
4095 	/*
4096 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4097 	 * handler dispatch table.  Reject any attempt to install a handler for
4098 	 * this subtype.
4099 	 */
4100 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4101 		return (EINVAL);
4102 
4103 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4104 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4105 	atomic_store_rel_ptr(loc, new);
4106 
4107 	return (0);
4108 }
4109 
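/*
 * Build the dev.t4nex.X sysctl tree.  The caps[] and doorbells strings below
 * are kernel %b bit descriptions: the leading \20 selects hexadecimal output
 * and each \<n>NAME that follows labels bit n.  sysctl_bitfield() does the
 * decoding.
 */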
4110 static int
4111 t4_sysctls(struct adapter *sc)
4112 {
4113 	struct sysctl_ctx_list *ctx;
4114 	struct sysctl_oid *oid;
4115 	struct sysctl_oid_list *children, *c0;
4116 	static char *caps[] = {
4117 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4118 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4119 		"\20\1TOE",				/* caps[2] toecaps */
4120 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4121 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4122 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4123 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4124 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4125 	};
4126 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4127 
4128 	ctx = device_get_sysctl_ctx(sc->dev);
4129 
4130 	/*
4131 	 * dev.t4nex.X.
4132 	 */
4133 	oid = device_get_sysctl_tree(sc->dev);
4134 	c0 = children = SYSCTL_CHILDREN(oid);
4135 
4136 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4137 	    sc->params.nports, "# of ports");
4138 
4139 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4140 	    NULL, chip_rev(sc), "chip hardware revision");
4141 
4142 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4143 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4144 
4145 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4146 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4147 
4148 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4149 	    sc->cfcsum, "config file checksum");
4150 
4151 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4152 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4153 	    sysctl_bitfield, "A", "available doorbells");
4154 
4155 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4156 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4157 	    sysctl_bitfield, "A", "available link capabilities");
4158 
4159 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4160 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4161 	    sysctl_bitfield, "A", "available NIC capabilities");
4162 
4163 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4164 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4165 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4166 
4167 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4168 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4169 	    sysctl_bitfield, "A", "available RDMA capabilities");
4170 
4171 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4172 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4173 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4174 
4175 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4176 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4177 	    sysctl_bitfield, "A", "available FCoE capabilities");
4178 
4179 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4180 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4181 
4182 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4183 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4184 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4185 	    "interrupt holdoff timer values (us)");
4186 
4187 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4188 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4189 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4190 	    "interrupt holdoff packet counter values");
4191 
4192 #ifdef SBUF_DRAIN
4193 	/*
4194 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4195 	 */
4196 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4197 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4198 	    "logs and miscellaneous information");
4199 	children = SYSCTL_CHILDREN(oid);
4200 
4201 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4202 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4203 	    sysctl_cctrl, "A", "congestion control");
4204 
4205 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4206 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4207 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4208 
4209 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4210 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4211 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4212 
4213 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4214 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4215 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4216 
4217 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4218 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4219 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4220 
4221 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4222 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4223 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4224 
4225 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4226 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4227 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4228 
4229 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4230 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4231 	    sysctl_cim_la, "A", "CIM logic analyzer");
4232 
4233 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4234 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4235 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4236 
4237 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4238 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4239 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4240 
4241 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4242 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4243 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4244 
4245 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4246 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4247 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4248 
4249 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4250 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4251 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4252 
4253 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4254 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4255 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4256 
4257 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4258 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4259 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4260 
4261 	if (is_t5(sc)) {
4262 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4263 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4264 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4265 
4266 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4267 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4268 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4269 	}
4270 
4271 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4272 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4273 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4274 
4275 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4276 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4277 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4278 
4279 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4280 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4281 	    sysctl_cpl_stats, "A", "CPL statistics");
4282 
4283 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4284 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4285 	    sysctl_ddp_stats, "A", "DDP statistics");
4286 
4287 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4288 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4289 	    sysctl_devlog, "A", "firmware's device log");
4290 
4291 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4292 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4293 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4294 
4295 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4296 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4297 	    sysctl_hw_sched, "A", "hardware scheduler");
4298 
4299 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4300 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4301 	    sysctl_l2t, "A", "hardware L2 table");
4302 
4303 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4304 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4305 	    sysctl_lb_stats, "A", "loopback statistics");
4306 
4307 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4308 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4309 	    sysctl_meminfo, "A", "memory regions");
4310 
4311 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4312 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4313 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4314 
4315 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4316 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4317 	    sysctl_path_mtus, "A", "path MTUs");
4318 
4319 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4320 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4321 	    sysctl_pm_stats, "A", "PM statistics");
4322 
4323 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4324 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4325 	    sysctl_rdma_stats, "A", "RDMA statistics");
4326 
4327 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4328 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4329 	    sysctl_tcp_stats, "A", "TCP statistics");
4330 
4331 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4332 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4333 	    sysctl_tids, "A", "TID information");
4334 
4335 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4336 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4337 	    sysctl_tp_err_stats, "A", "TP error statistics");
4338 
4339 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4340 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4341 	    sysctl_tp_la, "A", "TP logic analyzer");
4342 
4343 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4344 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4345 	    sysctl_tx_rate, "A", "Tx rate");
4346 
4347 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4348 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4349 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4350 
4351 	if (is_t5(sc)) {
4352 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4353 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354 		    sysctl_wcwr_stats, "A", "write combined work requests");
4355 	}
4356 #endif
4357 
4358 #ifdef TCP_OFFLOAD
4359 	if (is_offload(sc)) {
4360 		/*
4361 		 * dev.t4nex.X.toe.
4362 		 */
4363 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4364 		    NULL, "TOE parameters");
4365 		children = SYSCTL_CHILDREN(oid);
4366 
4367 		sc->tt.sndbuf = 256 * 1024;
4368 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4369 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4370 
4371 		sc->tt.ddp = 0;
4372 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4373 		    &sc->tt.ddp, 0, "DDP allowed");
4374 
4375 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4376 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4377 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4378 
4379 		sc->tt.ddp_thres =
4380 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4381 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4382 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4383 	}
4384 #endif
4385 
4386 
4387 	return (0);
4388 }
4389 
4390 static int
4391 cxgbe_sysctls(struct port_info *pi)
4392 {
4393 	struct sysctl_ctx_list *ctx;
4394 	struct sysctl_oid *oid;
4395 	struct sysctl_oid_list *children;
4396 
4397 	ctx = device_get_sysctl_ctx(pi->dev);
4398 
4399 	/*
4400 	 * dev.cxgbe.X.
4401 	 */
4402 	oid = device_get_sysctl_tree(pi->dev);
4403 	children = SYSCTL_CHILDREN(oid);
4404 
4405 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4406 	    &pi->nrxq, 0, "# of rx queues");
4407 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4408 	    &pi->ntxq, 0, "# of tx queues");
4409 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4410 	    &pi->first_rxq, 0, "index of first rx queue");
4411 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4412 	    &pi->first_txq, 0, "index of first tx queue");
4413 
4414 #ifdef TCP_OFFLOAD
4415 	if (is_offload(pi->adapter)) {
4416 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4417 		    &pi->nofldrxq, 0,
4418 		    "# of rx queues for offloaded TCP connections");
4419 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4420 		    &pi->nofldtxq, 0,
4421 		    "# of tx queues for offloaded TCP connections");
4422 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4423 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4424 		    "index of first TOE rx queue");
4425 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4426 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4427 		    "index of first TOE tx queue");
4428 	}
4429 #endif
4430 
4431 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4432 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4433 	    "holdoff timer index");
4434 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4435 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4436 	    "holdoff packet counter index");
4437 
4438 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4439 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4440 	    "rx queue size");
4441 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4442 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4443 	    "tx queue size");
4444 
4445 	/*
4446 	 * dev.cxgbe.X.stats.
4447 	 */
4448 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4449 	    NULL, "port statistics");
4450 	children = SYSCTL_CHILDREN(oid);
4451 
4452 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4453 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4454 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4455 	    sysctl_handle_t4_reg64, "QU", desc)
4456 
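	/*
	 * Each of these is a live read: sysctl_handle_t4_reg64() fetches the
	 * 64-bit MPS statistic directly from the register named by 'reg'
	 * every time the sysctl is queried.
	 */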
4457 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4458 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4459 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4460 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4461 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4462 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4463 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4464 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4465 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4466 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4467 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4468 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4469 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4470 	    "# of tx frames in this range",
4471 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4472 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4473 	    "# of tx frames in this range",
4474 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4475 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4476 	    "# of tx frames in this range",
4477 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4478 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4479 	    "# of tx frames in this range",
4480 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4481 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4482 	    "# of tx frames in this range",
4483 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4484 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4485 	    "# of tx frames in this range",
4486 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4487 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4488 	    "# of tx frames in this range",
4489 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4490 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4491 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4492 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4493 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4494 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4495 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4496 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4497 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4498 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4499 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4500 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4501 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4502 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4503 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4504 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4505 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4506 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4507 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4508 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4509 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4510 
4511 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4512 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4513 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4514 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4515 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4516 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4517 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4518 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4519 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4520 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4521 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4522 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4523 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4524 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4525 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4526 	    "# of frames received with bad FCS",
4527 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4528 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4529 	    "# of frames received with length error",
4530 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4531 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4532 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4533 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4534 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4535 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4536 	    "# of rx frames in this range",
4537 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4538 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4539 	    "# of rx frames in this range",
4540 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4541 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4542 	    "# of rx frames in this range",
4543 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4544 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4545 	    "# of rx frames in this range",
4546 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4547 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4548 	    "# of rx frames in this range",
4549 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4550 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4551 	    "# of rx frames in this range",
4552 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4553 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4554 	    "# of rx frames in this range",
4555 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4556 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4557 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4558 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4559 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4560 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4561 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4562 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4563 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4564 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4565 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4566 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4567 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4568 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4569 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4570 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4571 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4572 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4573 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4574 
4575 #undef SYSCTL_ADD_T4_REG64
4576 
4577 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4578 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4579 	    &pi->stats.name, desc)
4580 
4581 	/* We get these from port_stats and they may be stale by up to 1s */
4582 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4583 	    "# drops due to buffer-group 0 overflows");
4584 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4585 	    "# drops due to buffer-group 1 overflows");
4586 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4587 	    "# drops due to buffer-group 2 overflows");
4588 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4589 	    "# drops due to buffer-group 3 overflows");
4590 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4591 	    "# of buffer-group 0 truncated packets");
4592 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4593 	    "# of buffer-group 1 truncated packets");
4594 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4595 	    "# of buffer-group 2 truncated packets");
4596 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4597 	    "# of buffer-group 3 truncated packets");
4598 
4599 #undef SYSCTL_ADD_T4_PORTSTAT
4600 
4601 	return (0);
4602 }
4603 
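/*
 * Render an array of ints as a space-separated string.  arg1 points to the
 * array and arg2 is its size in bytes.
 */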
4604 static int
4605 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4606 {
4607 	int rc, *i;
4608 	struct sbuf sb;
4609 
4610 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4611 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4612 		sbuf_printf(&sb, "%d ", *i);
4613 	sbuf_trim(&sb);
4614 	sbuf_finish(&sb);
4615 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4616 	sbuf_delete(&sb);
4617 	return (rc);
4618 }
4619 
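/*
 * Decode the bitfield in arg2 using the %b description string passed in arg1
 * (see the caps[] and doorbells strings in t4_sysctls).
 */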
4620 static int
4621 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4622 {
4623 	int rc;
4624 	struct sbuf *sb;
4625 
4626 	rc = sysctl_wire_old_buffer(req, 0);
4627 	if (rc != 0)
4628 		return (rc);
4629 
4630 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4631 	if (sb == NULL)
4632 		return (ENOMEM);
4633 
4634 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4635 	rc = sbuf_finish(sb);
4636 	sbuf_delete(sb);
4637 
4638 	return (rc);
4639 }
4640 
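/*
 * Validate the new holdoff timer index and push it out to every rx queue.
 * intr_params is read by the interrupt path without a lock, so an atomic
 * 8-bit store is used where the platform provides one.
 */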
4641 static int
4642 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4643 {
4644 	struct port_info *pi = arg1;
4645 	struct adapter *sc = pi->adapter;
4646 	int idx, rc, i;
4647 	struct sge_rxq *rxq;
4648 	uint8_t v;
4649 
4650 	idx = pi->tmr_idx;
4651 
4652 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4653 	if (rc != 0 || req->newptr == NULL)
4654 		return (rc);
4655 
4656 	if (idx < 0 || idx >= SGE_NTIMERS)
4657 		return (EINVAL);
4658 
4659 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4660 	    "t4tmr");
4661 	if (rc)
4662 		return (rc);
4663 
4664 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4665 	for_each_rxq(pi, i, rxq) {
4666 #ifdef atomic_store_rel_8
4667 		atomic_store_rel_8(&rxq->iq.intr_params, v);
4668 #else
4669 		rxq->iq.intr_params = v;
4670 #endif
4671 	}
4672 	pi->tmr_idx = idx;
4673 
4674 	end_synchronized_op(sc, LOCK_HELD);
4675 	return (0);
4676 }
4677 
4678 static int
4679 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4680 {
4681 	struct port_info *pi = arg1;
4682 	struct adapter *sc = pi->adapter;
4683 	int idx, rc;
4684 
4685 	idx = pi->pktc_idx;
4686 
4687 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4688 	if (rc != 0 || req->newptr == NULL)
4689 		return (rc);
4690 
4691 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4692 		return (EINVAL);
4693 
4694 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4695 	    "t4pktc");
4696 	if (rc)
4697 		return (rc);
4698 
4699 	if (pi->flags & PORT_INIT_DONE)
4700 		rc = EBUSY; /* cannot be changed once the queues are created */
4701 	else
4702 		pi->pktc_idx = idx;
4703 
4704 	end_synchronized_op(sc, LOCK_HELD);
4705 	return (rc);
4706 }
4707 
4708 static int
4709 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4710 {
4711 	struct port_info *pi = arg1;
4712 	struct adapter *sc = pi->adapter;
4713 	int qsize, rc;
4714 
4715 	qsize = pi->qsize_rxq;
4716 
4717 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4718 	if (rc != 0 || req->newptr == NULL)
4719 		return (rc);
4720 
4721 	if (qsize < 128 || (qsize & 7))
4722 		return (EINVAL);
4723 
4724 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4725 	    "t4rxqs");
4726 	if (rc)
4727 		return (rc);
4728 
4729 	if (pi->flags & PORT_INIT_DONE)
4730 		rc = EBUSY; /* cannot be changed once the queues are created */
4731 	else
4732 		pi->qsize_rxq = qsize;
4733 
4734 	end_synchronized_op(sc, LOCK_HELD);
4735 	return (rc);
4736 }
4737 
4738 static int
4739 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4740 {
4741 	struct port_info *pi = arg1;
4742 	struct adapter *sc = pi->adapter;
4743 	int qsize, rc;
4744 
4745 	qsize = pi->qsize_txq;
4746 
4747 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4748 	if (rc != 0 || req->newptr == NULL)
4749 		return (rc);
4750 
4751 	/* bufring size must be a power of 2 */
4752 	if (qsize < 128 || !powerof2(qsize))
4753 		return (EINVAL);
4754 
4755 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4756 	    "t4txqs");
4757 	if (rc)
4758 		return (rc);
4759 
4760 	if (pi->flags & PORT_INIT_DONE)
4761 		rc = EBUSY; /* cannot be changed once the queues are created */
4762 	else
4763 		pi->qsize_txq = qsize;
4764 
4765 	end_synchronized_op(sc, LOCK_HELD);
4766 	return (rc);
4767 }
4768 
4769 static int
4770 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4771 {
4772 	struct adapter *sc = arg1;
4773 	int reg = arg2;
4774 	uint64_t val;
4775 
4776 	val = t4_read_reg64(sc, reg);
4777 
4778 	return (sysctl_handle_64(oidp, &val, 0, req));
4779 }
4780 
4781 #ifdef SBUF_DRAIN
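/*
 * Dump the congestion control table: for each window, the 16 per-MTU
 * additive increments followed by the window's a_wnd parameter and
 * decrement factor b_wnd.
 */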
4782 static int
4783 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4784 {
4785 	struct adapter *sc = arg1;
4786 	struct sbuf *sb;
4787 	int rc, i;
4788 	uint16_t incr[NMTUS][NCCTRL_WIN];
4789 	static const char *dec_fac[] = {
4790 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4791 		"0.9375"
4792 	};
4793 
4794 	rc = sysctl_wire_old_buffer(req, 0);
4795 	if (rc != 0)
4796 		return (rc);
4797 
4798 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4799 	if (sb == NULL)
4800 		return (ENOMEM);
4801 
4802 	t4_read_cong_tbl(sc, incr);
4803 
4804 	for (i = 0; i < NCCTRL_WIN; ++i) {
4805 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4806 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4807 		    incr[5][i], incr[6][i], incr[7][i]);
4808 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4809 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4810 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4811 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4812 	}
4813 
4814 	rc = sbuf_finish(sb);
4815 	sbuf_delete(sb);
4816 
4817 	return (rc);
4818 }
4819 
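/*
 * Queue names indexed the same way as arg2 in sysctl_cim_ibq_obq(): the
 * IBQs first, then the OBQs starting at CIM_NUM_IBQ.
 */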
4820 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4821 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4822 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4823 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4824 };
4825 
4826 static int
4827 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4828 {
4829 	struct adapter *sc = arg1;
4830 	struct sbuf *sb;
4831 	int rc, i, n, qid = arg2;
4832 	uint32_t *buf, *p;
4833 	char *qtype;
4834 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4835 
4836 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4837 	    ("%s: bad qid %d\n", __func__, qid));
4838 
4839 	if (qid < CIM_NUM_IBQ) {
4840 		/* inbound queue */
4841 		qtype = "IBQ";
4842 		n = 4 * CIM_IBQ_SIZE;
4843 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4844 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4845 	} else {
4846 		/* outbound queue */
4847 		qtype = "OBQ";
4848 		qid -= CIM_NUM_IBQ;
4849 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4850 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4851 		rc = t4_read_cim_obq(sc, qid, buf, n);
4852 	}
4853 
4854 	if (rc < 0) {
4855 		rc = -rc;
4856 		goto done;
4857 	}
4858 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
4859 
4860 	rc = sysctl_wire_old_buffer(req, 0);
4861 	if (rc != 0)
4862 		goto done;
4863 
4864 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4865 	if (sb == NULL) {
4866 		rc = ENOMEM;
4867 		goto done;
4868 	}
4869 
4870 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
4871 	for (i = 0, p = buf; i < n; i += 16, p += 4)
4872 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4873 		    p[2], p[3]);
4874 
4875 	rc = sbuf_finish(sb);
4876 	sbuf_delete(sb);
4877 done:
4878 	free(buf, M_CXGBE);
4879 	return (rc);
4880 }
4881 
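/*
 * Dump the CIM logic analyzer.  The layout of each entry (and thus of the
 * output) depends on whether the trace was captured with
 * F_UPDBGLACAPTPCONLY set.
 */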
4882 static int
4883 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4884 {
4885 	struct adapter *sc = arg1;
4886 	u_int cfg;
4887 	struct sbuf *sb;
4888 	uint32_t *buf, *p;
4889 	int rc;
4890 
4891 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4892 	if (rc != 0)
4893 		return (rc);
4894 
4895 	rc = sysctl_wire_old_buffer(req, 0);
4896 	if (rc != 0)
4897 		return (rc);
4898 
4899 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4900 	if (sb == NULL)
4901 		return (ENOMEM);
4902 
4903 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4904 	    M_ZERO | M_WAITOK);
4905 
4906 	rc = -t4_cim_read_la(sc, buf, NULL);
4907 	if (rc != 0)
4908 		goto done;
4909 
4910 	sbuf_printf(sb, "Status   Data      PC%s",
4911 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
4912 	    "     LS0Stat  LS0Addr             LS0Data");
4913 
4914 	KASSERT((sc->params.cim_la_size & 7) == 0,
4915 	    ("%s: p will walk off the end of buf", __func__));
4916 
4917 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4918 		if (cfg & F_UPDBGLACAPTPCONLY) {
4919 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
4920 			    p[6], p[7]);
4921 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
4922 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4923 			    p[4] & 0xff, p[5] >> 8);
4924 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
4925 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4926 			    p[1] & 0xf, p[2] >> 4);
4927 		} else {
4928 			sbuf_printf(sb,
4929 			    "\n  %02x   %x%07x %x%07x %08x %08x "
4930 			    "%08x%08x%08x%08x",
4931 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4932 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4933 			    p[6], p[7]);
4934 		}
4935 	}
4936 
4937 	rc = sbuf_finish(sb);
4938 	sbuf_delete(sb);
4939 done:
4940 	free(buf, M_CXGBE);
4941 	return (rc);
4942 }
4943 
4944 static int
4945 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
4946 {
4947 	struct adapter *sc = arg1;
4948 	u_int i;
4949 	struct sbuf *sb;
4950 	uint32_t *buf, *p;
4951 	int rc;
4952 
4953 	rc = sysctl_wire_old_buffer(req, 0);
4954 	if (rc != 0)
4955 		return (rc);
4956 
4957 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4958 	if (sb == NULL)
4959 		return (ENOMEM);
4960 
4961 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
4962 	    M_ZERO | M_WAITOK);
4963 
4964 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
4965 	p = buf;
4966 
4967 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
4968 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
4969 		    p[1], p[0]);
4970 	}
4971 
4972 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
4973 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
4974 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
4975 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
4976 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
4977 		    (p[1] >> 2) | ((p[2] & 3) << 30),
4978 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
4979 		    p[0] & 1);
4980 	}
4981 
4982 	rc = sbuf_finish(sb);
4983 	sbuf_delete(sb);
4984 	free(buf, M_CXGBE);
4985 	return (rc);
4986 }
4987 
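/*
 * Dump the CIM PIF logic analyzer: CIM_PIFLA_SIZE entries each for the
 * inbound and outbound halves of the capture buffer.
 */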
4988 static int
4989 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
4990 {
4991 	struct adapter *sc = arg1;
4992 	u_int i;
4993 	struct sbuf *sb;
4994 	uint32_t *buf, *p;
4995 	int rc;
4996 
4997 	rc = sysctl_wire_old_buffer(req, 0);
4998 	if (rc != 0)
4999 		return (rc);
5000 
5001 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5002 	if (sb == NULL)
5003 		return (ENOMEM);
5004 
5005 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5006 	    M_ZERO | M_WAITOK);
5007 
5008 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5009 	p = buf;
5010 
5011 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5012 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5013 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5014 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5015 		    p[4], p[3], p[2], p[1], p[0]);
5016 	}
5017 
5018 	sbuf_printf(sb, "\n\nCntl ID               Data");
5019 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5020 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5021 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5022 	}
5023 
5024 	rc = sbuf_finish(sb);
5025 	sbuf_delete(sb);
5026 	free(buf, M_CXGBE);
5027 	return (rc);
5028 }
5029 
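/*
 * CIM queue configuration.  T5 exposes shadow copies of the queue pointers
 * (and two extra OBQs), so the queue count and register addresses are
 * selected per chip before reading.
 */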
5030 static int
5031 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5032 {
5033 	struct adapter *sc = arg1;
5034 	struct sbuf *sb;
5035 	int rc, i;
5036 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5037 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5038 	uint16_t thres[CIM_NUM_IBQ];
5039 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5040 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5041 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5042 
5043 	if (is_t4(sc)) {
5044 		cim_num_obq = CIM_NUM_OBQ;
5045 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5046 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5047 	} else {
5048 		cim_num_obq = CIM_NUM_OBQ_T5;
5049 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5050 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5051 	}
5052 	nq = CIM_NUM_IBQ + cim_num_obq;
5053 
5054 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5055 	if (rc == 0)
5056 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5057 	if (rc != 0)
5058 		return (rc);
5059 
5060 	t4_read_cimq_cfg(sc, base, size, thres);
5061 
5062 	rc = sysctl_wire_old_buffer(req, 0);
5063 	if (rc != 0)
5064 		return (rc);
5065 
5066 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5067 	if (sb == NULL)
5068 		return (ENOMEM);
5069 
5070 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5071 
5072 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5073 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5074 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5075 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5076 		    G_QUEREMFLITS(p[2]) * 16);
5077 	for ( ; i < nq; i++, p += 4, wr += 2)
5078 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5079 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5080 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5081 		    G_QUEREMFLITS(p[2]) * 16);
5082 
5083 	rc = sbuf_finish(sb);
5084 	sbuf_delete(sb);
5085 
5086 	return (rc);
5087 }
5088 
5089 static int
5090 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5091 {
5092 	struct adapter *sc = arg1;
5093 	struct sbuf *sb;
5094 	int rc;
5095 	struct tp_cpl_stats stats;
5096 
5097 	rc = sysctl_wire_old_buffer(req, 0);
5098 	if (rc != 0)
5099 		return (rc);
5100 
5101 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5102 	if (sb == NULL)
5103 		return (ENOMEM);
5104 
5105 	t4_tp_get_cpl_stats(sc, &stats);
5106 
5107 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5108 	    "channel 3\n");
5109 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5110 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5111 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5112 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5113 
5114 	rc = sbuf_finish(sb);
5115 	sbuf_delete(sb);
5116 
5117 	return (rc);
5118 }
5119 
5120 static int
5121 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5122 {
5123 	struct adapter *sc = arg1;
5124 	struct sbuf *sb;
5125 	int rc;
5126 	struct tp_usm_stats stats;
5127 
5128 	rc = sysctl_wire_old_buffer(req, 0);
5129 	if (rc != 0)
5130 		return (rc);
5131 
5132 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5133 	if (sb == NULL)
5134 		return (ENOMEM);
5135 
5136 	t4_get_usm_stats(sc, &stats);
5137 
5138 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5139 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5140 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5141 
5142 	rc = sbuf_finish(sb);
5143 	sbuf_delete(sb);
5144 
5145 	return (rc);
5146 }
5147 
5148 const char *devlog_level_strings[] = {
5149 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5150 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5151 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5152 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5153 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5154 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5155 };
5156 
5157 const char *devlog_facility_strings[] = {
5158 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5159 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5160 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5161 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5162 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5163 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5164 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5165 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5166 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5167 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5168 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5169 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5170 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5171 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5172 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5173 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5174 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5175 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5176 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5177 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5178 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5179 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5180 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5181 };
5182 
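/*
 * Dump the firmware's circular device log.  The entry with the oldest
 * timestamp marks the logical start of the log; printing then wraps around
 * the buffer.  If the firmware didn't report the log's location a default
 * memtype/start/size is assumed.
 */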
5183 static int
5184 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5185 {
5186 	struct adapter *sc = arg1;
5187 	struct devlog_params *dparams = &sc->params.devlog;
5188 	struct fw_devlog_e *buf, *e;
5189 	int i, j, rc, nentries, first = 0;
5190 	struct sbuf *sb;
5191 	uint64_t ftstamp = UINT64_MAX;
5192 
5193 	if (dparams->start == 0) {
5194 		dparams->memtype = 0;
5195 		dparams->start = 0x84000;
5196 		dparams->size = 32768;
5197 	}
5198 
5199 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5200 
5201 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5202 	if (buf == NULL)
5203 		return (ENOMEM);
5204 
5205 	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
5206 	    (void *)buf);
5207 	if (rc != 0)
5208 		goto done;
5209 
5210 	for (i = 0; i < nentries; i++) {
5211 		e = &buf[i];
5212 
5213 		if (e->timestamp == 0)
5214 			break;	/* end */
5215 
5216 		e->timestamp = be64toh(e->timestamp);
5217 		e->seqno = be32toh(e->seqno);
5218 		for (j = 0; j < 8; j++)
5219 			e->params[j] = be32toh(e->params[j]);
5220 
5221 		if (e->timestamp < ftstamp) {
5222 			ftstamp = e->timestamp;
5223 			first = i;
5224 		}
5225 	}
5226 
5227 	if (buf[first].timestamp == 0)
5228 		goto done;	/* nothing in the log */
5229 
5230 	rc = sysctl_wire_old_buffer(req, 0);
5231 	if (rc != 0)
5232 		goto done;
5233 
5234 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5235 	if (sb == NULL) {
5236 		rc = ENOMEM;
5237 		goto done;
5238 	}
5239 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5240 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5241 
5242 	i = first;
5243 	do {
5244 		e = &buf[i];
5245 		if (e->timestamp == 0)
5246 			break;	/* end */
5247 
5248 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5249 		    e->seqno, e->timestamp,
5250 		    (e->level < nitems(devlog_level_strings) ?
5251 			devlog_level_strings[e->level] : "UNKNOWN"),
5252 		    (e->facility < nitems(devlog_facility_strings) ?
5253 			devlog_facility_strings[e->facility] : "UNKNOWN"));
5254 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5255 		    e->params[2], e->params[3], e->params[4],
5256 		    e->params[5], e->params[6], e->params[7]);
5257 
5258 		if (++i == nentries)
5259 			i = 0;
5260 	} while (i != first);
5261 
5262 	rc = sbuf_finish(sb);
5263 	sbuf_delete(sb);
5264 done:
5265 	free(buf, M_CXGBE);
5266 	return (rc);
5267 }
5268 
5269 static int
5270 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5271 {
5272 	struct adapter *sc = arg1;
5273 	struct sbuf *sb;
5274 	int rc;
5275 	struct tp_fcoe_stats stats[4];
5276 
5277 	rc = sysctl_wire_old_buffer(req, 0);
5278 	if (rc != 0)
5279 		return (rc);
5280 
5281 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5282 	if (sb == NULL)
5283 		return (ENOMEM);
5284 
5285 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5286 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5287 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5288 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5289 
5290 	sbuf_printf(sb, "                   channel 0        channel 1        "
5291 	    "channel 2        channel 3\n");
5292 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5293 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5294 	    stats[3].octetsDDP);
5295 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5296 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5297 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5298 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5299 	    stats[3].framesDrop);
5300 
5301 	rc = sbuf_finish(sb);
5302 	sbuf_delete(sb);
5303 
5304 	return (rc);
5305 }
5306 
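/*
 * Show the hardware Tx schedulers: each scheduler's mode (class or flow
 * pacing), its channel from the mod-queue request map, its rate, and its
 * class/flow inter-packet gaps.
 */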
5307 static int
5308 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5309 {
5310 	struct adapter *sc = arg1;
5311 	struct sbuf *sb;
5312 	int rc, i;
5313 	unsigned int map, kbps, ipg, mode;
5314 	unsigned int pace_tab[NTX_SCHED];
5315 
5316 	rc = sysctl_wire_old_buffer(req, 0);
5317 	if (rc != 0)
5318 		return (rc);
5319 
5320 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5321 	if (sb == NULL)
5322 		return (ENOMEM);
5323 
5324 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5325 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5326 	t4_read_pace_tbl(sc, pace_tab);
5327 
5328 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5329 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5330 
5331 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5332 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5333 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5334 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5335 		if (kbps)
5336 			sbuf_printf(sb, "%9u     ", kbps);
5337 		else
5338 			sbuf_printf(sb, " disabled     ");
5339 
5340 		if (ipg)
5341 			sbuf_printf(sb, "%13u        ", ipg);
5342 		else
5343 			sbuf_printf(sb, "     disabled        ");
5344 
5345 		if (pace_tab[i])
5346 			sbuf_printf(sb, "%10u", pace_tab[i]);
5347 		else
5348 			sbuf_printf(sb, "  disabled");
5349 	}
5350 
5351 	rc = sbuf_finish(sb);
5352 	sbuf_delete(sb);
5353 
5354 	return (rc);
5355 }
5356 
5357 static int
5358 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5359 {
5360 	struct adapter *sc = arg1;
5361 	struct sbuf *sb;
5362 	int rc, i, j;
5363 	uint64_t *p0, *p1;
5364 	struct lb_port_stats s[2];
5365 	static const char *stat_name[] = {
5366 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5367 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5368 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5369 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5370 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5371 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5372 		"BG2FramesTrunc:", "BG3FramesTrunc:"
5373 	};
5374 
5375 	rc = sysctl_wire_old_buffer(req, 0);
5376 	if (rc != 0)
5377 		return (rc);
5378 
5379 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5380 	if (sb == NULL)
5381 		return (ENOMEM);
5382 
5383 	memset(s, 0, sizeof(s));
5384 
5385 	for (i = 0; i < 4; i += 2) {
5386 		t4_get_lb_stats(sc, i, &s[0]);
5387 		t4_get_lb_stats(sc, i + 1, &s[1]);
5388 
5389 		p0 = &s[0].octets;
5390 		p1 = &s[1].octets;
5391 		sbuf_printf(sb, "%s                       Loopback %u"
5392 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5393 
5394 		for (j = 0; j < nitems(stat_name); j++)
5395 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5396 				   *p0++, *p1++);
5397 	}
5398 
5399 	rc = sbuf_finish(sb);
5400 	sbuf_delete(sb);
5401 
5402 	return (rc);
5403 }
5404 
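/*
 * Helpers for sysctl_meminfo: memory regions are collected as mem_desc
 * entries and sorted by base address so the dump can show them in order and
 * expose any holes or overlaps.
 */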
5405 struct mem_desc {
5406 	unsigned int base;
5407 	unsigned int limit;
5408 	unsigned int idx;
5409 };
5410 
5411 static int
5412 mem_desc_cmp(const void *a, const void *b)
5413 {
5414 	return ((const struct mem_desc *)a)->base -
5415 	       ((const struct mem_desc *)b)->base;
5416 }
5417 
5418 static void
5419 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5420     unsigned int to)
5421 {
5422 	unsigned int size;
5423 
5424 	size = to - from + 1;
5425 	if (size == 0)
5426 		return;
5427 
5428 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5429 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5430 }
5431 
5432 static int
5433 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5434 {
5435 	struct adapter *sc = arg1;
5436 	struct sbuf *sb;
5437 	int rc, i, n;
5438 	uint32_t lo, hi, used, alloc;
5439 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5440 	static const char *region[] = {
5441 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5442 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5443 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5444 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5445 		"RQUDP region:", "PBL region:", "TXPBL region:",
5446 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5447 		"On-chip queues:"
5448 	};
5449 	struct mem_desc avail[4];
5450 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5451 	struct mem_desc *md = mem;
5452 
5453 	rc = sysctl_wire_old_buffer(req, 0);
5454 	if (rc != 0)
5455 		return (rc);
5456 
5457 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5458 	if (sb == NULL)
5459 		return (ENOMEM);
5460 
5461 	for (i = 0; i < nitems(mem); i++) {
5462 		mem[i].limit = 0;
5463 		mem[i].idx = i;
5464 	}
5465 
5466 	/* Find and sort the populated memory ranges */
5467 	i = 0;
5468 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5469 	if (lo & F_EDRAM0_ENABLE) {
5470 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5471 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5472 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5473 		avail[i].idx = 0;
5474 		i++;
5475 	}
5476 	if (lo & F_EDRAM1_ENABLE) {
5477 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5478 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5479 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5480 		avail[i].idx = 1;
5481 		i++;
5482 	}
5483 	if (lo & F_EXT_MEM_ENABLE) {
5484 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5485 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5486 		avail[i].limit = avail[i].base +
5487 		    (G_EXT_MEM_SIZE(hi) << 20);
5488 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5489 		i++;
5490 	}
5491 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5492 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5493 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5494 		avail[i].limit = avail[i].base +
5495 		    (G_EXT_MEM1_SIZE(hi) << 20);
5496 		avail[i].idx = 4;
5497 		i++;
5498 	}
5499 	if (!i) {                                  /* no memory available */
5500 		sbuf_delete(sb);
		return (0);
	}
5501 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5502 
5503 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5504 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5505 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5506 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5507 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5508 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5509 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5510 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5511 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5512 
5513 	/* the next few have explicit upper bounds */
5514 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5515 	md->limit = md->base - 1 +
5516 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5517 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5518 	md++;
5519 
5520 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5521 	md->limit = md->base - 1 +
5522 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5523 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5524 	md++;
5525 
5526 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5527 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5528 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5529 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5530 	} else {
5531 		md->base = 0;
5532 		md->idx = nitems(region);  /* hide it */
5533 	}
5534 	md++;
5535 
5536 #define ulp_region(reg) \
5537 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5538 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5539 
5540 	ulp_region(RX_ISCSI);
5541 	ulp_region(RX_TDDP);
5542 	ulp_region(TX_TPT);
5543 	ulp_region(RX_STAG);
5544 	ulp_region(RX_RQ);
5545 	ulp_region(RX_RQUDP);
5546 	ulp_region(RX_PBL);
5547 	ulp_region(TX_PBL);
5548 #undef ulp_region
5549 
5550 	md->base = 0;
5551 	md->idx = nitems(region);
5552 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5553 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5554 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5555 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5556 	}
5557 	md++;
5558 
5559 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5560 	md->limit = md->base + sc->tids.ntids - 1;
5561 	md++;
5562 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5563 	md->limit = md->base + sc->tids.ntids - 1;
5564 	md++;
5565 
5566 	md->base = sc->vres.ocq.start;
5567 	if (sc->vres.ocq.size)
5568 		md->limit = md->base + sc->vres.ocq.size - 1;
5569 	else
5570 		md->idx = nitems(region);  /* hide it */
5571 	md++;
5572 
5573 	/* add any address-space holes, there can be up to 3 */
5574 	for (n = 0; n < i - 1; n++)
5575 		if (avail[n].limit < avail[n + 1].base)
5576 			(md++)->base = avail[n].limit;
5577 	if (avail[n].limit)
5578 		(md++)->base = avail[n].limit;
5579 
5580 	n = md - mem;
5581 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5582 
5583 	for (lo = 0; lo < i; lo++)
5584 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5585 				avail[lo].limit - 1);
5586 
5587 	sbuf_printf(sb, "\n");
5588 	for (i = 0; i < n; i++) {
5589 		if (mem[i].idx >= nitems(region))
5590 			continue;                        /* skip holes */
5591 		if (!mem[i].limit)
5592 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5593 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5594 				mem[i].limit);
5595 	}
5596 
5597 	sbuf_printf(sb, "\n");
5598 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5599 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5600 	mem_region_show(sb, "uP RAM:", lo, hi);
5601 
5602 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5603 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5604 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5605 
5606 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5607 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5608 		   G_PMRXMAXPAGE(lo),
5609 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5610 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5611 
5612 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5613 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5614 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5615 		   G_PMTXMAXPAGE(lo),
5616 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5617 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5618 	sbuf_printf(sb, "%u p-structs\n",
5619 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5620 
5621 	for (i = 0; i < 4; i++) {
5622 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5623 		if (is_t4(sc)) {
5624 			used = G_USED(lo);
5625 			alloc = G_ALLOC(lo);
5626 		} else {
5627 			used = G_T5_USED(lo);
5628 			alloc = G_T5_ALLOC(lo);
5629 		}
5630 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5631 			   i, used, alloc);
5632 	}
5633 	for (i = 0; i < 4; i++) {
5634 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5635 		if (is_t4(sc)) {
5636 			used = G_USED(lo);
5637 			alloc = G_ALLOC(lo);
5638 		} else {
5639 			used = G_T5_USED(lo);
5640 			alloc = G_T5_ALLOC(lo);
5641 		}
5642 		sbuf_printf(sb,
5643 			   "\nLoopback %d using %u pages out of %u allocated",
5644 			   i, used, alloc);
5645 	}
5646 
5647 	rc = sbuf_finish(sb);
5648 	sbuf_delete(sb);
5649 
5650 	return (rc);
5651 }
5652 
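/*
 * Recover an Ethernet address and mask from an MPS TCAM (x, y) pair.  The
 * care mask is x | y, and the address itself is carried in the low 48 bits
 * of y (copied out in big-endian byte order).  Entries with x & y != 0 are
 * invalid and are skipped by the caller.
 */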
5653 static inline void
5654 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5655 {
5656 	*mask = x | y;
5657 	y = htobe64(y);
5658 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5659 }
5660 
5661 static int
5662 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5663 {
5664 	struct adapter *sc = arg1;
5665 	struct sbuf *sb;
5666 	int rc, i, n;
5667 
5668 	rc = sysctl_wire_old_buffer(req, 0);
5669 	if (rc != 0)
5670 		return (rc);
5671 
5672 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5673 	if (sb == NULL)
5674 		return (ENOMEM);
5675 
5676 	sbuf_printf(sb,
5677 	    "Idx  Ethernet address     Mask     Vld Ports PF"
5678 	    "  VF              Replication             P0 P1 P2 P3  ML");
5679 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5680 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5681 	for (i = 0; i < n; i++) {
5682 		uint64_t tcamx, tcamy, mask;
5683 		uint32_t cls_lo, cls_hi;
5684 		uint8_t addr[ETHER_ADDR_LEN];
5685 
5686 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5687 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5688 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5689 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5690 
5691 		if (tcamx & tcamy)
5692 			continue;
5693 
5694 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5695 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5696 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5697 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5698 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5699 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5700 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5701 
5702 		if (cls_lo & F_REPLICATE) {
5703 			struct fw_ldst_cmd ldst_cmd;
5704 
5705 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5706 			ldst_cmd.op_to_addrspace =
5707 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5708 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5709 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5710 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5711 			ldst_cmd.u.mps.fid_ctl =
5712 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5713 				V_FW_LDST_CMD_CTL(i));
5714 
5715 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5716 			    "t4mps");
5717 			if (rc)
5718 				break;
5719 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5720 			    sizeof(ldst_cmd), &ldst_cmd);
5721 			end_synchronized_op(sc, 0);
5722 
5723 			if (rc != 0) {
5724 				sbuf_printf(sb,
5725 				    " ------------ error %3u ------------", rc);
5726 				rc = 0;
5727 			} else {
5728 				sbuf_printf(sb, " %08x %08x %08x %08x",
5729 				    be32toh(ldst_cmd.u.mps.rplc127_96),
5730 				    be32toh(ldst_cmd.u.mps.rplc95_64),
5731 				    be32toh(ldst_cmd.u.mps.rplc63_32),
5732 				    be32toh(ldst_cmd.u.mps.rplc31_0));
5733 			}
5734 		} else
5735 			sbuf_printf(sb, "%36s", "");
5736 
5737 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5738 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5739 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5740 	}
5741 
5742 	if (rc)
5743 		(void) sbuf_finish(sb);
5744 	else
5745 		rc = sbuf_finish(sb);
5746 	sbuf_delete(sb);
5747 
5748 	return (rc);
5749 }
5750 
5751 static int
5752 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5753 {
5754 	struct adapter *sc = arg1;
5755 	struct sbuf *sb;
5756 	int rc;
5757 	uint16_t mtus[NMTUS];
5758 
5759 	rc = sysctl_wire_old_buffer(req, 0);
5760 	if (rc != 0)
5761 		return (rc);
5762 
5763 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5764 	if (sb == NULL)
5765 		return (ENOMEM);
5766 
5767 	t4_read_mtu_tbl(sc, mtus, NULL);
5768 
5769 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5770 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5771 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5772 	    mtus[14], mtus[15]);
5773 
5774 	rc = sbuf_finish(sb);
5775 	sbuf_delete(sb);
5776 
5777 	return (rc);
5778 }
5779 
5780 static int
5781 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5782 {
5783 	struct adapter *sc = arg1;
5784 	struct sbuf *sb;
5785 	int rc, i;
5786 	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5787 	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5788 	static const char *pm_stats[] = {
5789 		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5790 	};
5791 
5792 	rc = sysctl_wire_old_buffer(req, 0);
5793 	if (rc != 0)
5794 		return (rc);
5795 
5796 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5797 	if (sb == NULL)
5798 		return (ENOMEM);
5799 
5800 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5801 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5802 
5803 	sbuf_printf(sb, "                Tx count            Tx cycles    "
5804 	    "Rx count            Rx cycles");
5805 	for (i = 0; i < PM_NSTATS; i++)
5806 		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5807 		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5808 
5809 	rc = sbuf_finish(sb);
5810 	sbuf_delete(sb);
5811 
5812 	return (rc);
5813 }
5814 
5815 static int
5816 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5817 {
5818 	struct adapter *sc = arg1;
5819 	struct sbuf *sb;
5820 	int rc;
5821 	struct tp_rdma_stats stats;
5822 
5823 	rc = sysctl_wire_old_buffer(req, 0);
5824 	if (rc != 0)
5825 		return (rc);
5826 
5827 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5828 	if (sb == NULL)
5829 		return (ENOMEM);
5830 
5831 	t4_tp_get_rdma_stats(sc, &stats);
5832 	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
5833 	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
5834 
5835 	rc = sbuf_finish(sb);
5836 	sbuf_delete(sb);
5837 
5838 	return (rc);
5839 }
5840 
5841 static int
5842 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5843 {
5844 	struct adapter *sc = arg1;
5845 	struct sbuf *sb;
5846 	int rc;
5847 	struct tp_tcp_stats v4, v6;
5848 
5849 	rc = sysctl_wire_old_buffer(req, 0);
5850 	if (rc != 0)
5851 		return (rc);
5852 
5853 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5854 	if (sb == NULL)
5855 		return (ENOMEM);
5856 
5857 	t4_tp_get_tcp_stats(sc, &v4, &v6);
5858 	sbuf_printf(sb,
5859 	    "                                IP                 IPv6\n");
5860 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
5861 	    v4.tcpOutRsts, v6.tcpOutRsts);
5862 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
5863 	    v4.tcpInSegs, v6.tcpInSegs);
5864 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
5865 	    v4.tcpOutSegs, v6.tcpOutSegs);
5866 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
5867 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
5868 
5869 	rc = sbuf_finish(sb);
5870 	sbuf_delete(sb);
5871 
5872 	return (rc);
5873 }
5874 
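/*
 * TID usage summary.  When the LE hash table is enabled the active TID
 * space is reported as two ranges: the regular range below the server
 * index and the hash range starting at LE_DB_TID_HASHBASE.  Otherwise it
 * is one contiguous range.
 */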
5875 static int
5876 sysctl_tids(SYSCTL_HANDLER_ARGS)
5877 {
5878 	struct adapter *sc = arg1;
5879 	struct sbuf *sb;
5880 	int rc;
5881 	struct tid_info *t = &sc->tids;
5882 
5883 	rc = sysctl_wire_old_buffer(req, 0);
5884 	if (rc != 0)
5885 		return (rc);
5886 
5887 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5888 	if (sb == NULL)
5889 		return (ENOMEM);
5890 
5891 	if (t->natids) {
5892 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
5893 		    t->atids_in_use);
5894 	}
5895 
5896 	if (t->ntids) {
5897 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5898 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
5899 
5900 			if (b) {
5901 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
5902 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5903 				    t->ntids - 1);
5904 			} else {
5905 				sbuf_printf(sb, "TID range: %u-%u",
5906 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5907 				    t->ntids - 1);
5908 			}
5909 		} else
5910 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
5911 		sbuf_printf(sb, ", in use: %u\n",
5912 		    atomic_load_acq_int(&t->tids_in_use));
5913 	}
5914 
5915 	if (t->nstids) {
5916 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
5917 		    t->stid_base + t->nstids - 1, t->stids_in_use);
5918 	}
5919 
5920 	if (t->nftids) {
5921 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
5922 		    t->ftid_base + t->nftids - 1);
5923 	}
5924 
5925 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
5926 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
5927 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
5928 
5929 	rc = sbuf_finish(sb);
5930 	sbuf_delete(sb);
5931 
5932 	return (rc);
5933 }
5934 
5935 static int
5936 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
5937 {
5938 	struct adapter *sc = arg1;
5939 	struct sbuf *sb;
5940 	int rc;
5941 	struct tp_err_stats stats;
5942 
5943 	rc = sysctl_wire_old_buffer(req, 0);
5944 	if (rc != 0)
5945 		return (rc);
5946 
5947 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5948 	if (sb == NULL)
5949 		return (ENOMEM);
5950 
5951 	t4_tp_get_err_stats(sc, &stats);
5952 
5953 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5954 		      "channel 3\n");
5955 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
5956 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
5957 	    stats.macInErrs[3]);
5958 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
5959 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
5960 	    stats.hdrInErrs[3]);
5961 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
5962 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
5963 	    stats.tcpInErrs[3]);
5964 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
5965 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
5966 	    stats.tcp6InErrs[3]);
5967 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
5968 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
5969 	    stats.tnlCongDrops[3]);
5970 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
5971 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
5972 	    stats.tnlTxDrops[3]);
5973 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
5974 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
5975 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
5976 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
5977 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
5978 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
5979 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
5980 	    stats.ofldNoNeigh, stats.ofldCongDefer);
5981 
5982 	rc = sbuf_finish(sb);
5983 	sbuf_delete(sb);
5984 
5985 	return (rc);
5986 }
5987 
5988 struct field_desc {
5989 	const char *name;
5990 	u_int start;
5991 	u_int width;
5992 };
5993 
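/*
 * Print a 64-bit logic-analyzer word as a list of "name: value" pairs, one
 * per field descriptor, wrapping the output at roughly 79 columns.
 */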
5994 static void
5995 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
5996 {
5997 	char buf[32];
5998 	int line_size = 0;
5999 
6000 	while (f->name) {
6001 		uint64_t mask = (1ULL << f->width) - 1;
6002 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6003 		    ((uintmax_t)v >> f->start) & mask);
6004 
6005 		if (line_size + len >= 79) {
6006 			line_size = 8;
6007 			sbuf_printf(sb, "\n        ");
6008 		}
6009 		sbuf_printf(sb, "%s ", buf);
6010 		line_size += len + 1;
6011 		f++;
6012 	}
6013 	sbuf_printf(sb, "\n");
6014 }
6015 
6016 static struct field_desc tp_la0[] = {
6017 	{ "RcfOpCodeOut", 60, 4 },
6018 	{ "State", 56, 4 },
6019 	{ "WcfState", 52, 4 },
6020 	{ "RcfOpcSrcOut", 50, 2 },
6021 	{ "CRxError", 49, 1 },
6022 	{ "ERxError", 48, 1 },
6023 	{ "SanityFailed", 47, 1 },
6024 	{ "SpuriousMsg", 46, 1 },
6025 	{ "FlushInputMsg", 45, 1 },
6026 	{ "FlushInputCpl", 44, 1 },
6027 	{ "RssUpBit", 43, 1 },
6028 	{ "RssFilterHit", 42, 1 },
6029 	{ "Tid", 32, 10 },
6030 	{ "InitTcb", 31, 1 },
6031 	{ "LineNumber", 24, 7 },
6032 	{ "Emsg", 23, 1 },
6033 	{ "EdataOut", 22, 1 },
6034 	{ "Cmsg", 21, 1 },
6035 	{ "CdataOut", 20, 1 },
6036 	{ "EreadPdu", 19, 1 },
6037 	{ "CreadPdu", 18, 1 },
6038 	{ "TunnelPkt", 17, 1 },
6039 	{ "RcfPeerFin", 16, 1 },
6040 	{ "RcfReasonOut", 12, 4 },
6041 	{ "TxCchannel", 10, 2 },
6042 	{ "RcfTxChannel", 8, 2 },
6043 	{ "RxEchannel", 6, 2 },
6044 	{ "RcfRxChannel", 5, 1 },
6045 	{ "RcfDataOutSrdy", 4, 1 },
6046 	{ "RxDvld", 3, 1 },
6047 	{ "RxOoDvld", 2, 1 },
6048 	{ "RxCongestion", 1, 1 },
6049 	{ "TxCongestion", 0, 1 },
6050 	{ NULL }
6051 };
6052 
6053 static struct field_desc tp_la1[] = {
6054 	{ "CplCmdIn", 56, 8 },
6055 	{ "CplCmdOut", 48, 8 },
6056 	{ "ESynOut", 47, 1 },
6057 	{ "EAckOut", 46, 1 },
6058 	{ "EFinOut", 45, 1 },
6059 	{ "ERstOut", 44, 1 },
6060 	{ "SynIn", 43, 1 },
6061 	{ "AckIn", 42, 1 },
6062 	{ "FinIn", 41, 1 },
6063 	{ "RstIn", 40, 1 },
6064 	{ "DataIn", 39, 1 },
6065 	{ "DataInVld", 38, 1 },
6066 	{ "PadIn", 37, 1 },
6067 	{ "RxBufEmpty", 36, 1 },
6068 	{ "RxDdp", 35, 1 },
6069 	{ "RxFbCongestion", 34, 1 },
6070 	{ "TxFbCongestion", 33, 1 },
6071 	{ "TxPktSumSrdy", 32, 1 },
6072 	{ "RcfUlpType", 28, 4 },
6073 	{ "Eread", 27, 1 },
6074 	{ "Ebypass", 26, 1 },
6075 	{ "Esave", 25, 1 },
6076 	{ "Static0", 24, 1 },
6077 	{ "Cread", 23, 1 },
6078 	{ "Cbypass", 22, 1 },
6079 	{ "Csave", 21, 1 },
6080 	{ "CPktOut", 20, 1 },
6081 	{ "RxPagePoolFull", 18, 2 },
6082 	{ "RxLpbkPkt", 17, 1 },
6083 	{ "TxLpbkPkt", 16, 1 },
6084 	{ "RxVfValid", 15, 1 },
6085 	{ "SynLearned", 14, 1 },
6086 	{ "SetDelEntry", 13, 1 },
6087 	{ "SetInvEntry", 12, 1 },
6088 	{ "CpcmdDvld", 11, 1 },
6089 	{ "CpcmdSave", 10, 1 },
6090 	{ "RxPstructsFull", 8, 2 },
6091 	{ "EpcmdDvld", 7, 1 },
6092 	{ "EpcmdFlush", 6, 1 },
6093 	{ "EpcmdTrimPrefix", 5, 1 },
6094 	{ "EpcmdTrimPostfix", 4, 1 },
6095 	{ "ERssIp4Pkt", 3, 1 },
6096 	{ "ERssIp6Pkt", 2, 1 },
6097 	{ "ERssTcpUdpPkt", 1, 1 },
6098 	{ "ERssFceFipPkt", 0, 1 },
6099 	{ NULL }
6100 };
6101 
6102 static struct field_desc tp_la2[] = {
6103 	{ "CplCmdIn", 56, 8 },
6104 	{ "MpsVfVld", 55, 1 },
6105 	{ "MpsPf", 52, 3 },
6106 	{ "MpsVf", 44, 8 },
6107 	{ "SynIn", 43, 1 },
6108 	{ "AckIn", 42, 1 },
6109 	{ "FinIn", 41, 1 },
6110 	{ "RstIn", 40, 1 },
6111 	{ "DataIn", 39, 1 },
6112 	{ "DataInVld", 38, 1 },
6113 	{ "PadIn", 37, 1 },
6114 	{ "RxBufEmpty", 36, 1 },
6115 	{ "RxDdp", 35, 1 },
6116 	{ "RxFbCongestion", 34, 1 },
6117 	{ "TxFbCongestion", 33, 1 },
6118 	{ "TxPktSumSrdy", 32, 1 },
6119 	{ "RcfUlpType", 28, 4 },
6120 	{ "Eread", 27, 1 },
6121 	{ "Ebypass", 26, 1 },
6122 	{ "Esave", 25, 1 },
6123 	{ "Static0", 24, 1 },
6124 	{ "Cread", 23, 1 },
6125 	{ "Cbypass", 22, 1 },
6126 	{ "Csave", 21, 1 },
6127 	{ "CPktOut", 20, 1 },
6128 	{ "RxPagePoolFull", 18, 2 },
6129 	{ "RxLpbkPkt", 17, 1 },
6130 	{ "TxLpbkPkt", 16, 1 },
6131 	{ "RxVfValid", 15, 1 },
6132 	{ "SynLearned", 14, 1 },
6133 	{ "SetDelEntry", 13, 1 },
6134 	{ "SetInvEntry", 12, 1 },
6135 	{ "CpcmdDvld", 11, 1 },
6136 	{ "CpcmdSave", 10, 1 },
6137 	{ "RxPstructsFull", 8, 2 },
6138 	{ "EpcmdDvld", 7, 1 },
6139 	{ "EpcmdFlush", 6, 1 },
6140 	{ "EpcmdTrimPrefix", 5, 1 },
6141 	{ "EpcmdTrimPostfix", 4, 1 },
6142 	{ "ERssIp4Pkt", 3, 1 },
6143 	{ "ERssIp6Pkt", 2, 1 },
6144 	{ "ERssTcpUdpPkt", 1, 1 },
6145 	{ "ERssFceFipPkt", 0, 1 },
6146 	{ NULL }
6147 };
6148 
6149 static void
6150 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6151 {
6152 
6153 	field_desc_show(sb, *p, tp_la0);
6154 }
6155 
6156 static void
6157 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6158 {
6159 
6160 	if (idx)
6161 		sbuf_printf(sb, "\n");
6162 	field_desc_show(sb, p[0], tp_la0);
6163 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6164 		field_desc_show(sb, p[1], tp_la0);
6165 }
6166 
6167 static void
6168 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6169 {
6170 
6171 	if (idx)
6172 		sbuf_printf(sb, "\n");
6173 	field_desc_show(sb, p[0], tp_la0);
6174 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6175 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6176 }
6177 
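/*
 * Dump the TP logic analyzer.  The DBGLAMODE field of TP_DBG_LA_CONFIG
 * selects the capture format: modes 2 and 3 store two 64-bit words per
 * entry (an all-ones second word in the last entry is suppressed).  Mode 3
 * decodes the second word with tp_la1 or tp_la2 depending on the TunnelPkt
 * bit; everything else is decoded with tp_la0.
 */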
6178 static int
6179 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6180 {
6181 	struct adapter *sc = arg1;
6182 	struct sbuf *sb;
6183 	uint64_t *buf, *p;
6184 	int rc;
6185 	u_int i, inc;
6186 	void (*show_func)(struct sbuf *, uint64_t *, int);
6187 
6188 	rc = sysctl_wire_old_buffer(req, 0);
6189 	if (rc != 0)
6190 		return (rc);
6191 
6192 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6193 	if (sb == NULL)
6194 		return (ENOMEM);
6195 
6196 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6197 
6198 	t4_tp_read_la(sc, buf, NULL);
6199 	p = buf;
6200 
6201 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6202 	case 2:
6203 		inc = 2;
6204 		show_func = tp_la_show2;
6205 		break;
6206 	case 3:
6207 		inc = 2;
6208 		show_func = tp_la_show3;
6209 		break;
6210 	default:
6211 		inc = 1;
6212 		show_func = tp_la_show;
6213 	}
6214 
6215 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6216 		(*show_func)(sb, p, i);
6217 
6218 	rc = sbuf_finish(sb);
6219 	sbuf_delete(sb);
6220 	free(buf, M_CXGBE);
6221 	return (rc);
6222 }
6223 
6224 static int
6225 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6226 {
6227 	struct adapter *sc = arg1;
6228 	struct sbuf *sb;
6229 	int rc;
6230 	u64 nrate[NCHAN], orate[NCHAN];
6231 
6232 	rc = sysctl_wire_old_buffer(req, 0);
6233 	if (rc != 0)
6234 		return (rc);
6235 
6236 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6237 	if (sb == NULL)
6238 		return (ENOMEM);
6239 
6240 	t4_get_chan_txrate(sc, nrate, orate);
6241 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6242 		 "channel 3\n");
6243 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6244 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6245 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6246 	    orate[0], orate[1], orate[2], orate[3]);
6247 
6248 	rc = sbuf_finish(sb);
6249 	sbuf_delete(sb);
6250 
6251 	return (rc);
6252 }
6253 
6254 static int
6255 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6256 {
6257 	struct adapter *sc = arg1;
6258 	struct sbuf *sb;
6259 	uint32_t *buf, *p;
6260 	int rc, i;
6261 
6262 	rc = sysctl_wire_old_buffer(req, 0);
6263 	if (rc != 0)
6264 		return (rc);
6265 
6266 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6267 	if (sb == NULL)
6268 		return (ENOMEM);
6269 
6270 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6271 	    M_ZERO | M_WAITOK);
6272 
6273 	t4_ulprx_read_la(sc, buf);
6274 	p = buf;
6275 
6276 	sbuf_printf(sb, "      Pcmd        Type   Message"
6277 	    "                Data");
6278 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6279 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6280 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6281 	}
6282 
6283 	rc = sbuf_finish(sb);
6284 	sbuf_delete(sb);
6285 	free(buf, M_CXGBE);
6286 	return (rc);
6287 }
6288 
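/*
 * T5 write-combined work request statistics.  Only meaningful when the SGE
 * statistics block has been pointed at stat source 7; STATMODE then selects
 * which pair of counters (total/incomplete or total/overflow) is reported.
 */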
6289 static int
6290 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6291 {
6292 	struct adapter *sc = arg1;
6293 	struct sbuf *sb;
6294 	int rc, v;
6295 
6296 	rc = sysctl_wire_old_buffer(req, 0);
6297 	if (rc != 0)
6298 		return (rc);
6299 
6300 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6301 	if (sb == NULL)
6302 		return (ENOMEM);
6303 
6304 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6305 	if (G_STATSOURCE_T5(v) == 7) {
6306 		if (G_STATMODE(v) == 0) {
6307 			sbuf_printf(sb, "total %d, incomplete %d",
6308 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6309 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6310 		} else if (G_STATMODE(v) == 1) {
6311 			sbuf_printf(sb, "total %d, data overflow %d",
6312 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6313 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6314 		}
6315 	}
6316 	rc = sbuf_finish(sb);
6317 	sbuf_delete(sb);
6318 
6319 	return (rc);
6320 }
6321 #endif
6322 
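/*
 * Transmit the next frame on a txq: a previously stalled frame left in
 * txq->m takes priority over a new one from the buf_ring.  The caller must
 * hold the txq lock.
 */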
6323 static inline void
6324 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6325 {
6326 	struct buf_ring *br;
6327 	struct mbuf *m;
6328 
6329 	TXQ_LOCK_ASSERT_OWNED(txq);
6330 
6331 	br = txq->br;
6332 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6333 	if (m)
6334 		t4_eth_tx(ifp, txq, m);
6335 }
6336 
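/*
 * Callout that fires while an EQ is stalled.  A trylock is used so the
 * callout can never deadlock against the EQ owner; if the EQ is still
 * locked, or still stalled with no room to resume, the callout reschedules
 * itself one tick later.  Otherwise the real work is punted to the tx task
 * on the EQ's channel taskqueue.  The recasts of arg rely on the EQ being
 * embedded at the start of both struct sge_txq and struct sge_wrq.
 */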
6337 void
6338 t4_tx_callout(void *arg)
6339 {
6340 	struct sge_eq *eq = arg;
6341 	struct adapter *sc;
6342 
6343 	if (EQ_TRYLOCK(eq) == 0)
6344 		goto reschedule;
6345 
6346 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6347 		EQ_UNLOCK(eq);
6348 reschedule:
6349 		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6350 			callout_schedule(&eq->tx_callout, 1);
6351 		return;
6352 	}
6353 
6354 	EQ_LOCK_ASSERT_OWNED(eq);
6355 
6356 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6357 
6358 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6359 			struct sge_txq *txq = arg;
6360 			struct port_info *pi = txq->ifp->if_softc;
6361 
6362 			sc = pi->adapter;
6363 		} else {
6364 			struct sge_wrq *wrq = arg;
6365 
6366 			sc = wrq->adapter;
6367 		}
6368 
6369 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6370 	}
6371 
6372 	EQ_UNLOCK(eq);
6373 }
6374 
6375 void
6376 t4_tx_task(void *arg, int count)
6377 {
6378 	struct sge_eq *eq = arg;
6379 
6380 	EQ_LOCK(eq);
6381 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6382 		struct sge_txq *txq = arg;
6383 		txq_start(txq->ifp, txq);
6384 	} else {
6385 		struct sge_wrq *wrq = arg;
6386 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6387 	}
6388 	EQ_UNLOCK(eq);
6389 }
6390 
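/*
 * Filter mode <-> filter configuration translation.  The ioctl interface
 * speaks in T4_FILTER_* mode bits while the hardware's TP_VLAN_PRI_MAP
 * register uses its own field-enable bits (fconf).  fconf_to_mode() and
 * mode_to_fconf() convert between the two for the optional match fields
 * (the IP address/port tuple is always available), and fspec_to_fconf()
 * computes the fconf bits that a particular filter specification requires.
 */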
6391 static uint32_t
6392 fconf_to_mode(uint32_t fconf)
6393 {
6394 	uint32_t mode;
6395 
6396 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6397 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6398 
6399 	if (fconf & F_FRAGMENTATION)
6400 		mode |= T4_FILTER_IP_FRAGMENT;
6401 
6402 	if (fconf & F_MPSHITTYPE)
6403 		mode |= T4_FILTER_MPS_HIT_TYPE;
6404 
6405 	if (fconf & F_MACMATCH)
6406 		mode |= T4_FILTER_MAC_IDX;
6407 
6408 	if (fconf & F_ETHERTYPE)
6409 		mode |= T4_FILTER_ETH_TYPE;
6410 
6411 	if (fconf & F_PROTOCOL)
6412 		mode |= T4_FILTER_IP_PROTO;
6413 
6414 	if (fconf & F_TOS)
6415 		mode |= T4_FILTER_IP_TOS;
6416 
6417 	if (fconf & F_VLAN)
6418 		mode |= T4_FILTER_VLAN;
6419 
6420 	if (fconf & F_VNIC_ID)
6421 		mode |= T4_FILTER_VNIC;
6422 
6423 	if (fconf & F_PORT)
6424 		mode |= T4_FILTER_PORT;
6425 
6426 	if (fconf & F_FCOE)
6427 		mode |= T4_FILTER_FCoE;
6428 
6429 	return (mode);
6430 }
6431 
6432 static uint32_t
6433 mode_to_fconf(uint32_t mode)
6434 {
6435 	uint32_t fconf = 0;
6436 
6437 	if (mode & T4_FILTER_IP_FRAGMENT)
6438 		fconf |= F_FRAGMENTATION;
6439 
6440 	if (mode & T4_FILTER_MPS_HIT_TYPE)
6441 		fconf |= F_MPSHITTYPE;
6442 
6443 	if (mode & T4_FILTER_MAC_IDX)
6444 		fconf |= F_MACMATCH;
6445 
6446 	if (mode & T4_FILTER_ETH_TYPE)
6447 		fconf |= F_ETHERTYPE;
6448 
6449 	if (mode & T4_FILTER_IP_PROTO)
6450 		fconf |= F_PROTOCOL;
6451 
6452 	if (mode & T4_FILTER_IP_TOS)
6453 		fconf |= F_TOS;
6454 
6455 	if (mode & T4_FILTER_VLAN)
6456 		fconf |= F_VLAN;
6457 
6458 	if (mode & T4_FILTER_VNIC)
6459 		fconf |= F_VNIC_ID;
6460 
6461 	if (mode & T4_FILTER_PORT)
6462 		fconf |= F_PORT;
6463 
6464 	if (mode & T4_FILTER_FCoE)
6465 		fconf |= F_FCOE;
6466 
6467 	return (fconf);
6468 }
6469 
6470 static uint32_t
6471 fspec_to_fconf(struct t4_filter_specification *fs)
6472 {
6473 	uint32_t fconf = 0;
6474 
6475 	if (fs->val.frag || fs->mask.frag)
6476 		fconf |= F_FRAGMENTATION;
6477 
6478 	if (fs->val.matchtype || fs->mask.matchtype)
6479 		fconf |= F_MPSHITTYPE;
6480 
6481 	if (fs->val.macidx || fs->mask.macidx)
6482 		fconf |= F_MACMATCH;
6483 
6484 	if (fs->val.ethtype || fs->mask.ethtype)
6485 		fconf |= F_ETHERTYPE;
6486 
6487 	if (fs->val.proto || fs->mask.proto)
6488 		fconf |= F_PROTOCOL;
6489 
6490 	if (fs->val.tos || fs->mask.tos)
6491 		fconf |= F_TOS;
6492 
6493 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6494 		fconf |= F_VLAN;
6495 
6496 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6497 		fconf |= F_VNIC_ID;
6498 
6499 	if (fs->val.iport || fs->mask.iport)
6500 		fconf |= F_PORT;
6501 
6502 	if (fs->val.fcoe || fs->mask.fcoe)
6503 		fconf |= F_FCOE;
6504 
6505 	return (fconf);
6506 }
6507 
6508 static int
6509 get_filter_mode(struct adapter *sc, uint32_t *mode)
6510 {
6511 	int rc;
6512 	uint32_t fconf;
6513 
6514 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6515 	    "t4getfm");
6516 	if (rc)
6517 		return (rc);
6518 
6519 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6520 	    A_TP_VLAN_PRI_MAP);
6521 
6522 	if (sc->filter_mode != fconf) {
6523 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6524 		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
6525 		sc->filter_mode = fconf;
6526 	}
6527 
6528 	*mode = fconf_to_mode(sc->filter_mode);
6529 
6530 	end_synchronized_op(sc, LOCK_HELD);
6531 	return (0);
6532 }
6533 
6534 static int
6535 set_filter_mode(struct adapter *sc, uint32_t mode)
6536 {
6537 	uint32_t fconf;
6538 	int rc;
6539 
6540 	fconf = mode_to_fconf(mode);
6541 
6542 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6543 	    "t4setfm");
6544 	if (rc)
6545 		return (rc);
6546 
6547 	if (sc->tids.ftids_in_use > 0) {
6548 		rc = EBUSY;
6549 		goto done;
6550 	}
6551 
6552 #ifdef TCP_OFFLOAD
6553 	if (sc->offload_map) {
6554 		rc = EBUSY;
6555 		goto done;
6556 	}
6557 #endif
6558 
6559 #ifdef notyet
6560 	rc = -t4_set_filter_mode(sc, fconf);
6561 	if (rc == 0)
6562 		sc->filter_mode = fconf;
6563 #else
6564 	rc = ENOTSUP;
6565 #endif
6566 
6567 done:
6568 	end_synchronized_op(sc, LOCK_HELD);
6569 	return (rc);
6570 }
6571 
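/*
 * Read a filter's hit count straight out of its TCB via memory window 0.
 * T4 keeps a 64-bit count at offset 16 within the TCB; T5 keeps a 32-bit
 * count at offset 24.  Both are stored big-endian.
 */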
6572 static inline uint64_t
6573 get_filter_hits(struct adapter *sc, uint32_t fid)
6574 {
6575 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6576 	uint64_t hits;
6577 
6578 	memwin_info(sc, 0, &mw_base, NULL);
6579 	off = position_memwin(sc, 0,
6580 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6581 	if (is_t4(sc)) {
6582 		hits = t4_read_reg64(sc, mw_base + off + 16);
6583 		hits = be64toh(hits);
6584 	} else {
6585 		hits = t4_read_reg(sc, mw_base + off + 24);
6586 		hits = be32toh(hits);
6587 	}
6588 
6589 	return (hits);
6590 }
6591 
6592 static int
6593 get_filter(struct adapter *sc, struct t4_filter *t)
6594 {
6595 	int i, rc, nfilters = sc->tids.nftids;
6596 	struct filter_entry *f;
6597 
6598 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6599 	    "t4getf");
6600 	if (rc)
6601 		return (rc);
6602 
6603 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6604 	    t->idx >= nfilters) {
6605 		t->idx = 0xffffffff;
6606 		goto done;
6607 	}
6608 
6609 	f = &sc->tids.ftid_tab[t->idx];
6610 	for (i = t->idx; i < nfilters; i++, f++) {
6611 		if (f->valid) {
6612 			t->idx = i;
6613 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6614 			t->smtidx = f->smtidx;
6615 			if (f->fs.hitcnts)
6616 				t->hits = get_filter_hits(sc, t->idx);
6617 			else
6618 				t->hits = UINT64_MAX;
6619 			t->fs = f->fs;
6620 
6621 			goto done;
6622 		}
6623 	}
6624 
6625 	t->idx = 0xffffffff;
6626 done:
6627 	end_synchronized_op(sc, LOCK_HELD);
6628 	return (0);
6629 }
6630 
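/*
 * Install a filter.  The request is validated against the global filter
 * mode and the port count, the filter table is allocated on first use, and
 * a FW_FILTER_WR is sent to the firmware.  An IPv6 filter occupies four
 * consecutive slots, so its index must be 4-aligned and all four slots must
 * be free.  On success the thread sleeps until t4_filter_rpl() reports the
 * outcome of the work request.
 */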
6631 static int
6632 set_filter(struct adapter *sc, struct t4_filter *t)
6633 {
6634 	unsigned int nfilters, nports;
6635 	struct filter_entry *f;
6636 	int i, rc;
6637 
6638 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6639 	if (rc)
6640 		return (rc);
6641 
6642 	nfilters = sc->tids.nftids;
6643 	nports = sc->params.nports;
6644 
6645 	if (nfilters == 0) {
6646 		rc = ENOTSUP;
6647 		goto done;
6648 	}
6649 
6650 	if (!(sc->flags & FULL_INIT_DONE)) {
6651 		rc = EAGAIN;
6652 		goto done;
6653 	}
6654 
6655 	if (t->idx >= nfilters) {
6656 		rc = EINVAL;
6657 		goto done;
6658 	}
6659 
6660 	/* Validate against the global filter mode */
6661 	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
6662 		rc = E2BIG;
6663 		goto done;
6664 	}
6665 
6666 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6667 		rc = EINVAL;
6668 		goto done;
6669 	}
6670 
6671 	if (t->fs.val.iport >= nports) {
6672 		rc = EINVAL;
6673 		goto done;
6674 	}
6675 
6676 	/* Can't specify an iq if not steering to it */
6677 	if (!t->fs.dirsteer && t->fs.iq) {
6678 		rc = EINVAL;
6679 		goto done;
6680 	}
6681 
6682 	/* IPv6 filter idx must be 4 aligned */
6683 	if (t->fs.type == 1 &&
6684 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6685 		rc = EINVAL;
6686 		goto done;
6687 	}
6688 
6689 	if (sc->tids.ftid_tab == NULL) {
6690 		KASSERT(sc->tids.ftids_in_use == 0,
6691 		    ("%s: no memory allocated but filters_in_use > 0",
6692 		    __func__));
6693 
6694 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6695 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6696 		if (sc->tids.ftid_tab == NULL) {
6697 			rc = ENOMEM;
6698 			goto done;
6699 		}
6700 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6701 	}
6702 
6703 	for (i = 0; i < 4; i++) {
6704 		f = &sc->tids.ftid_tab[t->idx + i];
6705 
6706 		if (f->pending || f->valid) {
6707 			rc = EBUSY;
6708 			goto done;
6709 		}
6710 		if (f->locked) {
6711 			rc = EPERM;
6712 			goto done;
6713 		}
6714 
6715 		if (t->fs.type == 0)
6716 			break;
6717 	}
6718 
6719 	f = &sc->tids.ftid_tab[t->idx];
6720 	f->fs = t->fs;
6721 
6722 	rc = set_filter_wr(sc, t->idx);
6723 done:
6724 	end_synchronized_op(sc, 0);
6725 
6726 	if (rc == 0) {
6727 		mtx_lock(&sc->tids.ftid_lock);
6728 		for (;;) {
6729 			if (f->pending == 0) {
6730 				rc = f->valid ? 0 : EIO;
6731 				break;
6732 			}
6733 
6734 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6735 			    PCATCH, "t4setfw", 0)) {
6736 				rc = EINPROGRESS;
6737 				break;
6738 			}
6739 		}
6740 		mtx_unlock(&sc->tids.ftid_lock);
6741 	}
6742 	return (rc);
6743 }
6744 
6745 static int
6746 del_filter(struct adapter *sc, struct t4_filter *t)
6747 {
6748 	unsigned int nfilters;
6749 	struct filter_entry *f;
6750 	int rc;
6751 
6752 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6753 	if (rc)
6754 		return (rc);
6755 
6756 	nfilters = sc->tids.nftids;
6757 
6758 	if (nfilters == 0) {
6759 		rc = ENOTSUP;
6760 		goto done;
6761 	}
6762 
6763 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6764 	    t->idx >= nfilters) {
6765 		rc = EINVAL;
6766 		goto done;
6767 	}
6768 
6769 	if (!(sc->flags & FULL_INIT_DONE)) {
6770 		rc = EAGAIN;
6771 		goto done;
6772 	}
6773 
6774 	f = &sc->tids.ftid_tab[t->idx];
6775 
6776 	if (f->pending) {
6777 		rc = EBUSY;
6778 		goto done;
6779 	}
6780 	if (f->locked) {
6781 		rc = EPERM;
6782 		goto done;
6783 	}
6784 
6785 	if (f->valid) {
6786 		t->fs = f->fs;	/* extra info for the caller */
6787 		rc = del_filter_wr(sc, t->idx);
6788 	}
6789 
6790 done:
6791 	end_synchronized_op(sc, 0);
6792 
6793 	if (rc == 0) {
6794 		mtx_lock(&sc->tids.ftid_lock);
6795 		for (;;) {
6796 			if (f->pending == 0) {
6797 				rc = f->valid ? EIO : 0;
6798 				break;
6799 			}
6800 
6801 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6802 			    PCATCH, "t4delfw", 0)) {
6803 				rc = EINPROGRESS;
6804 				break;
6805 			}
6806 		}
6807 		mtx_unlock(&sc->tids.ftid_lock);
6808 	}
6809 
6810 	return (rc);
6811 }
6812 
6813 static void
6814 clear_filter(struct filter_entry *f)
6815 {
6816 	if (f->l2t)
6817 		t4_l2t_release(f->l2t);
6818 
6819 	bzero(f, sizeof (*f));
6820 }
6821 
6822 static int
6823 set_filter_wr(struct adapter *sc, int fidx)
6824 {
6825 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6826 	struct wrqe *wr;
6827 	struct fw_filter_wr *fwr;
6828 	unsigned int ftid;
6829 
6830 	ASSERT_SYNCHRONIZED_OP(sc);
6831 
6832 	if (f->fs.newdmac || f->fs.newvlan) {
6833 		/* This filter needs an L2T entry; allocate one. */
6834 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
6835 		if (f->l2t == NULL)
6836 			return (EAGAIN);
6837 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6838 		    f->fs.dmac)) {
6839 			t4_l2t_release(f->l2t);
6840 			f->l2t = NULL;
6841 			return (ENOMEM);
6842 		}
6843 	}
6844 
6845 	ftid = sc->tids.ftid_base + fidx;
6846 
6847 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6848 	if (wr == NULL)
6849 		return (ENOMEM);
6850 
6851 	fwr = wrtod(wr);
6852 	bzero(fwr, sizeof (*fwr));
6853 
6854 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
6855 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
6856 	fwr->tid_to_iq =
6857 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
6858 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
6859 		V_FW_FILTER_WR_NOREPLY(0) |
6860 		V_FW_FILTER_WR_IQ(f->fs.iq));
6861 	fwr->del_filter_to_l2tix =
6862 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
6863 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
6864 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
6865 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
6866 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
6867 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
6868 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
6869 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
6870 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
6871 		    f->fs.newvlan == VLAN_REWRITE) |
6872 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
6873 		    f->fs.newvlan == VLAN_REWRITE) |
6874 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
6875 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
6876 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
6877 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
6878 	fwr->ethtype = htobe16(f->fs.val.ethtype);
6879 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
6880 	fwr->frag_to_ovlan_vldm =
6881 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
6882 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
6883 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
6884 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
6885 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
6886 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
6887 	fwr->smac_sel = 0;
6888 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
6889 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
6890 	fwr->maci_to_matchtypem =
6891 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
6892 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
6893 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
6894 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
6895 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
6896 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
6897 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
6898 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
6899 	fwr->ptcl = f->fs.val.proto;
6900 	fwr->ptclm = f->fs.mask.proto;
6901 	fwr->ttyp = f->fs.val.tos;
6902 	fwr->ttypm = f->fs.mask.tos;
6903 	fwr->ivlan = htobe16(f->fs.val.vlan);
6904 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
6905 	fwr->ovlan = htobe16(f->fs.val.vnic);
6906 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
6907 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
6908 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
6909 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
6910 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
6911 	fwr->lp = htobe16(f->fs.val.dport);
6912 	fwr->lpm = htobe16(f->fs.mask.dport);
6913 	fwr->fp = htobe16(f->fs.val.sport);
6914 	fwr->fpm = htobe16(f->fs.mask.sport);
6915 	if (f->fs.newsmac)
6916 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
6917 
6918 	f->pending = 1;
6919 	sc->tids.ftids_in_use++;
6920 
6921 	t4_wrq_tx(sc, wr);
6922 	return (0);
6923 }
6924 
6925 static int
6926 del_filter_wr(struct adapter *sc, int fidx)
6927 {
6928 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6929 	struct wrqe *wr;
6930 	struct fw_filter_wr *fwr;
6931 	unsigned int ftid;
6932 
6933 	ftid = sc->tids.ftid_base + fidx;
6934 
6935 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6936 	if (wr == NULL)
6937 		return (ENOMEM);
6938 	fwr = wrtod(wr);
6939 	bzero(fwr, sizeof (*fwr));
6940 
6941 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
6942 
6943 	f->pending = 1;
6944 	t4_wrq_tx(sc, wr);
6945 	return (0);
6946 }
6947 
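/*
 * Reply handler for filter work requests.  Runs when the firmware's reply
 * arrives on the firmware event queue, updates the filter entry that the
 * reply is for, and wakes up any thread sleeping in set_filter() or
 * del_filter().
 */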
6948 int
6949 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
6950 {
6951 	struct adapter *sc = iq->adapter;
6952 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
6953 	unsigned int idx = GET_TID(rpl);
6954 
6955 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
6956 	    rss->opcode));
6957 
6958 	if (idx >= sc->tids.ftid_base &&
6959 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
6960 		unsigned int rc = G_COOKIE(rpl->cookie);
6961 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
6962 
6963 		mtx_lock(&sc->tids.ftid_lock);
6964 		if (rc == FW_FILTER_WR_FLT_ADDED) {
6965 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
6966 			    __func__, idx));
6967 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
6968 			f->pending = 0;  /* asynchronous setup completed */
6969 			f->valid = 1;
6970 		} else {
6971 			if (rc != FW_FILTER_WR_FLT_DELETED) {
6972 				/* Add or delete failed, display an error */
6973 				log(LOG_ERR,
6974 				    "filter %u setup failed with error %u\n",
6975 				    idx, rc);
6976 			}
6977 
6978 			clear_filter(f);
6979 			sc->tids.ftids_in_use--;
6980 		}
6981 		wakeup(&sc->tids.ftid_tab);
6982 		mtx_unlock(&sc->tids.ftid_lock);
6983 	}
6984 
6985 	return (0);
6986 }
6987 
6988 static int
6989 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
6990 {
6991 	int rc;
6992 
6993 	if (cntxt->cid > M_CTXTQID)
6994 		return (EINVAL);
6995 
6996 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
6997 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
6998 		return (EINVAL);
6999 
7000 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7001 	if (rc)
7002 		return (rc);
7003 
7004 	if (sc->flags & FW_OK) {
7005 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7006 		    &cntxt->data[0]);
7007 		if (rc == 0)
7008 			goto done;
7009 	}
7010 
7011 	/*
7012 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7013 	 * the backdoor.
7014 	 */
7015 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7016 done:
7017 	end_synchronized_op(sc, 0);
7018 	return (rc);
7019 }
7020 
7021 static int
7022 load_fw(struct adapter *sc, struct t4_data *fw)
7023 {
7024 	int rc;
7025 	uint8_t *fw_data;
7026 
7027 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7028 	if (rc)
7029 		return (rc);
7030 
7031 	if (sc->flags & FULL_INIT_DONE) {
7032 		rc = EBUSY;
7033 		goto done;
7034 	}
7035 
7036 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7037 	if (fw_data == NULL) {
7038 		rc = ENOMEM;
7039 		goto done;
7040 	}
7041 
7042 	rc = copyin(fw->data, fw_data, fw->len);
7043 	if (rc == 0)
7044 		rc = -t4_load_fw(sc, fw_data, fw->len);
7045 
7046 	free(fw_data, M_CXGBE);
7047 done:
7048 	end_synchronized_op(sc, 0);
7049 	return (rc);
7050 }
7051 
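/*
 * Copy a range of adapter memory out to userspace through a PCIe memory
 * window.  The window is repositioned as the copy advances and the data is
 * staged through a bounce buffer, at most one window aperture at a time,
 * reading 32 bits per register access.
 */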
7052 static int
7053 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7054 {
7055 	uint32_t addr, off, remaining, i, n;
7056 	uint32_t *buf, *b;
7057 	uint32_t mw_base, mw_aperture;
7058 	int rc;
7059 	uint8_t *dst;
7060 
7061 	rc = validate_mem_range(sc, mr->addr, mr->len);
7062 	if (rc != 0)
7063 		return (rc);
7064 
7065 	memwin_info(sc, win, &mw_base, &mw_aperture);
7066 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7067 	addr = mr->addr;
7068 	remaining = mr->len;
7069 	dst = (void *)mr->data;
7070 
7071 	while (remaining) {
7072 		off = position_memwin(sc, win, addr);
7073 
7074 		/* number of bytes that we'll copy in the inner loop */
7075 		n = min(remaining, mw_aperture - off);
7076 		for (i = 0; i < n; i += 4)
7077 			*b++ = t4_read_reg(sc, mw_base + off + i);
7078 
7079 		rc = copyout(buf, dst, n);
7080 		if (rc != 0)
7081 			break;
7082 
7083 		b = buf;
7084 		dst += n;
7085 		remaining -= n;
7086 		addr += n;
7087 	}
7088 
7089 	free(buf, M_CXGBE);
7090 	return (rc);
7091 }
7092 
7093 static int
7094 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7095 {
7096 	int rc;
7097 
7098 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7099 		return (EINVAL);
7100 
7101 	if (i2cd->len > 1) {
7102 		/* XXX: need fw support for longer reads in one go */
7103 		return (ENOTSUP);
7104 	}
7105 
7106 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7107 	if (rc)
7108 		return (rc);
7109 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7110 	    i2cd->offset, &i2cd->data[0]);
7111 	end_synchronized_op(sc, 0);
7112 
7113 	return (rc);
7114 }
7115 
7116 int
7117 t4_os_find_pci_capability(struct adapter *sc, int cap)
7118 {
7119 	int i;
7120 
7121 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7122 }
7123 
7124 int
7125 t4_os_pci_save_state(struct adapter *sc)
7126 {
7127 	device_t dev;
7128 	struct pci_devinfo *dinfo;
7129 
7130 	dev = sc->dev;
7131 	dinfo = device_get_ivars(dev);
7132 
7133 	pci_cfg_save(dev, dinfo, 0);
7134 	return (0);
7135 }
7136 
7137 int
7138 t4_os_pci_restore_state(struct adapter *sc)
7139 {
7140 	device_t dev;
7141 	struct pci_devinfo *dinfo;
7142 
7143 	dev = sc->dev;
7144 	dinfo = device_get_ivars(dev);
7145 
7146 	pci_cfg_restore(dev, dinfo);
7147 	return (0);
7148 }
7149 
7150 void
7151 t4_os_portmod_changed(const struct adapter *sc, int idx)
7152 {
7153 	struct port_info *pi = sc->port[idx];
7154 	static const char *mod_str[] = {
7155 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7156 	};
7157 
7158 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7159 		if_printf(pi->ifp, "transceiver unplugged.\n");
7160 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7161 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7162 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7163 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7164 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7165 		if_printf(pi->ifp, "%s transceiver inserted.\n",
7166 		    mod_str[pi->mod_type]);
7167 	} else {
7168 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7169 		    pi->mod_type);
7170 	}
7171 }
7172 
7173 void
7174 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
7175 {
7176 	struct port_info *pi = sc->port[idx];
7177 	struct ifnet *ifp = pi->ifp;
7178 
7179 	if (link_stat) {
7180 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7181 		if_link_state_change(ifp, LINK_STATE_UP);
7182 	} else
7183 		if_link_state_change(ifp, LINK_STATE_DOWN);
7184 }
7185 
7186 void
7187 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7188 {
7189 	struct adapter *sc;
7190 
7191 	mtx_lock(&t4_list_lock);
7192 	SLIST_FOREACH(sc, &t4_list, link) {
7193 		/*
7194 		 * func should not make any assumptions about what state sc is
7195 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7196 		 */
7197 		func(sc, arg);
7198 	}
7199 	mtx_unlock(&t4_list_lock);
7200 }
7201 
7202 static int
7203 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7204 {
7205 	return (0);
7206 }
7207 
7208 static int
7209 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7210 {
7211 	return (0);
7212 }
7213 
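/*
 * ioctl handler for the adapter's control node.  All requests require
 * PRIV_DRIVER.  As a rough sketch (assuming the control node for the first
 * adapter is /dev/t4nex0, with error handling omitted), a userland tool
 * such as cxgbetool would read a register along these lines:
 *
 *	struct t4_reg r = { .addr = 0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	ioctl(fd, CHELSIO_T4_GETREG, &r);
 *	printf("0x%x: 0x%llx\n", r.addr, (unsigned long long)r.val);
 */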
7214 static int
7215 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7216     struct thread *td)
7217 {
7218 	int rc;
7219 	struct adapter *sc = dev->si_drv1;
7220 
7221 	rc = priv_check(td, PRIV_DRIVER);
7222 	if (rc != 0)
7223 		return (rc);
7224 
7225 	switch (cmd) {
7226 	case CHELSIO_T4_GETREG: {
7227 		struct t4_reg *edata = (struct t4_reg *)data;
7228 
7229 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7230 			return (EFAULT);
7231 
7232 		if (edata->size == 4)
7233 			edata->val = t4_read_reg(sc, edata->addr);
7234 		else if (edata->size == 8)
7235 			edata->val = t4_read_reg64(sc, edata->addr);
7236 		else
7237 			return (EINVAL);
7238 
7239 		break;
7240 	}
7241 	case CHELSIO_T4_SETREG: {
7242 		struct t4_reg *edata = (struct t4_reg *)data;
7243 
7244 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7245 			return (EFAULT);
7246 
7247 		if (edata->size == 4) {
7248 			if (edata->val & 0xffffffff00000000)
7249 				return (EINVAL);
7250 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7251 		} else if (edata->size == 8)
7252 			t4_write_reg64(sc, edata->addr, edata->val);
7253 		else
7254 			return (EINVAL);
7255 		break;
7256 	}
7257 	case CHELSIO_T4_REGDUMP: {
7258 		struct t4_regdump *regs = (struct t4_regdump *)data;
7259 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7260 		uint8_t *buf;
7261 
7262 		if (regs->len < reglen) {
7263 			regs->len = reglen; /* hint to the caller */
7264 			return (ENOBUFS);
7265 		}
7266 
7267 		regs->len = reglen;
7268 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7269 		t4_get_regs(sc, regs, buf);
7270 		rc = copyout(buf, regs->data, reglen);
7271 		free(buf, M_CXGBE);
7272 		break;
7273 	}
7274 	case CHELSIO_T4_GET_FILTER_MODE:
7275 		rc = get_filter_mode(sc, (uint32_t *)data);
7276 		break;
7277 	case CHELSIO_T4_SET_FILTER_MODE:
7278 		rc = set_filter_mode(sc, *(uint32_t *)data);
7279 		break;
7280 	case CHELSIO_T4_GET_FILTER:
7281 		rc = get_filter(sc, (struct t4_filter *)data);
7282 		break;
7283 	case CHELSIO_T4_SET_FILTER:
7284 		rc = set_filter(sc, (struct t4_filter *)data);
7285 		break;
7286 	case CHELSIO_T4_DEL_FILTER:
7287 		rc = del_filter(sc, (struct t4_filter *)data);
7288 		break;
7289 	case CHELSIO_T4_GET_SGE_CONTEXT:
7290 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7291 		break;
7292 	case CHELSIO_T4_LOAD_FW:
7293 		rc = load_fw(sc, (struct t4_data *)data);
7294 		break;
7295 	case CHELSIO_T4_GET_MEM:
7296 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7297 		break;
7298 	case CHELSIO_T4_GET_I2C:
7299 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7300 		break;
7301 	case CHELSIO_T4_CLEAR_STATS: {
7302 		int i;
7303 		u_int port_id = *(uint32_t *)data;
7304 		struct port_info *pi;
7305 
7306 		if (port_id >= sc->params.nports)
7307 			return (EINVAL);
7308 
7309 		/* MAC stats */
7310 		t4_clr_port_stats(sc, port_id);
7311 
7312 		pi = sc->port[port_id];
7313 		if (pi->flags & PORT_INIT_DONE) {
7314 			struct sge_rxq *rxq;
7315 			struct sge_txq *txq;
7316 			struct sge_wrq *wrq;
7317 
7318 			for_each_rxq(pi, i, rxq) {
7319 #if defined(INET) || defined(INET6)
7320 				rxq->lro.lro_queued = 0;
7321 				rxq->lro.lro_flushed = 0;
7322 #endif
7323 				rxq->rxcsum = 0;
7324 				rxq->vlan_extraction = 0;
7325 			}
7326 
7327 			for_each_txq(pi, i, txq) {
7328 				txq->txcsum = 0;
7329 				txq->tso_wrs = 0;
7330 				txq->vlan_insertion = 0;
7331 				txq->imm_wrs = 0;
7332 				txq->sgl_wrs = 0;
7333 				txq->txpkt_wrs = 0;
7334 				txq->txpkts_wrs = 0;
7335 				txq->txpkts_pkts = 0;
7336 				txq->br->br_drops = 0;
7337 				txq->no_dmamap = 0;
7338 				txq->no_desc = 0;
7339 			}
7340 
7341 #ifdef TCP_OFFLOAD
7342 			/* nothing to clear for each ofld_rxq */
7343 
7344 			for_each_ofld_txq(pi, i, wrq) {
7345 				wrq->tx_wrs = 0;
7346 				wrq->no_desc = 0;
7347 			}
7348 #endif
7349 			wrq = &sc->sge.ctrlq[pi->port_id];
7350 			wrq->tx_wrs = 0;
7351 			wrq->no_desc = 0;
7352 		}
7353 		break;
7354 	}
7355 	default:
7356 		rc = EINVAL;
7357 	}
7358 
7359 	return (rc);
7360 }
7361 
7362 #ifdef TCP_OFFLOAD
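/*
 * Enable or disable TOE on a port.  Enabling brings the adapter fully up if
 * needed and activates the TOM upper-layer driver the first time any port
 * turns TOE on; sc->offload_map tracks which ports currently have it
 * enabled.
 */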
7363 static int
7364 toe_capability(struct port_info *pi, int enable)
7365 {
7366 	int rc;
7367 	struct adapter *sc = pi->adapter;
7368 
7369 	ASSERT_SYNCHRONIZED_OP(sc);
7370 
7371 	if (!is_offload(sc))
7372 		return (ENODEV);
7373 
7374 	if (enable) {
7375 		if (!(sc->flags & FULL_INIT_DONE)) {
7376 			rc = cxgbe_init_synchronized(pi);
7377 			if (rc)
7378 				return (rc);
7379 		}
7380 
7381 		if (isset(&sc->offload_map, pi->port_id))
7382 			return (0);
7383 
7384 		if (!(sc->flags & TOM_INIT_DONE)) {
7385 			rc = t4_activate_uld(sc, ULD_TOM);
7386 			if (rc == EAGAIN) {
7387 				log(LOG_WARNING,
7388 				    "You must kldload t4_tom.ko before trying "
7389 				    "to enable TOE on a cxgbe interface.\n");
7390 			}
7391 			if (rc != 0)
7392 				return (rc);
7393 			KASSERT(sc->tom_softc != NULL,
7394 			    ("%s: TOM activated but softc NULL", __func__));
7395 			KASSERT(sc->flags & TOM_INIT_DONE,
7396 			    ("%s: TOM activated but flag not set", __func__));
7397 		}
7398 
7399 		setbit(&sc->offload_map, pi->port_id);
7400 	} else {
7401 		if (!isset(&sc->offload_map, pi->port_id))
7402 			return (0);
7403 
7404 		KASSERT(sc->flags & TOM_INIT_DONE,
7405 		    ("%s: TOM never initialized?", __func__));
7406 		clrbit(&sc->offload_map, pi->port_id);
7407 	}
7408 
7409 	return (0);
7410 }
7411 
7412 /*
7413  * Add an upper layer driver to the global list.
7414  */
7415 int
7416 t4_register_uld(struct uld_info *ui)
7417 {
7418 	int rc = 0;
7419 	struct uld_info *u;
7420 
7421 	mtx_lock(&t4_uld_list_lock);
7422 	SLIST_FOREACH(u, &t4_uld_list, link) {
7423 		if (u->uld_id == ui->uld_id) {
7424 			rc = EEXIST;
7425 			goto done;
7426 		}
7427 	}
7428 
7429 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7430 	ui->refcount = 0;
7431 done:
7432 	mtx_unlock(&t4_uld_list_lock);
7433 	return (rc);
7434 }
7435 
7436 int
7437 t4_unregister_uld(struct uld_info *ui)
7438 {
7439 	int rc = EINVAL;
7440 	struct uld_info *u;
7441 
7442 	mtx_lock(&t4_uld_list_lock);
7443 
7444 	SLIST_FOREACH(u, &t4_uld_list, link) {
7445 		if (u == ui) {
7446 			if (ui->refcount > 0) {
7447 				rc = EBUSY;
7448 				goto done;
7449 			}
7450 
7451 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7452 			rc = 0;
7453 			goto done;
7454 		}
7455 	}
7456 done:
7457 	mtx_unlock(&t4_uld_list_lock);
7458 	return (rc);
7459 }

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	mtx_lock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	mtx_unlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	mtx_lock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	mtx_unlock(&t4_uld_list_lock);

	return (rc);
}
#endif

/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
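	/*
	 * The rx queue size must be a multiple of 8; round it up.  This is
	 * equivalent to roundup2(t4_qsize_rxq, 8) from <sys/param.h>.
	 */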
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
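
/*
 * Illustrative example (not part of the driver): the variables adjusted
 * above correspond to loader tunables that can be set before the module
 * loads, e.g. in /boot/loader.conf; see cxgbe(4) for the authoritative
 * list.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *	hw.cxgbe.interrupt_types="2"	# bitmap: 1=INTx, 2=MSI, 4=MSI-X
 */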

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

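	/*
	 * This handler is shared by the t4nex and t5nex modules (see the
	 * DRIVER_MODULE declarations below), so use a refcount to run the
	 * one-time setup and teardown exactly once.
	 */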
	switch (cmd) {
	case MOD_LOAD:
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
#ifdef TCP_OFFLOAD
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
#endif
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);

		/*
		 * Destroy the locks only once both lists are known to be
		 * empty, so that a failed (EBUSY) unload leaves them intact
		 * for continued use.
		 */
		mtx_destroy(&t4_list_lock);
#ifdef TCP_OFFLOAD
		mtx_destroy(&t4_uld_list_lock);
#endif
		break;
	}

	return (rc);
}
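
/*
 * Note: the driver is normally loaded with "kldload if_cxgbe" or via
 * if_cxgbe_load="YES" in /boot/loader.conf; see cxgbe(4).
 */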

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
7637