xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 03836978bec158bdc0ecee7a4198962f91ce8298)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
/*
 * T4 bus driver interface.  The t4nex device is the PCIe nexus for the whole
 * adapter; per-port network devices (cxgbe) hang off it as children.
 */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",		/* device name prefix */
	t4_methods,
	sizeof(struct adapter)	/* softc: one struct adapter per card */
};
86 
87 
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 	DEVMETHOD(device_probe,		cxgbe_probe),
94 	DEVMETHOD(device_attach,	cxgbe_attach),
95 	DEVMETHOD(device_detach,	cxgbe_detach),
96 	{ 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99 	"cxgbe",
100 	cxgbe_methods,
101 	sizeof(struct port_info)
102 };
103 
/* Control character device (/dev/t4nex<n>) used by cxgbetool. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116 
/*
 * T5 bus driver interface.  Only probe differs from T4; attach/detach are
 * shared with the T4 driver.
 */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),	/* shared with T4 */
	DEVMETHOD(device_detach,	t4_detach),	/* shared with T4 */

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",		/* device name prefix */
	t5_methods,
	sizeof(struct adapter)	/* softc: one struct adapter per card */
};
131 
132 
/*
 * T5 port (cxl) interface.  Reuses the cxgbe method table; only the device
 * name differs.
 */
static driver_t cxl_driver = {
	"cxl",			/* device name prefix */
	cxgbe_methods,		/* same probe/attach/detach as cxgbe */
	sizeof(struct port_info)
};

/* Control character device (/dev/t5nex<n>); same handlers as the T4 one. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148 
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156 
/* Malloc type for all of this driver's allocations. */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;
static SLIST_HEAD(, adapter) t4_list;	/* all attached adapters */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;	/* registered upper-layer drivers */
#endif
169 
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180 
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 * (-1 here means "pick a default in tweak_tunables()"; the NxxQ_* defines
 * are the compile-time ceilings/defaults those choices are based on.)
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 * NOTE(review): these are unsigned int but registered via TUNABLE_INT,
 * which takes int * — works on supported platforms but worth confirming.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 * Default allows all three; cfg_itype_and_nqueues() picks the best available.
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;	/* -1: decided in tweak_tunables() */
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only tunable (note the hw.cxl prefix). */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293 
/*
 * Result of the interrupt/queue sizing computation done by
 * cfg_itype_and_nqueues(); copied into the adapter softc during attach.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;		/* flags OR'ed into sc->flags */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};
309 
/* Software state for one hardware packet filter (see the filter ioctls). */
struct filter_entry {
        uint32_t valid:1;	/* filter allocated and valid */
        uint32_t locked:1;	/* filter is administratively locked */
        uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;	/* the filter itself */
};
319 
/*
 * Bitmask of MAC settings for update_mac_settings(); pass XGMAC_ALL to
 * (re)apply everything.
 */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),	/* VLAN extraction */
	XGMAC_UCADDR	= (1 << 4),	/* unicast MAC address */
	XGMAC_MCADDRS	= (1 << 5),	/* multicast filter list */

	XGMAC_ALL	= 0xffff
};
330 
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336     uint32_t *);
337 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338 static uint32_t position_memwin(struct adapter *, int, uint32_t);
339 static int cfg_itype_and_nqueues(struct adapter *, int, int,
340     struct intrs_and_queues *);
341 static int prep_firmware(struct adapter *);
342 static int partition_resources(struct adapter *, const struct firmware *,
343     const char *);
344 static int get_params__pre_init(struct adapter *);
345 static int get_params__post_init(struct adapter *);
346 static int set_params__post_init(struct adapter *);
347 static void t4_set_desc(struct adapter *);
348 static void build_medialist(struct port_info *);
349 static int update_mac_settings(struct port_info *, int);
350 static int cxgbe_init_synchronized(struct port_info *);
351 static int cxgbe_uninit_synchronized(struct port_info *);
352 static int setup_intr_handlers(struct adapter *);
353 static int adapter_full_init(struct adapter *);
354 static int adapter_full_uninit(struct adapter *);
355 static int port_full_init(struct port_info *);
356 static int port_full_uninit(struct port_info *);
357 static void quiesce_eq(struct adapter *, struct sge_eq *);
358 static void quiesce_iq(struct adapter *, struct sge_iq *);
359 static void quiesce_fl(struct adapter *, struct sge_fl *);
360 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361     driver_intr_t *, void *, char *);
362 static int t4_free_irq(struct adapter *, struct irq *);
363 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364     unsigned int);
365 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366 static void cxgbe_tick(void *);
367 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369     struct mbuf *);
370 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371 static int fw_msg_not_handled(struct adapter *, const __be64 *);
372 static int t4_sysctls(struct adapter *);
373 static int cxgbe_sysctls(struct port_info *);
374 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
377 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
378 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
379 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
380 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
381 #ifdef SBUF_DRAIN
382 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
383 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
384 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
385 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
387 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
388 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
389 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
390 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
391 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
393 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
394 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
396 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
398 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
399 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
400 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
401 #endif
402 static inline void txq_start(struct ifnet *, struct sge_txq *);
403 static uint32_t fconf_to_mode(uint32_t);
404 static uint32_t mode_to_fconf(uint32_t);
405 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
406 static int get_filter_mode(struct adapter *, uint32_t *);
407 static int set_filter_mode(struct adapter *, uint32_t);
408 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
409 static int get_filter(struct adapter *, struct t4_filter *);
410 static int set_filter(struct adapter *, struct t4_filter *);
411 static int del_filter(struct adapter *, struct t4_filter *);
412 static void clear_filter(struct filter_entry *);
413 static int set_filter_wr(struct adapter *, int);
414 static int del_filter_wr(struct adapter *, int);
415 static int get_sge_context(struct adapter *, struct t4_sge_context *);
416 static int load_fw(struct adapter *, struct t4_data *);
417 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
418 static int read_i2c(struct adapter *, struct t4_i2c_data *);
419 #ifdef TCP_OFFLOAD
420 static int toe_capability(struct port_info *, int);
421 #endif
422 static int mod_event(module_t, int, void *);
423 
/* PCI device ID -> human-readable description, for T4 and T5 parts. */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},
	{0x5407,  "Chelsio T520-SO"},
	{0x5408,  "Chelsio T520-CX"},
	{0x5411,  "Chelsio T520-LL-CR"},
#ifdef notyet
	/* T5 parts not yet enabled for probing. */
	{0x5402,  "Chelsio T522-CR"},
	{0x5403,  "Chelsio T540-CR"},
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5409,  "Chelsio T520-BT"},
	{0x540a,  "Chelsio T504-BT"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540d,  "Chelsio T580-CR"},
	{0x540e,  "Chelsio T540-LP-CR"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5410,  "Chelsio T580-LP-CR"},
	{0x5412,  "Chelsio T560-CR"},
	{0x5413,  "Chelsio T580-CR"},
#endif
};
466 
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
479 
480 static int
481 t4_probe(device_t dev)
482 {
483 	int i;
484 	uint16_t v = pci_get_vendor(dev);
485 	uint16_t d = pci_get_device(dev);
486 	uint8_t f = pci_get_function(dev);
487 
488 	if (v != PCI_VENDOR_ID_CHELSIO)
489 		return (ENXIO);
490 
491 	/* Attach only to PF0 of the FPGA */
492 	if (d == 0xa000 && f != 0)
493 		return (ENXIO);
494 
495 	for (i = 0; i < nitems(t4_pciids); i++) {
496 		if (d == t4_pciids[i].device) {
497 			device_set_desc(dev, t4_pciids[i].desc);
498 			return (BUS_PROBE_DEFAULT);
499 		}
500 	}
501 
502 	return (ENXIO);
503 }
504 
505 static int
506 t5_probe(device_t dev)
507 {
508 	int i;
509 	uint16_t v = pci_get_vendor(dev);
510 	uint16_t d = pci_get_device(dev);
511 	uint8_t f = pci_get_function(dev);
512 
513 	if (v != PCI_VENDOR_ID_CHELSIO)
514 		return (ENXIO);
515 
516 	/* Attach only to PF0 of the FPGA */
517 	if (d == 0xb000 && f != 0)
518 		return (ENXIO);
519 
520 	for (i = 0; i < nitems(t5_pciids); i++) {
521 		if (d == t5_pciids[i].device) {
522 			device_set_desc(dev, t5_pciids[i].desc);
523 			return (BUS_PROBE_DEFAULT);
524 		}
525 	}
526 
527 	return (ENXIO);
528 }
529 
/*
 * Attach the T4/T5 nexus.  Sets up PCIe, registers the adapter on the global
 * list, creates the control cdev, brings up the firmware, sizes interrupts
 * and SGE queues, and adds one child device (cxgbe/cxl) per port.  On error
 * after the cdev exists the adapter is left in "recovery mode" (rc forced to
 * 0) so the userland tool can still reach it; otherwise t4_detach() cleans up.
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Enable bus mastering; bump MRRS and enable relaxed ordering. */
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	/* Install catch-all handlers; real ones are registered below/later. */
	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Identity mapping of tx channel -> tx modulation queue. */
	for (i = 0; i < NCHAN; i++)
		sc->params.tp.tx_modq[i] = i;

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		/* Per-speed holdoff defaults from the tunables. */
		if (is_10G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;	/* no exact MAC filter yet */

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	/* Adapter-wide queue totals derived from the per-port counts. */
	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	/* Arrays for the queues themselves and the iq/eq id -> queue maps. */
	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	/* Attach the per-port children added in the first pass. */
	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);	/* t4_detach is idempotent and cleans up */
	else
		t4_sysctls(sc);

	return (rc);
}
825 
/*
 * Idempotent.  Also used directly by t4_attach() on its error path, so every
 * release below is guarded by a check that the resource actually exists.
 * The final bzero() resets the softc so a second call is harmless.
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	/* Kill userland access first. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* Free the virtual interfaces, child devices, and port softcs. */
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	/* Tell the firmware we're going away. */
	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* Take the adapter off the global list before destroying its lock. */
	if (mtx_initialized(&sc->sc_lock)) {
		mtx_lock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);

	/* Reset the softc so a repeat detach (idempotence) is a no-op. */
	bzero(sc, sizeof(*sc));

	return (0);
}
920 
921 
922 static int
923 cxgbe_probe(device_t dev)
924 {
925 	char buf[128];
926 	struct port_info *pi = device_get_softc(dev);
927 
928 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
929 	device_set_desc_copy(dev, buf);
930 
931 	return (BUS_PROBE_DEFAULT);
932 }
933 
/* Interface capabilities supported by the hardware/driver ... */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
/* ... and the subset enabled by default (currently all of them). */
#define T4_CAP_ENABLE (T4_CAP)

/*
 * Attach a port: allocate and configure the ifnet, set up media, register
 * the VLAN config hook, and attach to the network stack.  Queues are not
 * allocated here; that happens lazily in cxgbe_init_synchronized().
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	/* Get notified of VLANs configured on top of this interface. */
	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
996 
/*
 * Newbus detach for a cxgbe port.  Marks the port DOOMED and takes the
 * adapter's BUSY "token" so that concurrent ioctl/init operations (which
 * use the same begin/end_synchronized_op protocol) cannot run while the
 * port is being torn down.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait for any in-flight synchronized operation to finish. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/*
	 * Stop the tick callout under the port lock, then drain it outside
	 * the lock so a concurrent invocation can complete.
	 */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the BUSY token and wake anyone waiting for it. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1041 
1042 static void
1043 cxgbe_init(void *arg)
1044 {
1045 	struct port_info *pi = arg;
1046 	struct adapter *sc = pi->adapter;
1047 
1048 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1049 		return;
1050 	cxgbe_init_synchronized(pi);
1051 	end_synchronized_op(sc, 0);
1052 }
1053 
1054 static int
1055 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1056 {
1057 	int rc = 0, mtu, flags;
1058 	struct port_info *pi = ifp->if_softc;
1059 	struct adapter *sc = pi->adapter;
1060 	struct ifreq *ifr = (struct ifreq *)data;
1061 	uint32_t mask;
1062 
1063 	switch (cmd) {
1064 	case SIOCSIFMTU:
1065 		mtu = ifr->ifr_mtu;
1066 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1067 			return (EINVAL);
1068 
1069 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1070 		if (rc)
1071 			return (rc);
1072 		ifp->if_mtu = mtu;
1073 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1074 			t4_update_fl_bufsize(ifp);
1075 			rc = update_mac_settings(pi, XGMAC_MTU);
1076 		}
1077 		end_synchronized_op(sc, 0);
1078 		break;
1079 
1080 	case SIOCSIFFLAGS:
1081 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1082 		if (rc)
1083 			return (rc);
1084 
1085 		if (ifp->if_flags & IFF_UP) {
1086 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1087 				flags = pi->if_flags;
1088 				if ((ifp->if_flags ^ flags) &
1089 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1090 					rc = update_mac_settings(pi,
1091 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1092 				}
1093 			} else
1094 				rc = cxgbe_init_synchronized(pi);
1095 			pi->if_flags = ifp->if_flags;
1096 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1097 			rc = cxgbe_uninit_synchronized(pi);
1098 		end_synchronized_op(sc, 0);
1099 		break;
1100 
1101 	case SIOCADDMULTI:
1102 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1103 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1104 		if (rc)
1105 			return (rc);
1106 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1107 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
1108 		end_synchronized_op(sc, LOCK_HELD);
1109 		break;
1110 
1111 	case SIOCSIFCAP:
1112 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1113 		if (rc)
1114 			return (rc);
1115 
1116 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1117 		if (mask & IFCAP_TXCSUM) {
1118 			ifp->if_capenable ^= IFCAP_TXCSUM;
1119 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1120 
1121 			if (IFCAP_TSO4 & ifp->if_capenable &&
1122 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1123 				ifp->if_capenable &= ~IFCAP_TSO4;
1124 				if_printf(ifp,
1125 				    "tso4 disabled due to -txcsum.\n");
1126 			}
1127 		}
1128 		if (mask & IFCAP_TXCSUM_IPV6) {
1129 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1130 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1131 
1132 			if (IFCAP_TSO6 & ifp->if_capenable &&
1133 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1134 				ifp->if_capenable &= ~IFCAP_TSO6;
1135 				if_printf(ifp,
1136 				    "tso6 disabled due to -txcsum6.\n");
1137 			}
1138 		}
1139 		if (mask & IFCAP_RXCSUM)
1140 			ifp->if_capenable ^= IFCAP_RXCSUM;
1141 		if (mask & IFCAP_RXCSUM_IPV6)
1142 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1143 
1144 		/*
1145 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1146 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1147 		 * sending a TSO request our way, so it's sufficient to toggle
1148 		 * IFCAP_TSOx only.
1149 		 */
1150 		if (mask & IFCAP_TSO4) {
1151 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1152 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1153 				if_printf(ifp, "enable txcsum first.\n");
1154 				rc = EAGAIN;
1155 				goto fail;
1156 			}
1157 			ifp->if_capenable ^= IFCAP_TSO4;
1158 		}
1159 		if (mask & IFCAP_TSO6) {
1160 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1161 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1162 				if_printf(ifp, "enable txcsum6 first.\n");
1163 				rc = EAGAIN;
1164 				goto fail;
1165 			}
1166 			ifp->if_capenable ^= IFCAP_TSO6;
1167 		}
1168 		if (mask & IFCAP_LRO) {
1169 #if defined(INET) || defined(INET6)
1170 			int i;
1171 			struct sge_rxq *rxq;
1172 
1173 			ifp->if_capenable ^= IFCAP_LRO;
1174 			for_each_rxq(pi, i, rxq) {
1175 				if (ifp->if_capenable & IFCAP_LRO)
1176 					rxq->iq.flags |= IQ_LRO_ENABLED;
1177 				else
1178 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1179 			}
1180 #endif
1181 		}
1182 #ifdef TCP_OFFLOAD
1183 		if (mask & IFCAP_TOE) {
1184 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1185 
1186 			rc = toe_capability(pi, enable);
1187 			if (rc != 0)
1188 				goto fail;
1189 
1190 			ifp->if_capenable ^= mask;
1191 		}
1192 #endif
1193 		if (mask & IFCAP_VLAN_HWTAGGING) {
1194 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1195 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1196 				rc = update_mac_settings(pi, XGMAC_VLANEX);
1197 		}
1198 		if (mask & IFCAP_VLAN_MTU) {
1199 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1200 
1201 			/* Need to find out how to disable auto-mtu-inflation */
1202 		}
1203 		if (mask & IFCAP_VLAN_HWTSO)
1204 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1205 		if (mask & IFCAP_VLAN_HWCSUM)
1206 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1207 
1208 #ifdef VLAN_CAPABILITIES
1209 		VLAN_CAPABILITIES(ifp);
1210 #endif
1211 fail:
1212 		end_synchronized_op(sc, 0);
1213 		break;
1214 
1215 	case SIOCSIFMEDIA:
1216 	case SIOCGIFMEDIA:
1217 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1218 		break;
1219 
1220 	default:
1221 		rc = ether_ioctl(ifp, cmd, data);
1222 	}
1223 
1224 	return (rc);
1225 }
1226 
/*
 * if_transmit handler.  Selects a tx queue (by flowid when available),
 * and either transmits directly while holding the queue lock or enqueues
 * the mbuf on the queue's buf_ring for whoever holds the lock.  Returns
 * 0 if the mbuf was sent or queued, or an errno if it was dropped.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Drop immediately if the link is down. */
	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Spread flows across this port's tx queues by flowid. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		/* Schedule the tx callout to service the ring if nobody will. */
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		/* Service the held-up mbuf (or the ring head) in FIFO order. */
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1293 
1294 static void
1295 cxgbe_qflush(struct ifnet *ifp)
1296 {
1297 	struct port_info *pi = ifp->if_softc;
1298 	struct sge_txq *txq;
1299 	int i;
1300 	struct mbuf *m;
1301 
1302 	/* queues do not exist if !PORT_INIT_DONE. */
1303 	if (pi->flags & PORT_INIT_DONE) {
1304 		for_each_txq(pi, i, txq) {
1305 			TXQ_LOCK(txq);
1306 			m_freem(txq->m);
1307 			txq->m = NULL;
1308 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1309 				m_freem(m);
1310 			TXQ_UNLOCK(txq);
1311 		}
1312 	}
1313 	if_qflush(ifp);
1314 }
1315 
/*
 * ifmedia change callback.  Manual media selection is not supported by
 * this driver yet; log the attempt and return EOPNOTSUPP.
 */
static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}
1325 
/*
 * ifmedia status callback: report link validity/activity and, when the
 * current media is autoselect, the active media type derived from the
 * negotiated link speed.
 */
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	/* ifm_data encodes the port and module type the medialist was built for. */
	int data = (pi->port_type << 8) | pi->mod_type;

	/* Rebuild the media list if the transceiver/module has changed. */
	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	/* Map the negotiated speed to an ifmedia subtype. */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
1362 
/*
 * Stop the adapter after a fatal error: disable SGE DMA and all
 * interrupts, then log the event at emergency priority.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1371 
/*
 * Map BAR0 (register space) and BAR4 (MSI-X tables).  Also records the
 * kernel doorbell (KDB) as an available doorbell mechanism.
 *
 * NOTE(review): if the BAR4 mapping fails, the BAR0 resource is not
 * released here -- presumably the detach path frees it; verify.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1397 
/*
 * Map BAR2 (userspace doorbells).  On T5, additionally try to enable
 * write combining on the BAR and record which doorbell mechanisms are
 * usable as a result.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC doorbells supersede the plain UDB. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1453 
/*
 * Memory window layouts: { window base, aperture size }.  Windows 0 and 1
 * are identical on both chips; window 2's geometry differs between T4
 * and T5.
 */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1465 
/*
 * Program the chip's PCIe memory window decoders from the per-chip
 * memwin table.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	/* Window size is encoded as log2(aperture) - 10. */
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1504 
1505 /*
1506  * Verify that the memory range specified by the addr/len pair is valid and lies
1507  * entirely within a single region (EDCx or MCx).
1508  */
1509 static int
1510 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1511 {
1512 	uint32_t em, addr_len, maddr, mlen;
1513 
1514 	/* Memory can only be accessed in naturally aligned 4 byte units */
1515 	if (addr & 3 || len & 3 || len == 0)
1516 		return (EINVAL);
1517 
1518 	/* Enabled memories */
1519 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1520 	if (em & F_EDRAM0_ENABLE) {
1521 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1522 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1523 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1524 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1525 		    addr + len <= maddr + mlen)
1526 			return (0);
1527 	}
1528 	if (em & F_EDRAM1_ENABLE) {
1529 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1530 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1531 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1532 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1533 		    addr + len <= maddr + mlen)
1534 			return (0);
1535 	}
1536 	if (em & F_EXT_MEM_ENABLE) {
1537 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1538 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1539 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1540 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1541 		    addr + len <= maddr + mlen)
1542 			return (0);
1543 	}
1544 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1545 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1546 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1547 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1548 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549 		    addr + len <= maddr + mlen)
1550 			return (0);
1551 	}
1552 
1553 	return (EFAULT);
1554 }
1555 
1556 /*
1557  * Verify that the memory range specified by the memtype/offset/len pair is
1558  * valid and lies entirely within the memtype specified.  The global address of
1559  * the start of the range is returned in addr.
1560  */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Look up the base/size of the requested memory region. */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (mtype) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		/* MC1 exists on T5 and later only. */
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	/*
	 * NOTE(review): these comparisons are 32-bit; assumes off + len and
	 * base/size values stay well below 4GB -- confirm for large-memory
	 * configurations.
	 */
	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}
1612 
1613 static void
1614 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1615 {
1616 	const struct memwin *mw;
1617 
1618 	if (is_t4(sc)) {
1619 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1620 		    ("%s: incorrect memwin# (%d)", __func__, win));
1621 		mw = &t4_memwin[win];
1622 	} else {
1623 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1624 		    ("%s: incorrect memwin# (%d)", __func__, win));
1625 		mw = &t5_memwin[win];
1626 	}
1627 
1628 	if (base != NULL)
1629 		*base = mw->base;
1630 	if (aperture != NULL)
1631 		*aperture = mw->aperture;
1632 }
1633 
1634 /*
1635  * Positions the memory window such that it can be used to access the specified
1636  * address in the chip's address space.  The return value is the offset of addr
1637  * from the start of the window.
1638  */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	/* Alignment of the window start differs by chip generation. */
	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	/* Write the new offset; the read-back flushes the posted write. */
	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}
1664 
1665 static int
1666 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1667     struct intrs_and_queues *iaq)
1668 {
1669 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1670 	int nofldrxq10g = 0, nofldrxq1g = 0;
1671 
1672 	bzero(iaq, sizeof(*iaq));
1673 
1674 	iaq->ntxq10g = t4_ntxq10g;
1675 	iaq->ntxq1g = t4_ntxq1g;
1676 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1677 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1678 #ifdef TCP_OFFLOAD
1679 	if (is_offload(sc)) {
1680 		iaq->nofldtxq10g = t4_nofldtxq10g;
1681 		iaq->nofldtxq1g = t4_nofldtxq1g;
1682 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1683 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1684 	}
1685 #endif
1686 
1687 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1688 
1689 		if ((itype & t4_intr_types) == 0)
1690 			continue;	/* not allowed */
1691 
1692 		if (itype == INTR_MSIX)
1693 			navail = pci_msix_count(sc->dev);
1694 		else if (itype == INTR_MSI)
1695 			navail = pci_msi_count(sc->dev);
1696 		else
1697 			navail = 1;
1698 restart:
1699 		if (navail == 0)
1700 			continue;
1701 
1702 		iaq->intr_type = itype;
1703 		iaq->intr_flags = 0;
1704 
1705 		/*
1706 		 * Best option: an interrupt vector for errors, one for the
1707 		 * firmware event queue, and one each for each rxq (NIC as well
1708 		 * as offload).
1709 		 */
1710 		iaq->nirq = T4_EXTRA_INTR;
1711 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1712 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1713 		if (iaq->nirq <= navail &&
1714 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1715 			iaq->intr_flags |= INTR_DIRECT;
1716 			goto allocate;
1717 		}
1718 
1719 		/*
1720 		 * Second best option: an interrupt vector for errors, one for
1721 		 * the firmware event queue, and one each for either NIC or
1722 		 * offload rxq's.
1723 		 */
1724 		iaq->nirq = T4_EXTRA_INTR;
1725 		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1726 		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1727 		if (iaq->nirq <= navail &&
1728 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
1729 			goto allocate;
1730 
1731 		/*
1732 		 * Next best option: an interrupt vector for errors, one for the
1733 		 * firmware event queue, and at least one per port.  At this
1734 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
1735 		 * what's available to us.
1736 		 */
1737 		iaq->nirq = T4_EXTRA_INTR;
1738 		iaq->nirq += n10g + n1g;
1739 		if (iaq->nirq <= navail) {
1740 			int leftover = navail - iaq->nirq;
1741 
1742 			if (n10g > 0) {
1743 				int target = max(nrxq10g, nofldrxq10g);
1744 
1745 				n = 1;
1746 				while (n < target && leftover >= n10g) {
1747 					leftover -= n10g;
1748 					iaq->nirq += n10g;
1749 					n++;
1750 				}
1751 				iaq->nrxq10g = min(n, nrxq10g);
1752 #ifdef TCP_OFFLOAD
1753 				if (is_offload(sc))
1754 					iaq->nofldrxq10g = min(n, nofldrxq10g);
1755 #endif
1756 			}
1757 
1758 			if (n1g > 0) {
1759 				int target = max(nrxq1g, nofldrxq1g);
1760 
1761 				n = 1;
1762 				while (n < target && leftover >= n1g) {
1763 					leftover -= n1g;
1764 					iaq->nirq += n1g;
1765 					n++;
1766 				}
1767 				iaq->nrxq1g = min(n, nrxq1g);
1768 #ifdef TCP_OFFLOAD
1769 				if (is_offload(sc))
1770 					iaq->nofldrxq1g = min(n, nofldrxq1g);
1771 #endif
1772 			}
1773 
1774 			if (itype != INTR_MSI || powerof2(iaq->nirq))
1775 				goto allocate;
1776 		}
1777 
1778 		/*
1779 		 * Least desirable option: one interrupt vector for everything.
1780 		 */
1781 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1782 #ifdef TCP_OFFLOAD
1783 		if (is_offload(sc))
1784 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1785 #endif
1786 
1787 allocate:
1788 		navail = iaq->nirq;
1789 		rc = 0;
1790 		if (itype == INTR_MSIX)
1791 			rc = pci_alloc_msix(sc->dev, &navail);
1792 		else if (itype == INTR_MSI)
1793 			rc = pci_alloc_msi(sc->dev, &navail);
1794 
1795 		if (rc == 0) {
1796 			if (navail == iaq->nirq)
1797 				return (0);
1798 
1799 			/*
1800 			 * Didn't get the number requested.  Use whatever number
1801 			 * the kernel is willing to allocate (it's in navail).
1802 			 */
1803 			device_printf(sc->dev, "fewer vectors than requested, "
1804 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1805 			    itype, iaq->nirq, navail);
1806 			pci_release_msi(sc->dev);
1807 			goto restart;
1808 		}
1809 
1810 		device_printf(sc->dev,
1811 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1812 		    itype, rc, iaq->nirq, navail);
1813 	}
1814 
1815 	device_printf(sc->dev,
1816 	    "failed to find a usable interrupt type.  "
1817 	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1818 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1819 
1820 	return (ENXIO);
1821 }
1822 
/*
 * FW_VERSION(chip) packs the compiled-in major.minor.micro.build firmware
 * version for the given chip into a single 32-bit value; FW_INTFVER picks
 * the compiled-in interface version for a firmware subsystem.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR_##chip) | \
    V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR_##chip) | \
    V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO_##chip) | \
    V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD_##chip))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
1829 
/*
 * Per-chip firmware information: the firmware/config KLD module names and
 * the firmware header (version + interface versions) that this driver was
 * compiled against.
 */
struct fw_info {
	uint8_t chip;		/* CHELSIO_T4 or CHELSIO_T5 */
	char *kld_name;		/* KLD that carries the default config file */
	char *fw_mod_name;	/* firmware module within the KLD */
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1870 
1871 static struct fw_info *
1872 find_fw_info(int chip)
1873 {
1874 	int i;
1875 
1876 	for (i = 0; i < nitems(fw_info); i++) {
1877 		if (fw_info[i].chip == chip)
1878 			return (&fw_info[i]);
1879 	}
1880 	return (NULL);
1881 }
1882 
1883 /*
1884  * Is the given firmware API compatible with the one the driver was compiled
1885  * with?
1886  */
1887 static int
1888 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1889 {
1890 
1891 	/* short circuit if it's the exact same firmware version */
1892 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1893 		return (1);
1894 
1895 	/*
1896 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1897 	 * features that are supported in the driver.
1898 	 */
1899 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1900 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1901 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1902 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1903 		return (1);
1904 #undef SAME_INTF
1905 
1906 	return (0);
1907 }
1908 
1909 /*
1910  * The firmware in the KLD is usable and can be installed.  But should it be?
1911  * This routine explains itself in detail if it indicates the KLD firmware
1912  * should be installed.
1913  */
1914 static int
1915 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1916 {
1917 	const char *reason;
1918 
1919 	KASSERT(t4_fw_install != 0, ("%s: Can't install; shouldn't be asked "
1920 	    "to evaluate if install is a good idea.", __func__));
1921 
1922 	if (!card_fw_usable) {
1923 		reason = "incompatible or unusable";
1924 		goto install;
1925 	}
1926 
1927 	if (k > c) {
1928 		reason = "older than the version bundled with this driver";
1929 		goto install;
1930 	}
1931 
1932 	if (t4_fw_install == 2 && k != c) {
1933 		reason = "different than the version bundled with this driver";
1934 		goto install;
1935 	}
1936 
1937 	return (0);
1938 
1939 install:
1940 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1941 	    "installing firmware %u.%u.%u.%u on card.\n",
1942 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1943 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1944 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1945 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1946 
1947 	return (1);
1948 }
1949 /*
1950  * Establish contact with the firmware and determine if we are the master driver
1951  * or not, and whether we are responsible for chip initialization.
1952  */
1953 static int
1954 prep_firmware(struct adapter *sc)
1955 {
1956 	const struct firmware *fw = NULL, *default_cfg;
1957 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1958 	enum dev_state state;
1959 	struct fw_info *fw_info;
1960 	struct fw_hdr *card_fw;		/* fw on the card */
1961 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
1962 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
1963 					   against */
1964 
1965 	/* Contact firmware. */
1966 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1967 	if (rc < 0 || state == DEV_STATE_ERR) {
1968 		rc = -rc;
1969 		device_printf(sc->dev,
1970 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
1971 		return (rc);
1972 	}
1973 	pf = rc;
1974 	if (pf == sc->mbox)
1975 		sc->flags |= MASTER_PF;
1976 	else if (state == DEV_STATE_UNINIT) {
1977 		/*
1978 		 * We didn't get to be the master so we definitely won't be
1979 		 * configuring the chip.  It's a bug if someone else hasn't
1980 		 * configured it already.
1981 		 */
1982 		device_printf(sc->dev, "couldn't be master(%d), "
1983 		    "device not already initialized either(%d).\n", rc, state);
1984 		return (EDOOFUS);
1985 	}
1986 
1987 	/* This is the firmware whose headers the driver was compiled against */
1988 	fw_info = find_fw_info(chip_id(sc));
1989 	if (fw_info == NULL) {
1990 		device_printf(sc->dev,
1991 		    "unable to look up firmware information for chip %d.\n",
1992 		    chip_id(sc));
1993 		return (EINVAL);
1994 	}
1995 	drv_fw = &fw_info->fw_hdr;
1996 
1997 	/*
1998 	 * The firmware KLD contains many modules.  The KLD name is also the
1999 	 * name of the module that contains the default config file.
2000 	 */
2001 	default_cfg = firmware_get(fw_info->kld_name);
2002 
2003 	/* Read the header of the firmware on the card */
2004 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2005 	rc = -t4_read_flash(sc, FLASH_FW_START,
2006 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2007 	if (rc == 0)
2008 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2009 	else {
2010 		device_printf(sc->dev,
2011 		    "Unable to read card's firmware header: %d\n", rc);
2012 		card_fw_usable = 0;
2013 	}
2014 
2015 	/* This is the firmware in the KLD */
2016 	fw = firmware_get(fw_info->fw_mod_name);
2017 	if (fw != NULL) {
2018 		kld_fw = (const void *)fw->data;
2019 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2020 	} else {
2021 		kld_fw = NULL;
2022 		kld_fw_usable = 0;
2023 	}
2024 
2025 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2026 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver ||
2027 	    t4_fw_install == 0)) {
2028 		/*
2029 		 * Common case: the firmware on the card is an exact match and
2030 		 * the KLD is an exact match too, or the KLD is
2031 		 * absent/incompatible, or we're prohibited from using it.  Note
2032 		 * that t4_fw_install = 2 is ignored here -- use cxgbetool
2033 		 * loadfw if you want to reinstall the same firmware as the one
2034 		 * on the card.
2035 		 */
2036 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2037 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2038 	    be32toh(card_fw->fw_ver))) {
2039 
2040 		rc = -t4_load_fw(sc, fw->data, fw->datasize);
2041 		if (rc != 0) {
2042 			device_printf(sc->dev,
2043 			    "failed to install firmware: %d\n", rc);
2044 			goto done;
2045 		}
2046 
2047 		/* Installed successfully, update the cached header too. */
2048 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2049 		card_fw_usable = 1;
2050 		need_fw_reset = 0;	/* already reset as part of load_fw */
2051 	}
2052 
2053 	if (!card_fw_usable) {
2054 		uint32_t d, c, k;
2055 
2056 		d = ntohl(drv_fw->fw_ver);
2057 		c = ntohl(card_fw->fw_ver);
2058 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2059 
2060 		device_printf(sc->dev, "Cannot find a usable firmware: "
2061 		    "fw_install %d, chip state %d, "
2062 		    "driver compiled with %d.%d.%d.%d, "
2063 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2064 		    t4_fw_install, state,
2065 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2066 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2067 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2068 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2069 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2070 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2071 		rc = EINVAL;
2072 		goto done;
2073 	}
2074 
2075 	/* We're using whatever's on the card and it's known to be good. */
2076 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2077 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2078 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2079 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2080 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2081 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2082 
2083 	/* Reset device */
2084 	if (need_fw_reset &&
2085 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2086 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2087 		if (rc != ETIMEDOUT && rc != EIO)
2088 			t4_fw_bye(sc, sc->mbox);
2089 		goto done;
2090 	}
2091 	sc->flags |= FW_OK;
2092 
2093 	rc = get_params__pre_init(sc);
2094 	if (rc != 0)
2095 		goto done; /* error message displayed already */
2096 
2097 	/* Partition adapter resources as specified in the config file. */
2098 	if (state == DEV_STATE_UNINIT) {
2099 
2100 		KASSERT(sc->flags & MASTER_PF,
2101 		    ("%s: trying to change chip settings when not master.",
2102 		    __func__));
2103 
2104 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2105 		if (rc != 0)
2106 			goto done;	/* error message displayed already */
2107 
2108 		t4_tweak_chip_settings(sc);
2109 
2110 		/* get basic stuff going */
2111 		rc = -t4_fw_initialize(sc, sc->mbox);
2112 		if (rc != 0) {
2113 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2114 			goto done;
2115 		}
2116 	} else {
2117 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2118 		sc->cfcsum = 0;
2119 	}
2120 
2121 done:
2122 	free(card_fw, M_CXGBE);
2123 	if (fw != NULL)
2124 		firmware_put(fw, FIRMWARE_UNLOAD);
2125 	if (default_cfg != NULL)
2126 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2127 
2128 	return (rc);
2129 }
2130 
/*
 * Build the mnemonic + parameter-index portion of a firmware parameter
 * identifier for a device-global parameter (the Y/Z fields, if any, are
 * filled in by the caller).
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
/* Same, but for parameters that are per-PF/VF rather than device-global. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2137 
2138 /*
2139  * Partition chip resources for use between various PFs, VFs, etc.
2140  */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;	/* profile-specific config module */
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is "<kld_name>_<profile>". */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			/*
			 * Fall back: prefer the default config module if the
			 * caller managed to load it, else the copy on flash.
			 */
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	/* Default config wanted but not available: use flash instead. */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	/*
	 * Anything other than the flash config has to be uploaded into card
	 * memory first.  On failure we jump into the else-branch below via
	 * use_config_on_flash (an unusual but deliberate goto target).
	 */
	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/* Copy the file into card memory through memory window 2. */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_CF_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/*
	 * Tell the firmware to parse the config file at (mtype, moff) and
	 * report the resulting capabilities and checksums.
	 */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	/* A checksum mismatch is reported but is not fatal. */
	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/*
 * Mask each capability word with the corresponding tunable and cache it in
 * the softc.  caps.x is big-endian; the tunable is masked in after a host ->
 * big-endian conversion.  NOTE(review): the second line looks like be16toh()
 * was intended for the sc->x assignment; htobe16() happens to produce the
 * same result on both byte orders because the 16-bit swap is symmetric.
 */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
	sc->x = htobe16(caps.x); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2324 
2325 /*
2326  * Retrieve parameters that are needed (or nice to have) very early.
2327  */
2328 static int
2329 get_params__pre_init(struct adapter *sc)
2330 {
2331 	int rc;
2332 	uint32_t param[2], val[2];
2333 	struct fw_devlog_cmd cmd;
2334 	struct devlog_params *dlog = &sc->params.devlog;
2335 
2336 	param[0] = FW_PARAM_DEV(PORTVEC);
2337 	param[1] = FW_PARAM_DEV(CCLK);
2338 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2339 	if (rc != 0) {
2340 		device_printf(sc->dev,
2341 		    "failed to query parameters (pre_init): %d.\n", rc);
2342 		return (rc);
2343 	}
2344 
2345 	sc->params.portvec = val[0];
2346 	sc->params.nports = bitcount32(val[0]);
2347 	sc->params.vpd.cclk = val[1];
2348 
2349 	/* Read device log parameters. */
2350 	bzero(&cmd, sizeof(cmd));
2351 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2352 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2353 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2354 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2355 	if (rc != 0) {
2356 		device_printf(sc->dev,
2357 		    "failed to get devlog parameters: %d.\n", rc);
2358 		bzero(dlog, sizeof (*dlog));
2359 		rc = 0;	/* devlog isn't critical for device operation */
2360 	} else {
2361 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2362 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2363 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2364 		dlog->size = be32toh(cmd.memsize_devlog);
2365 	}
2366 
2367 	return (rc);
2368 }
2369 
2370 /*
2371  * Retrieve various parameters that are of interest to the driver.  The device
2372  * has been initialized by the firmware at this point.
2373  */
2374 static int
2375 get_params__post_init(struct adapter *sc)
2376 {
2377 	int rc;
2378 	uint32_t param[7], val[7];
2379 	struct fw_caps_config_cmd caps;
2380 
2381 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2382 	param[1] = FW_PARAM_PFVF(EQ_START);
2383 	param[2] = FW_PARAM_PFVF(FILTER_START);
2384 	param[3] = FW_PARAM_PFVF(FILTER_END);
2385 	param[4] = FW_PARAM_PFVF(L2T_START);
2386 	param[5] = FW_PARAM_PFVF(L2T_END);
2387 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2388 	if (rc != 0) {
2389 		device_printf(sc->dev,
2390 		    "failed to query parameters (post_init): %d.\n", rc);
2391 		return (rc);
2392 	}
2393 
2394 	sc->sge.iq_start = val[0];
2395 	sc->sge.eq_start = val[1];
2396 	sc->tids.ftid_base = val[2];
2397 	sc->tids.nftids = val[3] - val[2] + 1;
2398 	sc->vres.l2t.start = val[4];
2399 	sc->vres.l2t.size = val[5] - val[4] + 1;
2400 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2401 	    ("%s: L2 table size (%u) larger than expected (%u)",
2402 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2403 
2404 	/* get capabilites */
2405 	bzero(&caps, sizeof(caps));
2406 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2407 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2408 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2409 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2410 	if (rc != 0) {
2411 		device_printf(sc->dev,
2412 		    "failed to get card capabilities: %d.\n", rc);
2413 		return (rc);
2414 	}
2415 
2416 	if (caps.toecaps) {
2417 		/* query offload-related parameters */
2418 		param[0] = FW_PARAM_DEV(NTID);
2419 		param[1] = FW_PARAM_PFVF(SERVER_START);
2420 		param[2] = FW_PARAM_PFVF(SERVER_END);
2421 		param[3] = FW_PARAM_PFVF(TDDP_START);
2422 		param[4] = FW_PARAM_PFVF(TDDP_END);
2423 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2424 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2425 		if (rc != 0) {
2426 			device_printf(sc->dev,
2427 			    "failed to query TOE parameters: %d.\n", rc);
2428 			return (rc);
2429 		}
2430 		sc->tids.ntids = val[0];
2431 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2432 		sc->tids.stid_base = val[1];
2433 		sc->tids.nstids = val[2] - val[1] + 1;
2434 		sc->vres.ddp.start = val[3];
2435 		sc->vres.ddp.size = val[4] - val[3] + 1;
2436 		sc->params.ofldq_wr_cred = val[5];
2437 		sc->params.offload = 1;
2438 	}
2439 	if (caps.rdmacaps) {
2440 		param[0] = FW_PARAM_PFVF(STAG_START);
2441 		param[1] = FW_PARAM_PFVF(STAG_END);
2442 		param[2] = FW_PARAM_PFVF(RQ_START);
2443 		param[3] = FW_PARAM_PFVF(RQ_END);
2444 		param[4] = FW_PARAM_PFVF(PBL_START);
2445 		param[5] = FW_PARAM_PFVF(PBL_END);
2446 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2447 		if (rc != 0) {
2448 			device_printf(sc->dev,
2449 			    "failed to query RDMA parameters(1): %d.\n", rc);
2450 			return (rc);
2451 		}
2452 		sc->vres.stag.start = val[0];
2453 		sc->vres.stag.size = val[1] - val[0] + 1;
2454 		sc->vres.rq.start = val[2];
2455 		sc->vres.rq.size = val[3] - val[2] + 1;
2456 		sc->vres.pbl.start = val[4];
2457 		sc->vres.pbl.size = val[5] - val[4] + 1;
2458 
2459 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2460 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2461 		param[2] = FW_PARAM_PFVF(CQ_START);
2462 		param[3] = FW_PARAM_PFVF(CQ_END);
2463 		param[4] = FW_PARAM_PFVF(OCQ_START);
2464 		param[5] = FW_PARAM_PFVF(OCQ_END);
2465 		rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
2466 		if (rc != 0) {
2467 			device_printf(sc->dev,
2468 			    "failed to query RDMA parameters(2): %d.\n", rc);
2469 			return (rc);
2470 		}
2471 		sc->vres.qp.start = val[0];
2472 		sc->vres.qp.size = val[1] - val[0] + 1;
2473 		sc->vres.cq.start = val[2];
2474 		sc->vres.cq.size = val[3] - val[2] + 1;
2475 		sc->vres.ocq.start = val[4];
2476 		sc->vres.ocq.size = val[5] - val[4] + 1;
2477 	}
2478 	if (caps.iscsicaps) {
2479 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2480 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2481 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2482 		if (rc != 0) {
2483 			device_printf(sc->dev,
2484 			    "failed to query iSCSI parameters: %d.\n", rc);
2485 			return (rc);
2486 		}
2487 		sc->vres.iscsi.start = val[0];
2488 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2489 	}
2490 
2491 	/*
2492 	 * We've got the params we wanted to query via the firmware.  Now grab
2493 	 * some others directly from the chip.
2494 	 */
2495 	rc = t4_read_chip_settings(sc);
2496 
2497 	return (rc);
2498 }
2499 
2500 static int
2501 set_params__post_init(struct adapter *sc)
2502 {
2503 	uint32_t param, val;
2504 
2505 	/* ask for encapsulated CPLs */
2506 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2507 	val = 1;
2508 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2509 
2510 	return (0);
2511 }
2512 
2513 #undef FW_PARAM_PFVF
2514 #undef FW_PARAM_DEV
2515 
2516 static void
2517 t4_set_desc(struct adapter *sc)
2518 {
2519 	char buf[128];
2520 	struct adapter_params *p = &sc->params;
2521 
2522 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2523 	    p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2524 	    p->vpd.ec);
2525 
2526 	device_set_desc_copy(sc->dev, buf);
2527 }
2528 
/*
 * Rebuild the port's ifmedia list based on its port type and, for pluggable
 * ports, the module type currently inserted.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Start from a clean slate. */
	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	/* Stash port and module type in the media entry's data word. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		/* BaseT ports advertise all lower speeds plus autoselect. */
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* Pluggable: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: no duplex, no media. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_KR:
	default:
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2615 
2616 #define FW_MAC_EXACT_CHUNK	7
2617 
2618 /*
2619  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2620  * indicates which parameters should be programmed (the rest are left alone).
2621  */
2622 static int
2623 update_mac_settings(struct port_info *pi, int flags)
2624 {
2625 	int rc;
2626 	struct ifnet *ifp = pi->ifp;
2627 	struct adapter *sc = pi->adapter;
2628 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2629 
2630 	ASSERT_SYNCHRONIZED_OP(sc);
2631 	KASSERT(flags, ("%s: not told what to update.", __func__));
2632 
2633 	if (flags & XGMAC_MTU)
2634 		mtu = ifp->if_mtu;
2635 
2636 	if (flags & XGMAC_PROMISC)
2637 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2638 
2639 	if (flags & XGMAC_ALLMULTI)
2640 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2641 
2642 	if (flags & XGMAC_VLANEX)
2643 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2644 
2645 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2646 	    vlanex, false);
2647 	if (rc) {
2648 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2649 		return (rc);
2650 	}
2651 
2652 	if (flags & XGMAC_UCADDR) {
2653 		uint8_t ucaddr[ETHER_ADDR_LEN];
2654 
2655 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2656 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2657 		    ucaddr, true, true);
2658 		if (rc < 0) {
2659 			rc = -rc;
2660 			if_printf(ifp, "change_mac failed: %d\n", rc);
2661 			return (rc);
2662 		} else {
2663 			pi->xact_addr_filt = rc;
2664 			rc = 0;
2665 		}
2666 	}
2667 
2668 	if (flags & XGMAC_MCADDRS) {
2669 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2670 		int del = 1;
2671 		uint64_t hash = 0;
2672 		struct ifmultiaddr *ifma;
2673 		int i = 0, j;
2674 
2675 		if_maddr_rlock(ifp);
2676 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2677 			if (ifma->ifma_addr->sa_family != AF_LINK)
2678 				continue;
2679 			mcaddr[i++] =
2680 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2681 
2682 			if (i == FW_MAC_EXACT_CHUNK) {
2683 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2684 				    del, i, mcaddr, NULL, &hash, 0);
2685 				if (rc < 0) {
2686 					rc = -rc;
2687 					for (j = 0; j < i; j++) {
2688 						if_printf(ifp,
2689 						    "failed to add mc address"
2690 						    " %02x:%02x:%02x:"
2691 						    "%02x:%02x:%02x rc=%d\n",
2692 						    mcaddr[j][0], mcaddr[j][1],
2693 						    mcaddr[j][2], mcaddr[j][3],
2694 						    mcaddr[j][4], mcaddr[j][5],
2695 						    rc);
2696 					}
2697 					goto mcfail;
2698 				}
2699 				del = 0;
2700 				i = 0;
2701 			}
2702 		}
2703 		if (i > 0) {
2704 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2705 			    del, i, mcaddr, NULL, &hash, 0);
2706 			if (rc < 0) {
2707 				rc = -rc;
2708 				for (j = 0; j < i; j++) {
2709 					if_printf(ifp,
2710 					    "failed to add mc address"
2711 					    " %02x:%02x:%02x:"
2712 					    "%02x:%02x:%02x rc=%d\n",
2713 					    mcaddr[j][0], mcaddr[j][1],
2714 					    mcaddr[j][2], mcaddr[j][3],
2715 					    mcaddr[j][4], mcaddr[j][5],
2716 					    rc);
2717 				}
2718 				goto mcfail;
2719 			}
2720 		}
2721 
2722 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2723 		if (rc != 0)
2724 			if_printf(ifp, "failed to set mc address hash: %d", rc);
2725 mcfail:
2726 		if_maddr_runlock(ifp);
2727 	}
2728 
2729 	return (rc);
2730 }
2731 
2732 int
2733 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2734     char *wmesg)
2735 {
2736 	int rc, pri;
2737 
2738 #ifdef WITNESS
2739 	/* the caller thinks it's ok to sleep, but is it really? */
2740 	if (flags & SLEEP_OK)
2741 		pause("t4slptst", 1);
2742 #endif
2743 
2744 	if (INTR_OK)
2745 		pri = PCATCH;
2746 	else
2747 		pri = 0;
2748 
2749 	ADAPTER_LOCK(sc);
2750 	for (;;) {
2751 
2752 		if (pi && IS_DOOMED(pi)) {
2753 			rc = ENXIO;
2754 			goto done;
2755 		}
2756 
2757 		if (!IS_BUSY(sc)) {
2758 			rc = 0;
2759 			break;
2760 		}
2761 
2762 		if (!(flags & SLEEP_OK)) {
2763 			rc = EBUSY;
2764 			goto done;
2765 		}
2766 
2767 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2768 			rc = EINTR;
2769 			goto done;
2770 		}
2771 	}
2772 
2773 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2774 	SET_BUSY(sc);
2775 #ifdef INVARIANTS
2776 	sc->last_op = wmesg;
2777 	sc->last_op_thr = curthread;
2778 #endif
2779 
2780 done:
2781 	if (!(flags & HOLD_LOCK) || rc)
2782 		ADAPTER_UNLOCK(sc);
2783 
2784 	return (rc);
2785 }
2786 
2787 void
2788 end_synchronized_op(struct adapter *sc, int flags)
2789 {
2790 
2791 	if (flags & LOCK_HELD)
2792 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2793 	else
2794 		ADAPTER_LOCK(sc);
2795 
2796 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2797 	CLR_BUSY(sc);
2798 	wakeup(&sc->flags);
2799 	ADAPTER_UNLOCK(sc);
2800 }
2801 
/*
 * Bring a port up: ensure adapter- and port-level initialization is done,
 * program the MAC, start the link, and enable the virtual interface.
 * Idempotent for an already-running port.  Caller holds the synchronized-op
 * marker.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* Lazily finish adapter-wide and per-port initialization. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz per-port housekeeping tick. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	/* cxgbe_uninit_synchronized is idempotent; safe on partial setup. */
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
2854 
2855 /*
2856  * Idempotent.
2857  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	/* Mark the port closed; clear IFF_DRV_RUNNING under the port lock. */
	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Report the link as down to the stack. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	t4_os_link_changed(sc, pi->port_id, 0);

	return (0);
}
2891 
2892 /*
2893  * It is ok for this function to fail midway and return right away.  t4_detach
2894  * will walk the entire sc->irq list and clean up whatever is valid.
2895  */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses resource id 0; MSI/MSI-X ids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1) {
		/* Single vector: one handler services everything. */
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queueus will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* NIC rx queues; irq names are "<port>.<queue>". */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* TOE rx queues; note "," (vs ".") in the irq name. */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
2995 
2996 static int
2997 adapter_full_init(struct adapter *sc)
2998 {
2999 	int rc, i;
3000 
3001 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3002 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3003 	    ("%s: FULL_INIT_DONE already", __func__));
3004 
3005 	/*
3006 	 * queues that belong to the adapter (not any particular port).
3007 	 */
3008 	rc = t4_setup_adapter_queues(sc);
3009 	if (rc != 0)
3010 		goto done;
3011 
3012 	for (i = 0; i < nitems(sc->tq); i++) {
3013 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3014 		    taskqueue_thread_enqueue, &sc->tq[i]);
3015 		if (sc->tq[i] == NULL) {
3016 			device_printf(sc->dev,
3017 			    "failed to allocate task queue %d\n", i);
3018 			rc = ENOMEM;
3019 			goto done;
3020 		}
3021 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3022 		    device_get_nameunit(sc->dev), i);
3023 	}
3024 
3025 	t4_intr_enable(sc);
3026 	sc->flags |= FULL_INIT_DONE;
3027 done:
3028 	if (rc != 0)
3029 		adapter_full_uninit(sc);
3030 
3031 	return (rc);
3032 }
3033 
3034 static int
3035 adapter_full_uninit(struct adapter *sc)
3036 {
3037 	int i;
3038 
3039 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3040 
3041 	t4_teardown_adapter_queues(sc);
3042 
3043 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3044 		taskqueue_free(sc->tq[i]);
3045 		sc->tq[i] = NULL;
3046 	}
3047 
3048 	sc->flags &= ~FULL_INIT_DONE;
3049 
3050 	return (0);
3051 }
3052 
static int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	/*
	 * Per-port counterpart of adapter_full_init: create the port's
	 * sysctl context, allocate its tx/rx/fl queues, and program the
	 * RSS indirection table with the absolute IDs of the port's rx
	 * queues.  Sets PORT_INIT_DONE on success; on failure the partial
	 * setup is undone via port_full_uninit.
	 */
	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.
	 */
	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
	    M_ZERO | M_WAITOK);	/* M_WAITOK: cannot fail */
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	/* Note the sign flip: convert the firmware call's return to errno. */
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	free(rss, M_CXGBE);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);

	return (rc);
}
3099 
3100 /*
3101  * Idempotent.
3102  */
3103 static int
3104 port_full_uninit(struct port_info *pi)
3105 {
3106 	struct adapter *sc = pi->adapter;
3107 	int i;
3108 	struct sge_rxq *rxq;
3109 	struct sge_txq *txq;
3110 #ifdef TCP_OFFLOAD
3111 	struct sge_ofld_rxq *ofld_rxq;
3112 	struct sge_wrq *ofld_txq;
3113 #endif
3114 
3115 	if (pi->flags & PORT_INIT_DONE) {
3116 
3117 		/* Need to quiesce queues.  XXX: ctrl queues? */
3118 
3119 		for_each_txq(pi, i, txq) {
3120 			quiesce_eq(sc, &txq->eq);
3121 		}
3122 
3123 #ifdef TCP_OFFLOAD
3124 		for_each_ofld_txq(pi, i, ofld_txq) {
3125 			quiesce_eq(sc, &ofld_txq->eq);
3126 		}
3127 #endif
3128 
3129 		for_each_rxq(pi, i, rxq) {
3130 			quiesce_iq(sc, &rxq->iq);
3131 			quiesce_fl(sc, &rxq->fl);
3132 		}
3133 
3134 #ifdef TCP_OFFLOAD
3135 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3136 			quiesce_iq(sc, &ofld_rxq->iq);
3137 			quiesce_fl(sc, &ofld_rxq->fl);
3138 		}
3139 #endif
3140 	}
3141 
3142 	t4_teardown_port_queues(pi);
3143 	pi->flags &= ~PORT_INIT_DONE;
3144 
3145 	return (0);
3146 }
3147 
/*
 * Quiesce an egress queue: mark it doomed, wait out any pending credit
 * flush, then drain the queue's tx callout and tx task so nothing is
 * left referencing it.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3167 
3168 static void
3169 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3170 {
3171 	(void) sc;	/* unused */
3172 
3173 	/* Synchronize with the interrupt handler */
3174 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3175 		pause("iqfree", 1);
3176 }
3177 
/*
 * Quiesce a freelist: mark it doomed (taking sfl_lock first, matching
 * the lock order used with the starvation machinery) and then drain the
 * adapter-wide starving-freelist callout.  The KASSERT checks that the
 * freelist is no longer on the starving list afterwards.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3191 
/*
 * Allocate a shareable IRQ resource for the given rid and hook up
 * handler(arg) to it.  On success irq->{rid,res,tag} are all valid and
 * the interrupt is described with 'name' (if any) for vmstat -i et al.
 *
 * Note: if bus_setup_intr fails, irq->res is left allocated;
 * t4_free_irq will release it (it checks res/tag individually).
 */
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    driver_intr_t *handler, void *arg, char *name)
{
	int rc;

	irq->rid = rid;
	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(sc->dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}

	/* No filter; handler runs as an ithread handler. */
	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, arg, &irq->tag);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name, rc);
	} else if (name)
		bus_describe_intr(sc->dev, irq->res, irq->tag, name);

	return (rc);
}
3218 
3219 static int
3220 t4_free_irq(struct adapter *sc, struct irq *irq)
3221 {
3222 	if (irq->tag)
3223 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3224 	if (irq->res)
3225 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3226 
3227 	bzero(irq, sizeof(*irq));
3228 
3229 	return (0);
3230 }
3231 
3232 static void
3233 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3234     unsigned int end)
3235 {
3236 	uint32_t *p = (uint32_t *)(buf + start);
3237 
3238 	for ( ; start <= end; start += sizeof(uint32_t))
3239 		*p++ = t4_read_reg(sc, start);
3240 }
3241 
/*
 * Produce a register dump for the T4_GET_REGDUMP style consumers: record
 * the chip id/revision in regs->version and copy every register range in
 * the chip-specific table below into buf.  buf is indexed by register
 * address (see reg_block_dump), so it must cover the highest address in
 * the selected table.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* Pairs of {first, last} register addresses to dump on T4. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* Pairs of {first, last} register addresses to dump on T5. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30040,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a04,
		0x30a0c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x31600,
		0x31608, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e04, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32208, 0x3223c,
		0x32248, 0x3227c,
		0x32288, 0x322bc,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34040,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a04,
		0x34a0c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x35600,
		0x35608, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e04, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36208, 0x3623c,
		0x36248, 0x3627c,
		0x36288, 0x362bc,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38040,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a04,
		0x38a0c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x39600,
		0x39608, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e04, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a208, 0x3a23c,
		0x3a248, 0x3a27c,
		0x3a288, 0x3a2bc,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c040,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca04,
		0x3ca0c, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d600,
		0x3d608, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de04, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e208, 0x3e23c,
		0x3e248, 0x3e27c,
		0x3e288, 0x3e2bc,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41300, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49300, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Select the range table that matches this chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	/* Entries come in {start, end} pairs; dump each range. */
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
3917 
/*
 * Periodic per-port callout: pull the hardware port statistics into
 * pi->stats, mirror them into the ifnet counters, and reschedule itself
 * one second (hz) later.  Stops rescheduling once the interface is no
 * longer running.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	t4_get_port_stats(pi->adapter, pi->tx_chan, s);

	/* Pause frames are excluded from the traffic counters (64B each). */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;

	/* Fold the software buf_ring drops into the hardware tx drops. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
3957 
3958 static void
3959 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
3960 {
3961 	struct ifnet *vlan;
3962 
3963 	if (arg != ifp || ifp->if_type != IFT_ETHER)
3964 		return;
3965 
3966 	vlan = VLAN_DEVAT(ifp, vid);
3967 	VLAN_SETCOOKIE(vlan, ifp);
3968 }
3969 
/*
 * Default CPL handler, installed for opcodes with no registered handler.
 * Panics under INVARIANTS; otherwise logs the stray opcode and frees the
 * payload mbuf (if any).
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
3984 
3985 int
3986 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3987 {
3988 	uintptr_t *loc, new;
3989 
3990 	if (opcode >= nitems(sc->cpl_handler))
3991 		return (EINVAL);
3992 
3993 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3994 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
3995 	atomic_store_rel_ptr(loc, new);
3996 
3997 	return (0);
3998 }
3999 
/*
 * Default async-notification handler.  Panics under INVARIANTS;
 * otherwise just logs the unexpected notification.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4012 
4013 int
4014 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4015 {
4016 	uintptr_t *loc, new;
4017 
4018 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4019 	loc = (uintptr_t *) &sc->an_handler;
4020 	atomic_store_rel_ptr(loc, new);
4021 
4022 	return (0);
4023 }
4024 
/*
 * Default firmware-message handler.  Recovers the enclosing cpl_fw6_msg
 * from the data pointer to report its type; panics under INVARIANTS,
 * logs otherwise.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4038 
/*
 * Install h as the handler for firmware messages of the given type; a
 * NULL h restores the default (fw_msg_not_handled).  Returns EINVAL for
 * an out-of-range type or for the RSSCPL subtypes (see below).
 */
int
t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
{
	uintptr_t *loc, new;

	if (type >= nitems(sc->fw_msg_handler))
		return (EINVAL);

	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
	 * handler dispatch table.  Reject any attempt to install a handler for
	 * this subtype.
	 */
	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
		return (EINVAL);

	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
	loc = (uintptr_t *) &sc->fw_msg_handler[type];
	atomic_store_rel_ptr(loc, new);

	return (0);
}
4061 
4062 static int
4063 t4_sysctls(struct adapter *sc)
4064 {
4065 	struct sysctl_ctx_list *ctx;
4066 	struct sysctl_oid *oid;
4067 	struct sysctl_oid_list *children, *c0;
4068 	static char *caps[] = {
4069 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4070 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4071 		"\20\1TOE",				/* caps[2] toecaps */
4072 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4073 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4074 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4075 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4076 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4077 	};
4078 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4079 
4080 	ctx = device_get_sysctl_ctx(sc->dev);
4081 
4082 	/*
4083 	 * dev.t4nex.X.
4084 	 */
4085 	oid = device_get_sysctl_tree(sc->dev);
4086 	c0 = children = SYSCTL_CHILDREN(oid);
4087 
4088 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4089 	    sc->params.nports, "# of ports");
4090 
4091 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4092 	    NULL, chip_rev(sc), "chip hardware revision");
4093 
4094 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4095 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4096 
4097 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4098 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4099 
4100 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4101 	    sc->cfcsum, "config file checksum");
4102 
4103 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4104 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4105 	    sysctl_bitfield, "A", "available doorbells");
4106 
4107 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4108 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4109 	    sysctl_bitfield, "A", "available link capabilities");
4110 
4111 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4112 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4113 	    sysctl_bitfield, "A", "available NIC capabilities");
4114 
4115 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4116 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4117 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4118 
4119 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4120 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4121 	    sysctl_bitfield, "A", "available RDMA capabilities");
4122 
4123 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4124 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4125 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4126 
4127 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4128 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4129 	    sysctl_bitfield, "A", "available FCoE capabilities");
4130 
4131 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4132 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4133 
4134 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4135 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4136 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4137 	    "interrupt holdoff timer values (us)");
4138 
4139 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4140 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4141 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4142 	    "interrupt holdoff packet counter values");
4143 
4144 #ifdef SBUF_DRAIN
4145 	/*
4146 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4147 	 */
4148 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4149 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4150 	    "logs and miscellaneous information");
4151 	children = SYSCTL_CHILDREN(oid);
4152 
4153 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4154 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4155 	    sysctl_cctrl, "A", "congestion control");
4156 
4157 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4158 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4159 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4160 
4161 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4162 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4163 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4164 
4165 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4166 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4167 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4168 
4169 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4170 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4171 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4172 
4173 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4174 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4175 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4176 
4177 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4178 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4179 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4180 
4181 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4182 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4183 	    sysctl_cim_la, "A", "CIM logic analyzer");
4184 
4185 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4186 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4187 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4188 
4189 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4190 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4191 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4192 
4193 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4194 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4195 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4196 
4197 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4198 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4199 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4200 
4201 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4202 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4203 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4204 
4205 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4206 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4207 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4208 
4209 	if (is_t5(sc)) {
4210 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4211 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4212 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4213 
4214 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4215 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4216 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4217 	}
4218 
4219 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4220 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4221 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4222 
4223 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4224 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4225 	    sysctl_cpl_stats, "A", "CPL statistics");
4226 
4227 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4228 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4229 	    sysctl_ddp_stats, "A", "DDP statistics");
4230 
4231 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4232 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4233 	    sysctl_devlog, "A", "firmware's device log");
4234 
4235 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4236 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4237 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4238 
4239 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4240 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4241 	    sysctl_hw_sched, "A", "hardware scheduler ");
4242 
4243 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4244 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4245 	    sysctl_l2t, "A", "hardware L2 table");
4246 
4247 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4248 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4249 	    sysctl_lb_stats, "A", "loopback statistics");
4250 
4251 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4252 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4253 	    sysctl_meminfo, "A", "memory regions");
4254 
4255 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4256 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4257 	    sysctl_path_mtus, "A", "path MTUs");
4258 
4259 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4260 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4261 	    sysctl_pm_stats, "A", "PM statistics");
4262 
4263 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4264 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4265 	    sysctl_rdma_stats, "A", "RDMA statistics");
4266 
4267 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4268 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4269 	    sysctl_tcp_stats, "A", "TCP statistics");
4270 
4271 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4272 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4273 	    sysctl_tids, "A", "TID information");
4274 
4275 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4276 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4277 	    sysctl_tp_err_stats, "A", "TP error statistics");
4278 
4279 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4280 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4281 	    sysctl_tx_rate, "A", "Tx rate");
4282 
4283 	if (is_t5(sc)) {
4284 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4285 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4286 		    sysctl_wcwr_stats, "A", "write combined work requests");
4287 	}
4288 #endif
4289 
4290 #ifdef TCP_OFFLOAD
4291 	if (is_offload(sc)) {
4292 		/*
4293 		 * dev.t4nex.X.toe.
4294 		 */
4295 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4296 		    NULL, "TOE parameters");
4297 		children = SYSCTL_CHILDREN(oid);
4298 
4299 		sc->tt.sndbuf = 256 * 1024;
4300 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4301 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4302 
4303 		sc->tt.ddp = 0;
4304 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4305 		    &sc->tt.ddp, 0, "DDP allowed");
4306 
4307 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4308 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4309 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4310 
4311 		sc->tt.ddp_thres =
4312 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4313 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4314 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4315 	}
4316 #endif
4317 
4318 
4319 	return (0);
4320 }
4321 
/*
 * Set up all of the fixed sysctl nodes for one port (dev.cxgbe.X.*):
 * queue counts/indices, the holdoff and queue-size tunables, and the
 * per-port MPS hardware statistics under dev.cxgbe.X.stats.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	/* Read-only queue geometry for this port. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		/* TOE queue geometry, only when offload is possible. */
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	/* Writable tunables; writes are validated by the handlers. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

	/*
	 * Each entry reads a 64-bit MPS statistics register on demand via
	 * sysctl_handle_t4_reg64; arg2 carries the register address.  Note
	 * that both the tx and rx registers are indexed by pi->tx_chan
	 * (the port's channel).
	 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

	/*
	 * These export fields of pi->stats (filled by the driver's stats
	 * collection) rather than reading hardware directly.
	 */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4535 
4536 static int
4537 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4538 {
4539 	int rc, *i;
4540 	struct sbuf sb;
4541 
4542 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4543 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4544 		sbuf_printf(&sb, "%d ", *i);
4545 	sbuf_trim(&sb);
4546 	sbuf_finish(&sb);
4547 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4548 	sbuf_delete(&sb);
4549 	return (rc);
4550 }
4551 
4552 static int
4553 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4554 {
4555 	int rc;
4556 	struct sbuf *sb;
4557 
4558 	rc = sysctl_wire_old_buffer(req, 0);
4559 	if (rc != 0)
4560 		return(rc);
4561 
4562 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4563 	if (sb == NULL)
4564 		return (ENOMEM);
4565 
4566 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4567 	rc = sbuf_finish(sb);
4568 	sbuf_delete(sb);
4569 
4570 	return (rc);
4571 }
4572 
/*
 * Get/set the interrupt holdoff timer index for all of this port's rx
 * queues.  Unlike the pktc index, this can be changed while the queues
 * exist: the new value is pushed into every rx queue's iq.intr_params.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
	uint8_t v;

	idx = pi->tmr_idx;

	/* Report the current value; nothing more to do unless it's a write. */
	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* New timer index; keep the packet-count-enable bit consistent. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* No 8-bit atomic store on this arch; plain store. */
		rxq->iq.intr_params = v;
#endif
	}
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4609 
4610 static int
4611 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4612 {
4613 	struct port_info *pi = arg1;
4614 	struct adapter *sc = pi->adapter;
4615 	int idx, rc;
4616 
4617 	idx = pi->pktc_idx;
4618 
4619 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4620 	if (rc != 0 || req->newptr == NULL)
4621 		return (rc);
4622 
4623 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4624 		return (EINVAL);
4625 
4626 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4627 	    "t4pktc");
4628 	if (rc)
4629 		return (rc);
4630 
4631 	if (pi->flags & PORT_INIT_DONE)
4632 		rc = EBUSY; /* cannot be changed once the queues are created */
4633 	else
4634 		pi->pktc_idx = idx;
4635 
4636 	end_synchronized_op(sc, LOCK_HELD);
4637 	return (rc);
4638 }
4639 
4640 static int
4641 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4642 {
4643 	struct port_info *pi = arg1;
4644 	struct adapter *sc = pi->adapter;
4645 	int qsize, rc;
4646 
4647 	qsize = pi->qsize_rxq;
4648 
4649 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4650 	if (rc != 0 || req->newptr == NULL)
4651 		return (rc);
4652 
4653 	if (qsize < 128 || (qsize & 7))
4654 		return (EINVAL);
4655 
4656 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4657 	    "t4rxqs");
4658 	if (rc)
4659 		return (rc);
4660 
4661 	if (pi->flags & PORT_INIT_DONE)
4662 		rc = EBUSY; /* cannot be changed once the queues are created */
4663 	else
4664 		pi->qsize_rxq = qsize;
4665 
4666 	end_synchronized_op(sc, LOCK_HELD);
4667 	return (rc);
4668 }
4669 
4670 static int
4671 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4672 {
4673 	struct port_info *pi = arg1;
4674 	struct adapter *sc = pi->adapter;
4675 	int qsize, rc;
4676 
4677 	qsize = pi->qsize_txq;
4678 
4679 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4680 	if (rc != 0 || req->newptr == NULL)
4681 		return (rc);
4682 
4683 	/* bufring size must be powerof2 */
4684 	if (qsize < 128 || !powerof2(qsize))
4685 		return (EINVAL);
4686 
4687 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4688 	    "t4txqs");
4689 	if (rc)
4690 		return (rc);
4691 
4692 	if (pi->flags & PORT_INIT_DONE)
4693 		rc = EBUSY; /* cannot be changed once the queues are created */
4694 	else
4695 		pi->qsize_txq = qsize;
4696 
4697 	end_synchronized_op(sc, LOCK_HELD);
4698 	return (rc);
4699 }
4700 
4701 static int
4702 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4703 {
4704 	struct adapter *sc = arg1;
4705 	int reg = arg2;
4706 	uint64_t val;
4707 
4708 	val = t4_read_reg64(sc, reg);
4709 
4710 	return (sysctl_handle_64(oidp, &val, 0, req));
4711 }
4712 
4713 #ifdef SBUF_DRAIN
/*
 * Dump the congestion control table: for each of the NCCTRL_WIN windows,
 * the additive increment for all NMTUS MTUs, followed by the window size
 * (a_wnd) and the decrement factor (b_wnd, shown via the dec_fac table).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* b_wnd values index into this list of decrement factors. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	/* Two output lines per window: MTUs 0-7, then MTUs 8-15 + a/b wnd. */
	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4751 
/*
 * Names of the CIM queues, indexed the same way as the qid (arg2) used by
 * sysctl_cim_ibq_obq: all IBQs first, then the OBQs.  The last two OBQs
 * exist on T5 and later chips only.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
4757 
/*
 * Dump the contents of one CIM queue.  arg2 selects the queue: values
 * 0 .. CIM_NUM_IBQ-1 are inbound queues, the remainder are outbound
 * queues (same indexing as the qname[] table above).
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		/*
		 * NOTE(review): buffer is sized for cim_num_obq queues even
		 * though only one is read — presumably to cover the largest
		 * possible OBQ; confirm against t4_read_cim_obq.
		 */
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	/* Negative rc is an error; non-negative is the word count read. */
	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* Header, then the queue contents 16 bytes (4 words) per line. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
4813 
4814 static int
4815 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4816 {
4817 	struct adapter *sc = arg1;
4818 	u_int cfg;
4819 	struct sbuf *sb;
4820 	uint32_t *buf, *p;
4821 	int rc;
4822 
4823 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4824 	if (rc != 0)
4825 		return (rc);
4826 
4827 	rc = sysctl_wire_old_buffer(req, 0);
4828 	if (rc != 0)
4829 		return (rc);
4830 
4831 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4832 	if (sb == NULL)
4833 		return (ENOMEM);
4834 
4835 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4836 	    M_ZERO | M_WAITOK);
4837 
4838 	rc = -t4_cim_read_la(sc, buf, NULL);
4839 	if (rc != 0)
4840 		goto done;
4841 
4842 	sbuf_printf(sb, "Status   Data      PC%s",
4843 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
4844 	    "     LS0Stat  LS0Addr             LS0Data");
4845 
4846 	KASSERT((sc->params.cim_la_size & 7) == 0,
4847 	    ("%s: p will walk off the end of buf", __func__));
4848 
4849 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4850 		if (cfg & F_UPDBGLACAPTPCONLY) {
4851 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
4852 			    p[6], p[7]);
4853 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
4854 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4855 			    p[4] & 0xff, p[5] >> 8);
4856 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
4857 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4858 			    p[1] & 0xf, p[2] >> 4);
4859 		} else {
4860 			sbuf_printf(sb,
4861 			    "\n  %02x   %x%07x %x%07x %08x %08x "
4862 			    "%08x%08x%08x%08x",
4863 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4864 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4865 			    p[6], p[7]);
4866 		}
4867 	}
4868 
4869 	rc = sbuf_finish(sb);
4870 	sbuf_delete(sb);
4871 done:
4872 	free(buf, M_CXGBE);
4873 	return (rc);
4874 }
4875 
/*
 * Display the configuration and current state (base, size, threshold,
 * read/write pointers, SOP/EOP counts, available flits) of every CIM
 * IBQ and OBQ.  T4 and T5 expose the pointers at different register
 * addresses and T5 has two extra OBQs.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	/* Chip-specific register addresses and OBQ count. */
	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 status words per queue, plus 2 write pointers per OBQ. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* IBQs first (they have a threshold), then the OBQs. */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4934 
4935 static int
4936 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
4937 {
4938 	struct adapter *sc = arg1;
4939 	struct sbuf *sb;
4940 	int rc;
4941 	struct tp_cpl_stats stats;
4942 
4943 	rc = sysctl_wire_old_buffer(req, 0);
4944 	if (rc != 0)
4945 		return (rc);
4946 
4947 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4948 	if (sb == NULL)
4949 		return (ENOMEM);
4950 
4951 	t4_tp_get_cpl_stats(sc, &stats);
4952 
4953 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
4954 	    "channel 3\n");
4955 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
4956 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
4957 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
4958 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
4959 
4960 	rc = sbuf_finish(sb);
4961 	sbuf_delete(sb);
4962 
4963 	return (rc);
4964 }
4965 
4966 static int
4967 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
4968 {
4969 	struct adapter *sc = arg1;
4970 	struct sbuf *sb;
4971 	int rc;
4972 	struct tp_usm_stats stats;
4973 
4974 	rc = sysctl_wire_old_buffer(req, 0);
4975 	if (rc != 0)
4976 		return(rc);
4977 
4978 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4979 	if (sb == NULL)
4980 		return (ENOMEM);
4981 
4982 	t4_get_usm_stats(sc, &stats);
4983 
4984 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
4985 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
4986 	sbuf_printf(sb, "Drops:  %u", stats.drops);
4987 
4988 	rc = sbuf_finish(sb);
4989 	sbuf_delete(sb);
4990 
4991 	return (rc);
4992 }
4993 
/* Human-readable names for the FW_DEVLOG_LEVEL_* values, indexed by level. */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5002 
/* Human-readable names for the FW_DEVLOG_FACILITY_* values, indexed by facility. */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5028 
/*
 * Dump the firmware's device log.  The log lives in adapter memory as a
 * circular buffer of fw_devlog_e entries; the entry with the oldest
 * timestamp is located first and the log is then printed in order from
 * there, wrapping around.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		/*
		 * NOTE(review): hardcoded fallback location/size used when
		 * the devlog parameters weren't obtained from the firmware —
		 * confirm these match the default firmware layout.
		 */
		dparams->memtype = 0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
	    (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byteswap every used entry and remember the index of the entry
	 * with the lowest timestamp (the oldest, i.e. where to start).
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the circular log starting at the oldest entry. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/*
		 * NOTE(review): e->fmt is a format string supplied by the
		 * firmware (non-literal); trusted here because it comes from
		 * the device, not from userland.
		 */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5114 
5115 static int
5116 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5117 {
5118 	struct adapter *sc = arg1;
5119 	struct sbuf *sb;
5120 	int rc;
5121 	struct tp_fcoe_stats stats[4];
5122 
5123 	rc = sysctl_wire_old_buffer(req, 0);
5124 	if (rc != 0)
5125 		return (rc);
5126 
5127 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5128 	if (sb == NULL)
5129 		return (ENOMEM);
5130 
5131 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5132 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5133 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5134 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5135 
5136 	sbuf_printf(sb, "                   channel 0        channel 1        "
5137 	    "channel 2        channel 3\n");
5138 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5139 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5140 	    stats[3].octetsDDP);
5141 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5142 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5143 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5144 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5145 	    stats[3].framesDrop);
5146 
5147 	rc = sbuf_finish(sb);
5148 	sbuf_delete(sb);
5149 
5150 	return (rc);
5151 }
5152 
/*
 * Sysctl handler that dumps the hardware Tx scheduler configuration: for
 * each of the NTX_SCHED schedulers, its timer mode (flow/class), bound
 * channel, rate, class IPG, and the pace table entry (flow IPG).
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Channel map (2 bits per scheduler), timer mode bits, pace table. */
	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		/* Bit i of 'mode' selects flow (1) vs class (0) mode. */
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		/* Zero values indicate that the feature is disabled. */
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5202 
/*
 * Sysctl handler that reports loopback port statistics, two loopback
 * ports per column group (0/1 then 2/3).
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Two loopback ports per iteration: (0,1) and (2,3). */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		/*
		 * NOTE(review): walking p0/p1 across the struct assumes
		 * lb_port_stats is laid out as consecutive uint64_t counters
		 * in exactly stat_name[] order, starting at 'octets' —
		 * verify against the struct definition if it changes.
		 */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5250 
/*
 * Describes one address range of adapter memory; used by sysctl_meminfo
 * to sort and display the hardware memory map.
 */
struct mem_desc {
	unsigned int base;	/* start address of the range */
	unsigned int limit;	/* end address (0 until computed/assigned) */
	unsigned int idx;	/* index into a name table (memory[]/region[]) */
};
5256 
5257 static int
5258 mem_desc_cmp(const void *a, const void *b)
5259 {
5260 	return ((const struct mem_desc *)a)->base -
5261 	       ((const struct mem_desc *)b)->base;
5262 }
5263 
/*
 * Append one "name  base-limit [size]" line describing a memory region;
 * empty regions (limit + 1 == base) are suppressed.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size = to - from + 1;

	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5277 
5278 static int
5279 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5280 {
5281 	struct adapter *sc = arg1;
5282 	struct sbuf *sb;
5283 	int rc, i, n;
5284 	uint32_t lo, hi, used, alloc;
5285 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5286 	static const char *region[] = {
5287 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5288 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5289 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5290 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5291 		"RQUDP region:", "PBL region:", "TXPBL region:",
5292 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5293 		"On-chip queues:"
5294 	};
5295 	struct mem_desc avail[4];
5296 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5297 	struct mem_desc *md = mem;
5298 
5299 	rc = sysctl_wire_old_buffer(req, 0);
5300 	if (rc != 0)
5301 		return (rc);
5302 
5303 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5304 	if (sb == NULL)
5305 		return (ENOMEM);
5306 
5307 	for (i = 0; i < nitems(mem); i++) {
5308 		mem[i].limit = 0;
5309 		mem[i].idx = i;
5310 	}
5311 
5312 	/* Find and sort the populated memory ranges */
5313 	i = 0;
5314 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5315 	if (lo & F_EDRAM0_ENABLE) {
5316 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5317 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5318 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5319 		avail[i].idx = 0;
5320 		i++;
5321 	}
5322 	if (lo & F_EDRAM1_ENABLE) {
5323 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5324 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5325 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5326 		avail[i].idx = 1;
5327 		i++;
5328 	}
5329 	if (lo & F_EXT_MEM_ENABLE) {
5330 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5331 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5332 		avail[i].limit = avail[i].base +
5333 		    (G_EXT_MEM_SIZE(hi) << 20);
5334 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5335 		i++;
5336 	}
5337 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5338 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5339 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5340 		avail[i].limit = avail[i].base +
5341 		    (G_EXT_MEM1_SIZE(hi) << 20);
5342 		avail[i].idx = 4;
5343 		i++;
5344 	}
5345 	if (!i)                                    /* no memory available */
5346 		return 0;
5347 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5348 
5349 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5350 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5351 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5352 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5353 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5354 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5355 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5356 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5357 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5358 
5359 	/* the next few have explicit upper bounds */
5360 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5361 	md->limit = md->base - 1 +
5362 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5363 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5364 	md++;
5365 
5366 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5367 	md->limit = md->base - 1 +
5368 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5369 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5370 	md++;
5371 
5372 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5373 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5374 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5375 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5376 	} else {
5377 		md->base = 0;
5378 		md->idx = nitems(region);  /* hide it */
5379 	}
5380 	md++;
5381 
5382 #define ulp_region(reg) \
5383 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5384 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5385 
5386 	ulp_region(RX_ISCSI);
5387 	ulp_region(RX_TDDP);
5388 	ulp_region(TX_TPT);
5389 	ulp_region(RX_STAG);
5390 	ulp_region(RX_RQ);
5391 	ulp_region(RX_RQUDP);
5392 	ulp_region(RX_PBL);
5393 	ulp_region(TX_PBL);
5394 #undef ulp_region
5395 
5396 	md->base = 0;
5397 	md->idx = nitems(region);
5398 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5399 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5400 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5401 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5402 	}
5403 	md++;
5404 
5405 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5406 	md->limit = md->base + sc->tids.ntids - 1;
5407 	md++;
5408 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5409 	md->limit = md->base + sc->tids.ntids - 1;
5410 	md++;
5411 
5412 	md->base = sc->vres.ocq.start;
5413 	if (sc->vres.ocq.size)
5414 		md->limit = md->base + sc->vres.ocq.size - 1;
5415 	else
5416 		md->idx = nitems(region);  /* hide it */
5417 	md++;
5418 
5419 	/* add any address-space holes, there can be up to 3 */
5420 	for (n = 0; n < i - 1; n++)
5421 		if (avail[n].limit < avail[n + 1].base)
5422 			(md++)->base = avail[n].limit;
5423 	if (avail[n].limit)
5424 		(md++)->base = avail[n].limit;
5425 
5426 	n = md - mem;
5427 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5428 
5429 	for (lo = 0; lo < i; lo++)
5430 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5431 				avail[lo].limit - 1);
5432 
5433 	sbuf_printf(sb, "\n");
5434 	for (i = 0; i < n; i++) {
5435 		if (mem[i].idx >= nitems(region))
5436 			continue;                        /* skip holes */
5437 		if (!mem[i].limit)
5438 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5439 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5440 				mem[i].limit);
5441 	}
5442 
5443 	sbuf_printf(sb, "\n");
5444 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5445 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5446 	mem_region_show(sb, "uP RAM:", lo, hi);
5447 
5448 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5449 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5450 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5451 
5452 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5453 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5454 		   G_PMRXMAXPAGE(lo),
5455 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5456 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5457 
5458 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5459 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5460 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5461 		   G_PMTXMAXPAGE(lo),
5462 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5463 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5464 	sbuf_printf(sb, "%u p-structs\n",
5465 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5466 
5467 	for (i = 0; i < 4; i++) {
5468 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5469 		if (is_t4(sc)) {
5470 			used = G_USED(lo);
5471 			alloc = G_ALLOC(lo);
5472 		} else {
5473 			used = G_T5_USED(lo);
5474 			alloc = G_T5_ALLOC(lo);
5475 		}
5476 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5477 			   i, used, alloc);
5478 	}
5479 	for (i = 0; i < 4; i++) {
5480 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5481 		if (is_t4(sc)) {
5482 			used = G_USED(lo);
5483 			alloc = G_ALLOC(lo);
5484 		} else {
5485 			used = G_T5_USED(lo);
5486 			alloc = G_T5_ALLOC(lo);
5487 		}
5488 		sbuf_printf(sb,
5489 			   "\nLoopback %d using %u pages out of %u allocated",
5490 			   i, used, alloc);
5491 	}
5492 
5493 	rc = sbuf_finish(sb);
5494 	sbuf_delete(sb);
5495 
5496 	return (rc);
5497 }
5498 
5499 static int
5500 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5501 {
5502 	struct adapter *sc = arg1;
5503 	struct sbuf *sb;
5504 	int rc;
5505 	uint16_t mtus[NMTUS];
5506 
5507 	rc = sysctl_wire_old_buffer(req, 0);
5508 	if (rc != 0)
5509 		return (rc);
5510 
5511 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5512 	if (sb == NULL)
5513 		return (ENOMEM);
5514 
5515 	t4_read_mtu_tbl(sc, mtus, NULL);
5516 
5517 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5518 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5519 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5520 	    mtus[14], mtus[15]);
5521 
5522 	rc = sbuf_finish(sb);
5523 	sbuf_delete(sb);
5524 
5525 	return (rc);
5526 }
5527 
5528 static int
5529 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5530 {
5531 	struct adapter *sc = arg1;
5532 	struct sbuf *sb;
5533 	int rc, i;
5534 	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5535 	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5536 	static const char *pm_stats[] = {
5537 		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5538 	};
5539 
5540 	rc = sysctl_wire_old_buffer(req, 0);
5541 	if (rc != 0)
5542 		return (rc);
5543 
5544 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5545 	if (sb == NULL)
5546 		return (ENOMEM);
5547 
5548 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5549 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5550 
5551 	sbuf_printf(sb, "                Tx count            Tx cycles    "
5552 	    "Rx count            Rx cycles");
5553 	for (i = 0; i < PM_NSTATS; i++)
5554 		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5555 		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5556 
5557 	rc = sbuf_finish(sb);
5558 	sbuf_delete(sb);
5559 
5560 	return (rc);
5561 }
5562 
5563 static int
5564 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5565 {
5566 	struct adapter *sc = arg1;
5567 	struct sbuf *sb;
5568 	int rc;
5569 	struct tp_rdma_stats stats;
5570 
5571 	rc = sysctl_wire_old_buffer(req, 0);
5572 	if (rc != 0)
5573 		return (rc);
5574 
5575 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5576 	if (sb == NULL)
5577 		return (ENOMEM);
5578 
5579 	t4_tp_get_rdma_stats(sc, &stats);
5580 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5581 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5582 
5583 	rc = sbuf_finish(sb);
5584 	sbuf_delete(sb);
5585 
5586 	return (rc);
5587 }
5588 
5589 static int
5590 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5591 {
5592 	struct adapter *sc = arg1;
5593 	struct sbuf *sb;
5594 	int rc;
5595 	struct tp_tcp_stats v4, v6;
5596 
5597 	rc = sysctl_wire_old_buffer(req, 0);
5598 	if (rc != 0)
5599 		return (rc);
5600 
5601 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5602 	if (sb == NULL)
5603 		return (ENOMEM);
5604 
5605 	t4_tp_get_tcp_stats(sc, &v4, &v6);
5606 	sbuf_printf(sb,
5607 	    "                                IP                 IPv6\n");
5608 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
5609 	    v4.tcpOutRsts, v6.tcpOutRsts);
5610 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
5611 	    v4.tcpInSegs, v6.tcpInSegs);
5612 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
5613 	    v4.tcpOutSegs, v6.tcpOutSegs);
5614 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
5615 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
5616 
5617 	rc = sbuf_finish(sb);
5618 	sbuf_delete(sb);
5619 
5620 	return (rc);
5621 }
5622 
/*
 * Sysctl handler that reports the adapter's TID allocation: ATID, TID,
 * STID, and FTID ranges with their in-use counts, plus the hardware's
 * active IPv4/IPv6 TID counts.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		/*
		 * When the LE hash filter is enabled the TID space is split:
		 * a direct-lookup range below the server index and a hashed
		 * range starting at the hash base.
		 */
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		/* tids_in_use is updated without the adapter lock. */
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	/* Hardware's own view of active connections, straight from LE. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5682 
/*
 * Sysctl handler that reports TP error statistics: per-channel MAC/header/
 * TCP input errors and drop counters, followed by two global offload
 * counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	/* One row per counter, one column per channel (0-3). */
	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	/* These two are adapter-wide, not per-channel. */
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5735 
5736 static int
5737 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
5738 {
5739 	struct adapter *sc = arg1;
5740 	struct sbuf *sb;
5741 	int rc;
5742 	u64 nrate[NCHAN], orate[NCHAN];
5743 
5744 	rc = sysctl_wire_old_buffer(req, 0);
5745 	if (rc != 0)
5746 		return (rc);
5747 
5748 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5749 	if (sb == NULL)
5750 		return (ENOMEM);
5751 
5752 	t4_get_chan_txrate(sc, nrate, orate);
5753 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
5754 		 "channel 3\n");
5755 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
5756 	    nrate[0], nrate[1], nrate[2], nrate[3]);
5757 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
5758 	    orate[0], orate[1], orate[2], orate[3]);
5759 
5760 	rc = sbuf_finish(sb);
5761 	sbuf_delete(sb);
5762 
5763 	return (rc);
5764 }
5765 
5766 static int
5767 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
5768 {
5769 	struct adapter *sc = arg1;
5770 	struct sbuf *sb;
5771 	int rc, v;
5772 
5773 	rc = sysctl_wire_old_buffer(req, 0);
5774 	if (rc != 0)
5775 		return (rc);
5776 
5777 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5778 	if (sb == NULL)
5779 		return (ENOMEM);
5780 
5781 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
5782 	if (G_STATSOURCE_T5(v) == 7) {
5783 		if (G_STATMODE(v) == 0) {
5784 			sbuf_printf(sb, "total %d, incomplete %d",
5785 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
5786 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
5787 		} else if (G_STATMODE(v) == 1) {
5788 			sbuf_printf(sb, "total %d, data overflow %d",
5789 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
5790 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
5791 		}
5792 	}
5793 	rc = sbuf_finish(sb);
5794 	sbuf_delete(sb);
5795 
5796 	return (rc);
5797 }
5798 #endif
5799 
5800 static inline void
5801 txq_start(struct ifnet *ifp, struct sge_txq *txq)
5802 {
5803 	struct buf_ring *br;
5804 	struct mbuf *m;
5805 
5806 	TXQ_LOCK_ASSERT_OWNED(txq);
5807 
5808 	br = txq->br;
5809 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
5810 	if (m)
5811 		t4_eth_tx(ifp, txq, m);
5812 }
5813 
5814 void
5815 t4_tx_callout(void *arg)
5816 {
5817 	struct sge_eq *eq = arg;
5818 	struct adapter *sc;
5819 
5820 	if (EQ_TRYLOCK(eq) == 0)
5821 		goto reschedule;
5822 
5823 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
5824 		EQ_UNLOCK(eq);
5825 reschedule:
5826 		if (__predict_true(!(eq->flags && EQ_DOOMED)))
5827 			callout_schedule(&eq->tx_callout, 1);
5828 		return;
5829 	}
5830 
5831 	EQ_LOCK_ASSERT_OWNED(eq);
5832 
5833 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
5834 
5835 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
5836 			struct sge_txq *txq = arg;
5837 			struct port_info *pi = txq->ifp->if_softc;
5838 
5839 			sc = pi->adapter;
5840 		} else {
5841 			struct sge_wrq *wrq = arg;
5842 
5843 			sc = wrq->adapter;
5844 		}
5845 
5846 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
5847 	}
5848 
5849 	EQ_UNLOCK(eq);
5850 }
5851 
5852 void
5853 t4_tx_task(void *arg, int count)
5854 {
5855 	struct sge_eq *eq = arg;
5856 
5857 	EQ_LOCK(eq);
5858 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
5859 		struct sge_txq *txq = arg;
5860 		txq_start(txq->ifp, txq);
5861 	} else {
5862 		struct sge_wrq *wrq = arg;
5863 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
5864 	}
5865 	EQ_UNLOCK(eq);
5866 }
5867 
5868 static uint32_t
5869 fconf_to_mode(uint32_t fconf)
5870 {
5871 	uint32_t mode;
5872 
5873 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
5874 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
5875 
5876 	if (fconf & F_FRAGMENTATION)
5877 		mode |= T4_FILTER_IP_FRAGMENT;
5878 
5879 	if (fconf & F_MPSHITTYPE)
5880 		mode |= T4_FILTER_MPS_HIT_TYPE;
5881 
5882 	if (fconf & F_MACMATCH)
5883 		mode |= T4_FILTER_MAC_IDX;
5884 
5885 	if (fconf & F_ETHERTYPE)
5886 		mode |= T4_FILTER_ETH_TYPE;
5887 
5888 	if (fconf & F_PROTOCOL)
5889 		mode |= T4_FILTER_IP_PROTO;
5890 
5891 	if (fconf & F_TOS)
5892 		mode |= T4_FILTER_IP_TOS;
5893 
5894 	if (fconf & F_VLAN)
5895 		mode |= T4_FILTER_VLAN;
5896 
5897 	if (fconf & F_VNIC_ID)
5898 		mode |= T4_FILTER_VNIC;
5899 
5900 	if (fconf & F_PORT)
5901 		mode |= T4_FILTER_PORT;
5902 
5903 	if (fconf & F_FCOE)
5904 		mode |= T4_FILTER_FCoE;
5905 
5906 	return (mode);
5907 }
5908 
5909 static uint32_t
5910 mode_to_fconf(uint32_t mode)
5911 {
5912 	uint32_t fconf = 0;
5913 
5914 	if (mode & T4_FILTER_IP_FRAGMENT)
5915 		fconf |= F_FRAGMENTATION;
5916 
5917 	if (mode & T4_FILTER_MPS_HIT_TYPE)
5918 		fconf |= F_MPSHITTYPE;
5919 
5920 	if (mode & T4_FILTER_MAC_IDX)
5921 		fconf |= F_MACMATCH;
5922 
5923 	if (mode & T4_FILTER_ETH_TYPE)
5924 		fconf |= F_ETHERTYPE;
5925 
5926 	if (mode & T4_FILTER_IP_PROTO)
5927 		fconf |= F_PROTOCOL;
5928 
5929 	if (mode & T4_FILTER_IP_TOS)
5930 		fconf |= F_TOS;
5931 
5932 	if (mode & T4_FILTER_VLAN)
5933 		fconf |= F_VLAN;
5934 
5935 	if (mode & T4_FILTER_VNIC)
5936 		fconf |= F_VNIC_ID;
5937 
5938 	if (mode & T4_FILTER_PORT)
5939 		fconf |= F_PORT;
5940 
5941 	if (mode & T4_FILTER_FCoE)
5942 		fconf |= F_FCOE;
5943 
5944 	return (fconf);
5945 }
5946 
5947 static uint32_t
5948 fspec_to_fconf(struct t4_filter_specification *fs)
5949 {
5950 	uint32_t fconf = 0;
5951 
5952 	if (fs->val.frag || fs->mask.frag)
5953 		fconf |= F_FRAGMENTATION;
5954 
5955 	if (fs->val.matchtype || fs->mask.matchtype)
5956 		fconf |= F_MPSHITTYPE;
5957 
5958 	if (fs->val.macidx || fs->mask.macidx)
5959 		fconf |= F_MACMATCH;
5960 
5961 	if (fs->val.ethtype || fs->mask.ethtype)
5962 		fconf |= F_ETHERTYPE;
5963 
5964 	if (fs->val.proto || fs->mask.proto)
5965 		fconf |= F_PROTOCOL;
5966 
5967 	if (fs->val.tos || fs->mask.tos)
5968 		fconf |= F_TOS;
5969 
5970 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
5971 		fconf |= F_VLAN;
5972 
5973 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
5974 		fconf |= F_VNIC_ID;
5975 
5976 	if (fs->val.iport || fs->mask.iport)
5977 		fconf |= F_PORT;
5978 
5979 	if (fs->val.fcoe || fs->mask.fcoe)
5980 		fconf |= F_FCOE;
5981 
5982 	return (fconf);
5983 }
5984 
/*
 * Return the current global filter mode as T4_FILTER_* flags.  Reads the
 * mode from the hardware (TP_VLAN_PRI_MAP via indirect register access)
 * and resyncs the driver's cached copy if it has drifted.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	/* Serialize against other configuration operations. */
	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* The hardware is authoritative; warn and fix a stale cache. */
	if (sc->filter_mode != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
		sc->filter_mode = fconf;
	}

	*mode = fconf_to_mode(sc->filter_mode);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6010 
/*
 * Set the global filter mode from T4_FILTER_* flags.  Fails with EBUSY if
 * any filters are in use (or, with TCP_OFFLOAD, if any port has offload
 * active), since changing the mode would invalidate them.  The actual
 * hardware update is not implemented yet, so this currently returns
 * ENOTSUP after the checks.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	/* Serialize against other configuration operations. */
	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6048 
/*
 * Read the hit count for a filter by peeking at its TCB through PCIe
 * memory window 0.  The 64-bit count sits 16 bytes into the TCB and is
 * stored big-endian by the hardware.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	/* Aim window 0 at this filter's TCB; 'off' is the residual offset. */
	memwin_info(sc, 0, &mw_base, NULL);
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	hits = t4_read_reg64(sc, mw_base + off + 16);

	return (be64toh(hits));
}
6062 
/*
 * Find the first valid filter at or after t->idx and fill in 't' with its
 * details (including the hit count if the filter was set up to count
 * hits).  t->idx is set to 0xffffffff when no valid filter is found.
 * Always returns 0 once the synchronized op has been entered.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* Nothing to report if no filters exist or the index is bogus. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* UINT64_MAX signals "hit counting not enabled". */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6101 
/*
 * Install a hardware filter described by 't'.  Validates the request
 * against the adapter's filter mode and resources, lazily allocates the
 * filter table on first use, sends the filter work request, and then
 * sleeps (interruptibly) until the firmware reply marks the filter valid.
 *
 * Returns 0 on success, EINPROGRESS if interrupted while waiting for the
 * reply, or an errno describing the validation/allocation failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
		rc = E2BIG;
		goto done;
	}

	/* A switching filter must egress via an existing port. */
	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* First filter ever: allocate the table and its lock. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter occupies 4 consecutive slots; check all of them.
	 * For IPv4 (type == 0) only the first slot is checked.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/* Work request sent: wait for the firmware reply to clear 'pending'. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6215 
/*
 * Delete the hardware filter at t->idx.  Validates the request, sends the
 * delete work request for a valid filter, and sleeps (interruptibly) until
 * the firmware reply clears the filter.  On success t->fs is filled in
 * with the deleted filter's specification for the caller's benefit.
 *
 * Returns 0 on success, EINPROGRESS if interrupted while waiting for the
 * reply, or an errno describing the failure.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/*
	 * Wait for the reply; note success means the filter is now invalid
	 * (the inverse of set_filter's wait).
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
6283 
6284 static void
6285 clear_filter(struct filter_entry *f)
6286 {
6287 	if (f->l2t)
6288 		t4_l2t_release(f->l2t);
6289 
6290 	bzero(f, sizeof (*f));
6291 }
6292 
/*
 * Build and send a FW_FILTER_WR that programs filter 'fidx' with the spec
 * already stored in sc->tids.ftid_tab[fidx].fs.  The firmware's reply is
 * delivered asynchronously to t4_filter_rpl(), which clears f->pending.
 * Returns 0 if the work request was queued, else an errno.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware tid for this filter index. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Pack the filter spec into the work request, field by field. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies are steered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	/* lip/fip: local/foreign IP value and mask. */
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Mark in-flight before tx; t4_filter_rpl() clears 'pending'. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
6395 
6396 static int
6397 del_filter_wr(struct adapter *sc, int fidx)
6398 {
6399 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6400 	struct wrqe *wr;
6401 	struct fw_filter_wr *fwr;
6402 	unsigned int ftid;
6403 
6404 	ftid = sc->tids.ftid_base + fidx;
6405 
6406 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6407 	if (wr == NULL)
6408 		return (ENOMEM);
6409 	fwr = wrtod(wr);
6410 	bzero(fwr, sizeof (*fwr));
6411 
6412 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
6413 
6414 	f->pending = 1;
6415 	t4_wrq_tx(sc, wr);
6416 	return (0);
6417 }
6418 
/*
 * Handler for the CPL reply the firmware sends in response to a filter
 * add/delete work request.  Updates the filter entry's state and wakes
 * any thread sleeping on it in set_filter()/del_filter().
 * Always returns 0.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only act on tids that fall inside the filter region. */
	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted or failed: release L2T, zero the entry. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Rouse whichever thread is sleeping in set/del_filter(). */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
6458 
6459 static int
6460 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
6461 {
6462 	int rc;
6463 
6464 	if (cntxt->cid > M_CTXTQID)
6465 		return (EINVAL);
6466 
6467 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
6468 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
6469 		return (EINVAL);
6470 
6471 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
6472 	if (rc)
6473 		return (rc);
6474 
6475 	if (sc->flags & FW_OK) {
6476 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
6477 		    &cntxt->data[0]);
6478 		if (rc == 0)
6479 			goto done;
6480 	}
6481 
6482 	/*
6483 	 * Read via firmware failed or wasn't even attempted.  Read directly via
6484 	 * the backdoor.
6485 	 */
6486 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
6487 done:
6488 	end_synchronized_op(sc, 0);
6489 	return (rc);
6490 }
6491 
6492 static int
6493 load_fw(struct adapter *sc, struct t4_data *fw)
6494 {
6495 	int rc;
6496 	uint8_t *fw_data;
6497 
6498 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
6499 	if (rc)
6500 		return (rc);
6501 
6502 	if (sc->flags & FULL_INIT_DONE) {
6503 		rc = EBUSY;
6504 		goto done;
6505 	}
6506 
6507 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
6508 	if (fw_data == NULL) {
6509 		rc = ENOMEM;
6510 		goto done;
6511 	}
6512 
6513 	rc = copyin(fw->data, fw_data, fw->len);
6514 	if (rc == 0)
6515 		rc = -t4_load_fw(sc, fw_data, fw->len);
6516 
6517 	free(fw_data, M_CXGBE);
6518 done:
6519 	end_synchronized_op(sc, 0);
6520 	return (rc);
6521 }
6522 
/*
 * Copy a range of adapter memory out to userland through PCIe memory
 * window 'win'.  The range is checked by validate_mem_range() first;
 * the 4-byte word loop below assumes that check guarantees 32-bit
 * aligned addr/len -- TODO confirm against validate_mem_range().
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* Bounce buffer: at most one window aperture's worth at a time. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		/* Aim the window at addr; off is addr's offset within it. */
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
6563 
6564 static int
6565 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
6566 {
6567 	int rc;
6568 
6569 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
6570 		return (EINVAL);
6571 
6572 	if (i2cd->len > 1) {
6573 		/* XXX: need fw support for longer reads in one go */
6574 		return (ENOTSUP);
6575 	}
6576 
6577 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
6578 	if (rc)
6579 		return (rc);
6580 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
6581 	    i2cd->offset, &i2cd->data[0]);
6582 	end_synchronized_op(sc, 0);
6583 
6584 	return (rc);
6585 }
6586 
6587 int
6588 t4_os_find_pci_capability(struct adapter *sc, int cap)
6589 {
6590 	int i;
6591 
6592 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
6593 }
6594 
6595 int
6596 t4_os_pci_save_state(struct adapter *sc)
6597 {
6598 	device_t dev;
6599 	struct pci_devinfo *dinfo;
6600 
6601 	dev = sc->dev;
6602 	dinfo = device_get_ivars(dev);
6603 
6604 	pci_cfg_save(dev, dinfo, 0);
6605 	return (0);
6606 }
6607 
6608 int
6609 t4_os_pci_restore_state(struct adapter *sc)
6610 {
6611 	device_t dev;
6612 	struct pci_devinfo *dinfo;
6613 
6614 	dev = sc->dev;
6615 	dinfo = device_get_ivars(dev);
6616 
6617 	pci_cfg_restore(dev, dinfo);
6618 	return (0);
6619 }
6620 
6621 void
6622 t4_os_portmod_changed(const struct adapter *sc, int idx)
6623 {
6624 	struct port_info *pi = sc->port[idx];
6625 	static const char *mod_str[] = {
6626 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
6627 	};
6628 
6629 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
6630 		if_printf(pi->ifp, "transceiver unplugged.\n");
6631 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
6632 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
6633 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
6634 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
6635 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
6636 		if_printf(pi->ifp, "%s transceiver inserted.\n",
6637 		    mod_str[pi->mod_type]);
6638 	} else {
6639 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
6640 		    pi->mod_type);
6641 	}
6642 }
6643 
6644 void
6645 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
6646 {
6647 	struct port_info *pi = sc->port[idx];
6648 	struct ifnet *ifp = pi->ifp;
6649 
6650 	if (link_stat) {
6651 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
6652 		if_link_state_change(ifp, LINK_STATE_UP);
6653 	} else
6654 		if_link_state_change(ifp, LINK_STATE_DOWN);
6655 }
6656 
6657 void
6658 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
6659 {
6660 	struct adapter *sc;
6661 
6662 	mtx_lock(&t4_list_lock);
6663 	SLIST_FOREACH(sc, &t4_list, link) {
6664 		/*
6665 		 * func should not make any assumptions about what state sc is
6666 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
6667 		 */
6668 		func(sc, arg);
6669 	}
6670 	mtx_unlock(&t4_list_lock);
6671 }
6672 
/* Nothing to do on open; the node exists purely for ioctls. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
6678 
/* Nothing to do on close either. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
6684 
/*
 * ioctl entry point for the adapter's control device node.  Every
 * command requires PRIV_DRIVER.  Returns 0 or an errno.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register offset must be 4-byte aligned and within BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry high-order bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		/* Caller's buffer too small: report the required size. */
		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		/* Software (per-queue) stats, only if the queues exist. */
		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	default:
		rc = EINVAL;
	}

	return (rc);
}
6832 
6833 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload (TOE) on a port.  Tracks per-port
 * enablement in sc->offload_map and activates the TOM upper layer driver
 * on first use.  Returns 0 or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The adapter must be fully initialized before TOE works. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port; nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* First TOE user on this adapter: bring up TOM. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port; nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
6882 
6883 /*
6884  * Add an upper layer driver to the global list.
6885  */
6886 int
6887 t4_register_uld(struct uld_info *ui)
6888 {
6889 	int rc = 0;
6890 	struct uld_info *u;
6891 
6892 	mtx_lock(&t4_uld_list_lock);
6893 	SLIST_FOREACH(u, &t4_uld_list, link) {
6894 	    if (u->uld_id == ui->uld_id) {
6895 		    rc = EEXIST;
6896 		    goto done;
6897 	    }
6898 	}
6899 
6900 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
6901 	ui->refcount = 0;
6902 done:
6903 	mtx_unlock(&t4_uld_list_lock);
6904 	return (rc);
6905 }
6906 
6907 int
6908 t4_unregister_uld(struct uld_info *ui)
6909 {
6910 	int rc = EINVAL;
6911 	struct uld_info *u;
6912 
6913 	mtx_lock(&t4_uld_list_lock);
6914 
6915 	SLIST_FOREACH(u, &t4_uld_list, link) {
6916 	    if (u == ui) {
6917 		    if (ui->refcount > 0) {
6918 			    rc = EBUSY;
6919 			    goto done;
6920 		    }
6921 
6922 		    SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
6923 		    rc = 0;
6924 		    goto done;
6925 	    }
6926 	}
6927 done:
6928 	mtx_unlock(&t4_uld_list_lock);
6929 	return (rc);
6930 }
6931 
6932 int
6933 t4_activate_uld(struct adapter *sc, int id)
6934 {
6935 	int rc = EAGAIN;
6936 	struct uld_info *ui;
6937 
6938 	ASSERT_SYNCHRONIZED_OP(sc);
6939 
6940 	mtx_lock(&t4_uld_list_lock);
6941 
6942 	SLIST_FOREACH(ui, &t4_uld_list, link) {
6943 		if (ui->uld_id == id) {
6944 			rc = ui->activate(sc);
6945 			if (rc == 0)
6946 				ui->refcount++;
6947 			goto done;
6948 		}
6949 	}
6950 done:
6951 	mtx_unlock(&t4_uld_list_lock);
6952 
6953 	return (rc);
6954 }
6955 
6956 int
6957 t4_deactivate_uld(struct adapter *sc, int id)
6958 {
6959 	int rc = EINVAL;
6960 	struct uld_info *ui;
6961 
6962 	ASSERT_SYNCHRONIZED_OP(sc);
6963 
6964 	mtx_lock(&t4_uld_list_lock);
6965 
6966 	SLIST_FOREACH(ui, &t4_uld_list, link) {
6967 		if (ui->uld_id == id) {
6968 			rc = ui->deactivate(sc);
6969 			if (rc == 0)
6970 				ui->refcount--;
6971 			goto done;
6972 		}
6973 	}
6974 done:
6975 	mtx_unlock(&t4_uld_list_lock);
6976 
6977 	return (rc);
6978 }
6979 #endif
6980 
6981 /*
6982  * Come up with reasonable defaults for some of the tunables, provided they're
6983  * not set by the user (in which case we'll use the values as is).
6984  */
6985 static void
6986 tweak_tunables(void)
6987 {
6988 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
6989 
6990 	if (t4_ntxq10g < 1)
6991 		t4_ntxq10g = min(nc, NTXQ_10G);
6992 
6993 	if (t4_ntxq1g < 1)
6994 		t4_ntxq1g = min(nc, NTXQ_1G);
6995 
6996 	if (t4_nrxq10g < 1)
6997 		t4_nrxq10g = min(nc, NRXQ_10G);
6998 
6999 	if (t4_nrxq1g < 1)
7000 		t4_nrxq1g = min(nc, NRXQ_1G);
7001 
7002 #ifdef TCP_OFFLOAD
7003 	if (t4_nofldtxq10g < 1)
7004 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7005 
7006 	if (t4_nofldtxq1g < 1)
7007 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7008 
7009 	if (t4_nofldrxq10g < 1)
7010 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7011 
7012 	if (t4_nofldrxq1g < 1)
7013 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7014 
7015 	if (t4_toecaps_allowed == -1)
7016 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7017 #else
7018 	if (t4_toecaps_allowed == -1)
7019 		t4_toecaps_allowed = 0;
7020 #endif
7021 
7022 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7023 		t4_tmr_idx_10g = TMR_IDX_10G;
7024 
7025 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7026 		t4_pktc_idx_10g = PKTC_IDX_10G;
7027 
7028 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7029 		t4_tmr_idx_1g = TMR_IDX_1G;
7030 
7031 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7032 		t4_pktc_idx_1g = PKTC_IDX_1G;
7033 
7034 	if (t4_qsize_txq < 128)
7035 		t4_qsize_txq = 128;
7036 
7037 	if (t4_qsize_rxq < 128)
7038 		t4_qsize_rxq = 128;
7039 	while (t4_qsize_rxq & 7)
7040 		t4_qsize_rxq++;
7041 
7042 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7043 }
7044 
/*
 * Module load/unload event handler shared by the t4nex and t5nex drivers.
 * The static 'loaded' counter ensures the shared global state is set up
 * by the first load only and torn down by the last unload only.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first of the two driver loads does the setup. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
7091 
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus (PCI) driver; mod_event handles shared load/unload setup. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);

/* T5 nexus (PCI) driver; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);

/* Network interface (port) driver attached under the T4 nexus. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

/* Network interface (port) driver attached under the T5 nexus. */
DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
7106