xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 87c1627502a5dde91e5284118eec8682b60f27a2)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus methods for the t4nex (adapter-level) device */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* softc is the full struct adapter; one per T4 card */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
86 
87 
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 	DEVMETHOD(device_probe,		cxgbe_probe),
94 	DEVMETHOD(device_attach,	cxgbe_attach),
95 	DEVMETHOD(device_detach,	cxgbe_detach),
96 	{ 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99 	"cxgbe",
100 	cxgbe_methods,
101 	sizeof(struct port_info)
102 };
103 
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/* Character device (/dev/t4nex*) used by the userland management tool. */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116 
/* T5 bus driver interface */
static int t5_probe(device_t);
/*
 * T5 reuses the T4 attach/detach methods; only the probe routine (and hence
 * the PCI device-id table it consults) differs.
 */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
131 
132 
/* T5 port (cxl) interface.  Shares all port methods with cxgbe. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* Same handlers as t4_cdevsw; only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148 
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156 
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;
/* All attached adapters, linked via sc->link; protected by t4_list_lock. */
static SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;
/* Registered upper-layer drivers (TOE etc.); protected by t4_uld_list_lock. */
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif
169 
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180 
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187 
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191 
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195 
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199 
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204 
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208 
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212 
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217 
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224 
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228 
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232 
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236 
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242 
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245 
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251 
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF	"default"
256 #define FLASH_CF	"flash"
257 #define UWIRE_CF	"uwire"
258 #define FPGA_CF		"fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261 
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268 
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so that the
271  * chip resources aren't wasted on features that will not be used.
272  */
273 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275 
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278 
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281 
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284 
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287 
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290 
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293 
294 struct intrs_and_queues {
295 	int intr_type;		/* INTx, MSI, or MSI-X */
296 	int nirq;		/* Number of vectors */
297 	int intr_flags;
298 	int ntxq10g;		/* # of NIC txq's for each 10G port */
299 	int nrxq10g;		/* # of NIC rxq's for each 10G port */
300 	int ntxq1g;		/* # of NIC txq's for each 1G port */
301 	int nrxq1g;		/* # of NIC rxq's for each 1G port */
302 #ifdef TCP_OFFLOAD
303 	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
304 	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
305 	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
306 	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
307 #endif
308 };
309 
/* Software state for one hardware filter (see the get/set/del_filter code). */
struct filter_entry {
        uint32_t valid:1;	/* filter allocated and valid */
        uint32_t locked:1;	/* filter is administratively locked */
        uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};
319 
/* Bitmask of MAC settings for update_mac_settings() to (re)apply. */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff	/* refresh everything */
};
330 
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336     uint32_t *);
337 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338 static uint32_t position_memwin(struct adapter *, int, uint32_t);
339 static int cfg_itype_and_nqueues(struct adapter *, int, int,
340     struct intrs_and_queues *);
341 static int prep_firmware(struct adapter *);
342 static int partition_resources(struct adapter *, const struct firmware *,
343     const char *);
344 static int get_params__pre_init(struct adapter *);
345 static int get_params__post_init(struct adapter *);
346 static int set_params__post_init(struct adapter *);
347 static void t4_set_desc(struct adapter *);
348 static void build_medialist(struct port_info *);
349 static int update_mac_settings(struct port_info *, int);
350 static int cxgbe_init_synchronized(struct port_info *);
351 static int cxgbe_uninit_synchronized(struct port_info *);
352 static int setup_intr_handlers(struct adapter *);
353 static int adapter_full_init(struct adapter *);
354 static int adapter_full_uninit(struct adapter *);
355 static int port_full_init(struct port_info *);
356 static int port_full_uninit(struct port_info *);
357 static void quiesce_eq(struct adapter *, struct sge_eq *);
358 static void quiesce_iq(struct adapter *, struct sge_iq *);
359 static void quiesce_fl(struct adapter *, struct sge_fl *);
360 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361     driver_intr_t *, void *, char *);
362 static int t4_free_irq(struct adapter *, struct irq *);
363 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364     unsigned int);
365 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366 static void cxgbe_tick(void *);
367 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369     struct mbuf *);
370 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371 static int fw_msg_not_handled(struct adapter *, const __be64 *);
372 static int t4_sysctls(struct adapter *);
373 static int cxgbe_sysctls(struct port_info *);
374 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
377 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
378 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
379 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
380 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
381 #ifdef SBUF_DRAIN
382 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
383 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
384 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
385 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
387 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
388 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
389 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
390 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
391 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
393 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
394 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
396 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
398 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
399 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
400 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
401 #endif
402 static inline void txq_start(struct ifnet *, struct sge_txq *);
403 static uint32_t fconf_to_mode(uint32_t);
404 static uint32_t mode_to_fconf(uint32_t);
405 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
406 static int get_filter_mode(struct adapter *, uint32_t *);
407 static int set_filter_mode(struct adapter *, uint32_t);
408 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
409 static int get_filter(struct adapter *, struct t4_filter *);
410 static int set_filter(struct adapter *, struct t4_filter *);
411 static int del_filter(struct adapter *, struct t4_filter *);
412 static void clear_filter(struct filter_entry *);
413 static int set_filter_wr(struct adapter *, int);
414 static int del_filter_wr(struct adapter *, int);
415 static int get_sge_context(struct adapter *, struct t4_sge_context *);
416 static int load_fw(struct adapter *, struct t4_data *);
417 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
418 static int read_i2c(struct adapter *, struct t4_i2c_data *);
419 #ifdef TCP_OFFLOAD
420 static int toe_capability(struct port_info *, int);
421 #endif
422 static int mod_event(module_t, int, void *);
423 
/*
 * PCI device-id -> description tables consulted by t4_probe()/t5_probe().
 * Vendor id is checked separately (PCI_VENDOR_ID_CHELSIO).
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},
	{0x5407,  "Chelsio T520-SO"},
	{0x5408,  "Chelsio T520-CX"},
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},
#ifdef notyet
	{0x5402,  "Chelsio T522-CR"},
	{0x5403,  "Chelsio T540-CR"},
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5409,  "Chelsio T520-BT"},
	{0x540a,  "Chelsio T504-BT"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540d,  "Chelsio T580-CR"},
	{0x540e,  "Chelsio T540-LP-CR"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5412,  "Chelsio T560-CR"},
	{0x5413,  "Chelsio T580-CR"},
#endif
};
466 
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
479 
480 static int
481 t4_probe(device_t dev)
482 {
483 	int i;
484 	uint16_t v = pci_get_vendor(dev);
485 	uint16_t d = pci_get_device(dev);
486 	uint8_t f = pci_get_function(dev);
487 
488 	if (v != PCI_VENDOR_ID_CHELSIO)
489 		return (ENXIO);
490 
491 	/* Attach only to PF0 of the FPGA */
492 	if (d == 0xa000 && f != 0)
493 		return (ENXIO);
494 
495 	for (i = 0; i < nitems(t4_pciids); i++) {
496 		if (d == t4_pciids[i].device) {
497 			device_set_desc(dev, t4_pciids[i].desc);
498 			return (BUS_PROBE_DEFAULT);
499 		}
500 	}
501 
502 	return (ENXIO);
503 }
504 
505 static int
506 t5_probe(device_t dev)
507 {
508 	int i;
509 	uint16_t v = pci_get_vendor(dev);
510 	uint16_t d = pci_get_device(dev);
511 	uint8_t f = pci_get_function(dev);
512 
513 	if (v != PCI_VENDOR_ID_CHELSIO)
514 		return (ENXIO);
515 
516 	/* Attach only to PF0 of the FPGA */
517 	if (d == 0xb000 && f != 0)
518 		return (ENXIO);
519 
520 	for (i = 0; i < nitems(t5_pciids); i++) {
521 		if (d == t5_pciids[i].device) {
522 			device_set_desc(dev, t5_pciids[i].desc);
523 			return (BUS_PROBE_DEFAULT);
524 		}
525 	}
526 
527 	return (ENXIO);
528 }
529 
/*
 * Attach for the T4 (and, via t5_methods, T5) nexus device.  Brings the
 * adapter far enough up that the per-port child devices can attach.  Rough
 * sequence: PCI setup, locks/lists, BAR mapping, control cdev, firmware
 * prep, parameter retrieval, first pass over ports (VI allocation), queue
 * accounting, second pass over ports (queue index assignment), interrupt
 * handlers, child attach.  If anything fails after the control cdev was
 * created the adapter is left in "recovery mode" (rc forced to 0) so the
 * userland tool can still reach it; otherwise failure unwinds via
 * t4_detach().
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Bus-master + larger MRRS and relaxed ordering on the PCIe cap. */
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	/* Install "not handled" defaults for every CPL/async/fw message. */
	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Identity mapping of tx channel to tx modulation queue. */
	for (i = 0; i < NCHAN; i++)
		sc->params.tp.tx_modq[i] = i;

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		/* 40G ports use the 10G tunables too. */
		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	/* Adapter-wide queue accounting, used to size the arrays below. */
	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
825 
826 /*
827  * Idempotent
828  */
829 static int
830 t4_detach(device_t dev)
831 {
832 	struct adapter *sc;
833 	struct port_info *pi;
834 	int i, rc;
835 
836 	sc = device_get_softc(dev);
837 
838 	if (sc->flags & FULL_INIT_DONE)
839 		t4_intr_disable(sc);
840 
841 	if (sc->cdev) {
842 		destroy_dev(sc->cdev);
843 		sc->cdev = NULL;
844 	}
845 
846 	rc = bus_generic_detach(dev);
847 	if (rc) {
848 		device_printf(dev,
849 		    "failed to detach child devices: %d\n", rc);
850 		return (rc);
851 	}
852 
853 	for (i = 0; i < sc->intr_count; i++)
854 		t4_free_irq(sc, &sc->irq[i]);
855 
856 	for (i = 0; i < MAX_NPORTS; i++) {
857 		pi = sc->port[i];
858 		if (pi) {
859 			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
860 			if (pi->dev)
861 				device_delete_child(dev, pi->dev);
862 
863 			mtx_destroy(&pi->pi_lock);
864 			free(pi, M_CXGBE);
865 		}
866 	}
867 
868 	if (sc->flags & FULL_INIT_DONE)
869 		adapter_full_uninit(sc);
870 
871 	if (sc->flags & FW_OK)
872 		t4_fw_bye(sc, sc->mbox);
873 
874 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
875 		pci_release_msi(dev);
876 
877 	if (sc->regs_res)
878 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
879 		    sc->regs_res);
880 
881 	if (sc->udbs_res)
882 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
883 		    sc->udbs_res);
884 
885 	if (sc->msix_res)
886 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
887 		    sc->msix_res);
888 
889 	if (sc->l2t)
890 		t4_free_l2t(sc->l2t);
891 
892 #ifdef TCP_OFFLOAD
893 	free(sc->sge.ofld_rxq, M_CXGBE);
894 	free(sc->sge.ofld_txq, M_CXGBE);
895 #endif
896 	free(sc->irq, M_CXGBE);
897 	free(sc->sge.rxq, M_CXGBE);
898 	free(sc->sge.txq, M_CXGBE);
899 	free(sc->sge.ctrlq, M_CXGBE);
900 	free(sc->sge.iqmap, M_CXGBE);
901 	free(sc->sge.eqmap, M_CXGBE);
902 	free(sc->tids.ftid_tab, M_CXGBE);
903 	t4_destroy_dma_tag(sc);
904 	if (mtx_initialized(&sc->sc_lock)) {
905 		mtx_lock(&t4_list_lock);
906 		SLIST_REMOVE(&t4_list, sc, adapter, link);
907 		mtx_unlock(&t4_list_lock);
908 		mtx_destroy(&sc->sc_lock);
909 	}
910 
911 	if (mtx_initialized(&sc->tids.ftid_lock))
912 		mtx_destroy(&sc->tids.ftid_lock);
913 	if (mtx_initialized(&sc->sfl_lock))
914 		mtx_destroy(&sc->sfl_lock);
915 
916 	bzero(sc, sizeof(*sc));
917 
918 	return (0);
919 }
920 
921 
922 static int
923 cxgbe_probe(device_t dev)
924 {
925 	char buf[128];
926 	struct port_info *pi = device_get_softc(dev);
927 
928 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
929 	device_set_desc_copy(dev, buf);
930 
931 	return (BUS_PROBE_DEFAULT);
932 }
933 
/* ifnet capabilities advertised by every port; all are enabled by default. */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)
938 
/*
 * Attach for a cxgbe/cxl port: allocate and configure the ifnet, set up
 * media and the VLAN event hook, then attach to the network stack.  Queue
 * counts were already assigned by the nexus in t4_attach()'s second pass.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	/* Capabilities must be in place before ether_ifattach below. */
	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
996 
/*
 * Detach routine for a port.  Marks the port doomed, waits for any other
 * synchronized operation on the adapter to finish, then tears down the
 * callout, ifnet, media, and event handler.  Always returns 0.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait for any in-progress operation to finish, then claim BUSY. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout and wait out any instance still running. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the BUSY marker claimed above and wake any waiters. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1041 
1042 static void
1043 cxgbe_init(void *arg)
1044 {
1045 	struct port_info *pi = arg;
1046 	struct adapter *sc = pi->adapter;
1047 
1048 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1049 		return;
1050 	cxgbe_init_synchronized(pi);
1051 	end_synchronized_op(sc, 0);
1052 }
1053 
1054 static int
1055 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1056 {
1057 	int rc = 0, mtu, flags;
1058 	struct port_info *pi = ifp->if_softc;
1059 	struct adapter *sc = pi->adapter;
1060 	struct ifreq *ifr = (struct ifreq *)data;
1061 	uint32_t mask;
1062 
1063 	switch (cmd) {
1064 	case SIOCSIFMTU:
1065 		mtu = ifr->ifr_mtu;
1066 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1067 			return (EINVAL);
1068 
1069 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1070 		if (rc)
1071 			return (rc);
1072 		ifp->if_mtu = mtu;
1073 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1074 			t4_update_fl_bufsize(ifp);
1075 			rc = update_mac_settings(pi, XGMAC_MTU);
1076 		}
1077 		end_synchronized_op(sc, 0);
1078 		break;
1079 
1080 	case SIOCSIFFLAGS:
1081 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1082 		if (rc)
1083 			return (rc);
1084 
1085 		if (ifp->if_flags & IFF_UP) {
1086 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1087 				flags = pi->if_flags;
1088 				if ((ifp->if_flags ^ flags) &
1089 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1090 					rc = update_mac_settings(pi,
1091 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1092 				}
1093 			} else
1094 				rc = cxgbe_init_synchronized(pi);
1095 			pi->if_flags = ifp->if_flags;
1096 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1097 			rc = cxgbe_uninit_synchronized(pi);
1098 		end_synchronized_op(sc, 0);
1099 		break;
1100 
1101 	case SIOCADDMULTI:
1102 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1103 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1104 		if (rc)
1105 			return (rc);
1106 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1107 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
1108 		end_synchronized_op(sc, LOCK_HELD);
1109 		break;
1110 
1111 	case SIOCSIFCAP:
1112 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1113 		if (rc)
1114 			return (rc);
1115 
1116 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1117 		if (mask & IFCAP_TXCSUM) {
1118 			ifp->if_capenable ^= IFCAP_TXCSUM;
1119 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1120 
1121 			if (IFCAP_TSO4 & ifp->if_capenable &&
1122 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1123 				ifp->if_capenable &= ~IFCAP_TSO4;
1124 				if_printf(ifp,
1125 				    "tso4 disabled due to -txcsum.\n");
1126 			}
1127 		}
1128 		if (mask & IFCAP_TXCSUM_IPV6) {
1129 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1130 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1131 
1132 			if (IFCAP_TSO6 & ifp->if_capenable &&
1133 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1134 				ifp->if_capenable &= ~IFCAP_TSO6;
1135 				if_printf(ifp,
1136 				    "tso6 disabled due to -txcsum6.\n");
1137 			}
1138 		}
1139 		if (mask & IFCAP_RXCSUM)
1140 			ifp->if_capenable ^= IFCAP_RXCSUM;
1141 		if (mask & IFCAP_RXCSUM_IPV6)
1142 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1143 
1144 		/*
1145 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1146 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1147 		 * sending a TSO request our way, so it's sufficient to toggle
1148 		 * IFCAP_TSOx only.
1149 		 */
1150 		if (mask & IFCAP_TSO4) {
1151 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1152 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1153 				if_printf(ifp, "enable txcsum first.\n");
1154 				rc = EAGAIN;
1155 				goto fail;
1156 			}
1157 			ifp->if_capenable ^= IFCAP_TSO4;
1158 		}
1159 		if (mask & IFCAP_TSO6) {
1160 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1161 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1162 				if_printf(ifp, "enable txcsum6 first.\n");
1163 				rc = EAGAIN;
1164 				goto fail;
1165 			}
1166 			ifp->if_capenable ^= IFCAP_TSO6;
1167 		}
1168 		if (mask & IFCAP_LRO) {
1169 #if defined(INET) || defined(INET6)
1170 			int i;
1171 			struct sge_rxq *rxq;
1172 
1173 			ifp->if_capenable ^= IFCAP_LRO;
1174 			for_each_rxq(pi, i, rxq) {
1175 				if (ifp->if_capenable & IFCAP_LRO)
1176 					rxq->iq.flags |= IQ_LRO_ENABLED;
1177 				else
1178 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1179 			}
1180 #endif
1181 		}
1182 #ifdef TCP_OFFLOAD
1183 		if (mask & IFCAP_TOE) {
1184 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1185 
1186 			rc = toe_capability(pi, enable);
1187 			if (rc != 0)
1188 				goto fail;
1189 
1190 			ifp->if_capenable ^= mask;
1191 		}
1192 #endif
1193 		if (mask & IFCAP_VLAN_HWTAGGING) {
1194 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1195 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1196 				rc = update_mac_settings(pi, XGMAC_VLANEX);
1197 		}
1198 		if (mask & IFCAP_VLAN_MTU) {
1199 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1200 
1201 			/* Need to find out how to disable auto-mtu-inflation */
1202 		}
1203 		if (mask & IFCAP_VLAN_HWTSO)
1204 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1205 		if (mask & IFCAP_VLAN_HWCSUM)
1206 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1207 
1208 #ifdef VLAN_CAPABILITIES
1209 		VLAN_CAPABILITIES(ifp);
1210 #endif
1211 fail:
1212 		end_synchronized_op(sc, 0);
1213 		break;
1214 
1215 	case SIOCSIFMEDIA:
1216 	case SIOCGIFMEDIA:
1217 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1218 		break;
1219 
1220 	default:
1221 		rc = ether_ioctl(ifp, cmd, data);
1222 	}
1223 
1224 	return (rc);
1225 }
1226 
/*
 * if_transmit handler.  Picks a tx queue (spread by flowid when present),
 * then either transmits while holding the queue lock or, if the lock is
 * contended, enqueues the mbuf on the queue's buf_ring and schedules the
 * tx callout to service it.  Returns 0 if the mbuf was sent or queued;
 * otherwise an errno (the mbuf is freed on ENETDOWN).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Drop immediately if the link is down. */
	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Distribute flows across this port's tx queues. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1293 
1294 static void
1295 cxgbe_qflush(struct ifnet *ifp)
1296 {
1297 	struct port_info *pi = ifp->if_softc;
1298 	struct sge_txq *txq;
1299 	int i;
1300 	struct mbuf *m;
1301 
1302 	/* queues do not exist if !PORT_INIT_DONE. */
1303 	if (pi->flags & PORT_INIT_DONE) {
1304 		for_each_txq(pi, i, txq) {
1305 			TXQ_LOCK(txq);
1306 			m_freem(txq->m);
1307 			txq->m = NULL;
1308 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1309 				m_freem(m);
1310 			TXQ_UNLOCK(txq);
1311 		}
1312 	}
1313 	if_qflush(ifp);
1314 }
1315 
/*
 * ifmedia change callback.  Media changes via ifconfig are not supported
 * by this driver; log the attempt and fail with EOPNOTSUPP.
 */
static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}
1325 
/*
 * ifmedia status callback.  Reports link validity/activity and, when the
 * current media is autoselect, derives the active media type from the
 * negotiated link speed.
 */
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	/* Encodes port type + transceiver module type for change detection. */
	int data = (pi->port_type << 8) | pi->mod_type;

	/* Rebuild the media list if the port/module combination changed. */
	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
1362 
/*
 * Handle a fatal hardware error: disable the SGE's global enable bit and
 * mask all interrupts before logging, leaving the adapter stopped.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1371 
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X).  Returns ENXIO if either
 * mapping fails; resources are released by the detach path.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* Register-based doorbells are usable once BAR0 is mapped. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1397 
/*
 * Map BAR2 (userspace doorbells).  Skipped entirely on T4 when RDMA is
 * disabled.  On T5 (i386/amd64 only) optionally enables write combining
 * on the BAR, switching to write-combined doorbell modes on success.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC enabled: prefer the WC doorbell modes. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				/* Non-fatal; plain UDB doorbells still work. */
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1453 
/* T4 memory window (base, aperture) settings; see setup_memwin(). */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};
1459 
/* T5 memory window (base, aperture) settings; see setup_memwin(). */
static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1465 
/*
 * Program the chip's PCIe memory window decoders.  T4 windows take
 * absolute bus addresses (hence the BAR0 read below); T5 windows take
 * offsets relative to the BAR, so bar0 is left at 0.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		/* Window size is encoded as log2(aperture) - 10. */
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1504 
1505 /*
1506  * Verify that the memory range specified by the addr/len pair is valid and lies
1507  * entirely within a single region (EDCx or MCx).
1508  */
1509 static int
1510 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1511 {
1512 	uint32_t em, addr_len, maddr, mlen;
1513 
1514 	/* Memory can only be accessed in naturally aligned 4 byte units */
1515 	if (addr & 3 || len & 3 || len == 0)
1516 		return (EINVAL);
1517 
1518 	/* Enabled memories */
1519 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1520 	if (em & F_EDRAM0_ENABLE) {
1521 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1522 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1523 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1524 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1525 		    addr + len <= maddr + mlen)
1526 			return (0);
1527 	}
1528 	if (em & F_EDRAM1_ENABLE) {
1529 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1530 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1531 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1532 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1533 		    addr + len <= maddr + mlen)
1534 			return (0);
1535 	}
1536 	if (em & F_EXT_MEM_ENABLE) {
1537 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1538 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1539 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1540 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1541 		    addr + len <= maddr + mlen)
1542 			return (0);
1543 	}
1544 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1545 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1546 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1547 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1548 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549 		    addr + len <= maddr + mlen)
1550 			return (0);
1551 	}
1552 
1553 	return (EFAULT);
1554 }
1555 
1556 /*
1557  * Verify that the memory range specified by the memtype/offset/len pair is
1558  * valid and lies entirely within the memtype specified.  The global address of
1559  * the start of the range is returned in addr.
1560  */
1561 static int
1562 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1563     uint32_t *addr)
1564 {
1565 	uint32_t em, addr_len, maddr, mlen;
1566 
1567 	/* Memory can only be accessed in naturally aligned 4 byte units */
1568 	if (off & 3 || len & 3 || len == 0)
1569 		return (EINVAL);
1570 
1571 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1572 	switch (mtype) {
1573 	case MEM_EDC0:
1574 		if (!(em & F_EDRAM0_ENABLE))
1575 			return (EINVAL);
1576 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1577 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1578 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1579 		break;
1580 	case MEM_EDC1:
1581 		if (!(em & F_EDRAM1_ENABLE))
1582 			return (EINVAL);
1583 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1584 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1585 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1586 		break;
1587 	case MEM_MC:
1588 		if (!(em & F_EXT_MEM_ENABLE))
1589 			return (EINVAL);
1590 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1591 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1592 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1593 		break;
1594 	case MEM_MC1:
1595 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1596 			return (EINVAL);
1597 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1598 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1599 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1600 		break;
1601 	default:
1602 		return (EINVAL);
1603 	}
1604 
1605 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1606 		*addr = maddr + off;	/* global address */
1607 		return (0);
1608 	}
1609 
1610 	return (EFAULT);
1611 }
1612 
1613 static void
1614 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1615 {
1616 	const struct memwin *mw;
1617 
1618 	if (is_t4(sc)) {
1619 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1620 		    ("%s: incorrect memwin# (%d)", __func__, win));
1621 		mw = &t4_memwin[win];
1622 	} else {
1623 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1624 		    ("%s: incorrect memwin# (%d)", __func__, win));
1625 		mw = &t5_memwin[win];
1626 	}
1627 
1628 	if (base != NULL)
1629 		*base = mw->base;
1630 	if (aperture != NULL)
1631 		*aperture = mw->aperture;
1632 }
1633 
1634 /*
1635  * Positions the memory window such that it can be used to access the specified
1636  * address in the chip's address space.  The return value is the offset of addr
1637  * from the start of the window.
1638  */
1639 static uint32_t
1640 position_memwin(struct adapter *sc, int n, uint32_t addr)
1641 {
1642 	uint32_t start, pf;
1643 	uint32_t reg;
1644 
1645 	KASSERT(n >= 0 && n <= 3,
1646 	    ("%s: invalid window %d.", __func__, n));
1647 	KASSERT((addr & 3) == 0,
1648 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1649 
1650 	if (is_t4(sc)) {
1651 		pf = 0;
1652 		start = addr & ~0xf;	/* start must be 16B aligned */
1653 	} else {
1654 		pf = V_PFNUM(sc->pf);
1655 		start = addr & ~0x7f;	/* start must be 128B aligned */
1656 	}
1657 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1658 
1659 	t4_write_reg(sc, reg, start | pf);
1660 	t4_read_reg(sc, reg);
1661 
1662 	return (addr - start);
1663 }
1664 
/*
 * Choose an interrupt type (MSI-X, MSI, or INTx, in order of preference
 * and subject to t4_intr_types) and the number of vectors and queues,
 * degrading from the ideal configuration down to a single vector as
 * availability shrinks.  Results are returned in *iaq.  Returns 0 on
 * success, ENXIO if no usable interrupt configuration could be found.
 * n10g/n1g are the counts of 10G and 1G ports on the adapter.
 */
static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	/* Start from the tunable-driven defaults. */
	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	/* Try MSI-X first, then MSI, then INTx. */
	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				/* Hand out extra vectors evenly per port. */
				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		/*
		 * NOTE(review): the argument order (itype, rc) appears swapped
		 * relative to the "vectors:%d, type=%d" labels -- confirm.
		 */
		device_printf(sc->dev,
		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
		    itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));

	return (ENXIO);
}
1822 
/*
 * Pack the compiled-in firmware version (major.minor.micro.build) for the
 * given chip into the 32-bit on-card format.  FW_INTFVER extracts the
 * interface version constant for one firmware subsystem.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR_##chip) | \
    V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR_##chip) | \
    V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO_##chip) | \
    V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD_##chip))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
1829 
/*
 * Per-chip firmware module information: the KLD that carries the default
 * config file, the firmware module name, and the firmware header this
 * driver was compiled against (used for compatibility checks).
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1870 
1871 static struct fw_info *
1872 find_fw_info(int chip)
1873 {
1874 	int i;
1875 
1876 	for (i = 0; i < nitems(fw_info); i++) {
1877 		if (fw_info[i].chip == chip)
1878 			return (&fw_info[i]);
1879 	}
1880 	return (NULL);
1881 }
1882 
1883 /*
1884  * Is the given firmware API compatible with the one the driver was compiled
1885  * with?
1886  */
1887 static int
1888 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1889 {
1890 
1891 	/* short circuit if it's the exact same firmware version */
1892 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1893 		return (1);
1894 
1895 	/*
1896 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1897 	 * features that are supported in the driver.
1898 	 */
1899 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1900 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1901 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1902 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1903 		return (1);
1904 #undef SAME_INTF
1905 
1906 	return (0);
1907 }
1908 
1909 /*
1910  * The firmware in the KLD is usable and can be installed.  But should it be?
1911  * This routine explains itself in detail if it indicates the KLD firmware
1912  * should be installed.
1913  */
1914 static int
1915 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1916 {
1917 	const char *reason;
1918 
1919 	KASSERT(t4_fw_install != 0, ("%s: Can't install; shouldn't be asked "
1920 	    "to evaluate if install is a good idea.", __func__));
1921 
1922 	if (!card_fw_usable) {
1923 		reason = "incompatible or unusable";
1924 		goto install;
1925 	}
1926 
1927 	if (k > c) {
1928 		reason = "older than the version bundled with this driver";
1929 		goto install;
1930 	}
1931 
1932 	if (t4_fw_install == 2 && k != c) {
1933 		reason = "different than the version bundled with this driver";
1934 		goto install;
1935 	}
1936 
1937 	return (0);
1938 
1939 install:
1940 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1941 	    "installing firmware %u.%u.%u.%u on card.\n",
1942 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1943 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1944 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1945 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1946 
1947 	return (1);
1948 }
1949 /*
1950  * Establish contact with the firmware and determine if we are the master driver
1951  * or not, and whether we are responsible for chip initialization.
1952  */
1953 static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	pf = rc;	/* non-negative rc identifies the master PF (see below) */
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	/*
	 * Decide whether to run with the firmware already on the card or to
	 * install the KLD's firmware first.  The fw_ver fields below are
	 * big-endian; they are compared for equality only, so no byte swap is
	 * needed (be32toh is applied where the values are actually consumed).
	 */
	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver ||
	    t4_fw_install == 0)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible, or we're prohibited from using it.  Note
		 * that t4_fw_install = 2 is ignored here -- use cxgbetool
		 * loadfw if you want to reinstall the same firmware as the one
		 * on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; just record which PF. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2130 
2131 #define FW_PARAM_DEV(param) \
2132 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2133 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2134 #define FW_PARAM_PFVF(param) \
2135 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2136 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2137 
2138 /*
2139  * Partition chip resources for use between various PFs, VFs, etc.
2140  */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name convention: <kld_name>_<profile>. */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	/* No default config module either: fall back to the flash config. */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/*
		 * Copy the config file into card memory via memory window 2,
		 * one aperture's worth at a time.
		 */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_CF_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Have the firmware parse and apply the config file. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/*
 * Mask each capability field with the tunable-allowed bits and cache the
 * result in the softc.  NOTE(review): the second line uses htobe16 where
 * be16toh is semantically meant; the two perform the identical 16-bit byte
 * swap so behavior is unaffected, but be16toh would read more clearly.
 */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
	sc->x = htobe16(caps.x); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2324 
2325 /*
2326  * Retrieve parameters that are needed (or nice to have) very early.
2327  */
2328 static int
2329 get_params__pre_init(struct adapter *sc)
2330 {
2331 	int rc;
2332 	uint32_t param[2], val[2];
2333 	struct fw_devlog_cmd cmd;
2334 	struct devlog_params *dlog = &sc->params.devlog;
2335 
2336 	param[0] = FW_PARAM_DEV(PORTVEC);
2337 	param[1] = FW_PARAM_DEV(CCLK);
2338 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2339 	if (rc != 0) {
2340 		device_printf(sc->dev,
2341 		    "failed to query parameters (pre_init): %d.\n", rc);
2342 		return (rc);
2343 	}
2344 
2345 	sc->params.portvec = val[0];
2346 	sc->params.nports = bitcount32(val[0]);
2347 	sc->params.vpd.cclk = val[1];
2348 
2349 	/* Read device log parameters. */
2350 	bzero(&cmd, sizeof(cmd));
2351 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2352 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2353 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2354 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2355 	if (rc != 0) {
2356 		device_printf(sc->dev,
2357 		    "failed to get devlog parameters: %d.\n", rc);
2358 		bzero(dlog, sizeof (*dlog));
2359 		rc = 0;	/* devlog isn't critical for device operation */
2360 	} else {
2361 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2362 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2363 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2364 		dlog->size = be32toh(cmd.memsize_devlog);
2365 	}
2366 
2367 	return (rc);
2368 }
2369 
2370 /*
2371  * Retrieve various parameters that are of interest to the driver.  The device
2372  * has been initialized by the firmware at this point.
2373  */
2374 static int
2375 get_params__post_init(struct adapter *sc)
2376 {
2377 	int rc;
2378 	uint32_t param[7], val[7];
2379 	struct fw_caps_config_cmd caps;
2380 
2381 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2382 	param[1] = FW_PARAM_PFVF(EQ_START);
2383 	param[2] = FW_PARAM_PFVF(FILTER_START);
2384 	param[3] = FW_PARAM_PFVF(FILTER_END);
2385 	param[4] = FW_PARAM_PFVF(L2T_START);
2386 	param[5] = FW_PARAM_PFVF(L2T_END);
2387 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2388 	if (rc != 0) {
2389 		device_printf(sc->dev,
2390 		    "failed to query parameters (post_init): %d.\n", rc);
2391 		return (rc);
2392 	}
2393 
2394 	sc->sge.iq_start = val[0];
2395 	sc->sge.eq_start = val[1];
2396 	sc->tids.ftid_base = val[2];
2397 	sc->tids.nftids = val[3] - val[2] + 1;
2398 	sc->vres.l2t.start = val[4];
2399 	sc->vres.l2t.size = val[5] - val[4] + 1;
2400 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2401 	    ("%s: L2 table size (%u) larger than expected (%u)",
2402 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2403 
2404 	/* get capabilites */
2405 	bzero(&caps, sizeof(caps));
2406 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2407 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2408 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2409 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2410 	if (rc != 0) {
2411 		device_printf(sc->dev,
2412 		    "failed to get card capabilities: %d.\n", rc);
2413 		return (rc);
2414 	}
2415 
2416 	if (caps.toecaps) {
2417 		/* query offload-related parameters */
2418 		param[0] = FW_PARAM_DEV(NTID);
2419 		param[1] = FW_PARAM_PFVF(SERVER_START);
2420 		param[2] = FW_PARAM_PFVF(SERVER_END);
2421 		param[3] = FW_PARAM_PFVF(TDDP_START);
2422 		param[4] = FW_PARAM_PFVF(TDDP_END);
2423 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2424 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2425 		if (rc != 0) {
2426 			device_printf(sc->dev,
2427 			    "failed to query TOE parameters: %d.\n", rc);
2428 			return (rc);
2429 		}
2430 		sc->tids.ntids = val[0];
2431 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2432 		sc->tids.stid_base = val[1];
2433 		sc->tids.nstids = val[2] - val[1] + 1;
2434 		sc->vres.ddp.start = val[3];
2435 		sc->vres.ddp.size = val[4] - val[3] + 1;
2436 		sc->params.ofldq_wr_cred = val[5];
2437 		sc->params.offload = 1;
2438 	}
2439 	if (caps.rdmacaps) {
2440 		param[0] = FW_PARAM_PFVF(STAG_START);
2441 		param[1] = FW_PARAM_PFVF(STAG_END);
2442 		param[2] = FW_PARAM_PFVF(RQ_START);
2443 		param[3] = FW_PARAM_PFVF(RQ_END);
2444 		param[4] = FW_PARAM_PFVF(PBL_START);
2445 		param[5] = FW_PARAM_PFVF(PBL_END);
2446 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2447 		if (rc != 0) {
2448 			device_printf(sc->dev,
2449 			    "failed to query RDMA parameters(1): %d.\n", rc);
2450 			return (rc);
2451 		}
2452 		sc->vres.stag.start = val[0];
2453 		sc->vres.stag.size = val[1] - val[0] + 1;
2454 		sc->vres.rq.start = val[2];
2455 		sc->vres.rq.size = val[3] - val[2] + 1;
2456 		sc->vres.pbl.start = val[4];
2457 		sc->vres.pbl.size = val[5] - val[4] + 1;
2458 
2459 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2460 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2461 		param[2] = FW_PARAM_PFVF(CQ_START);
2462 		param[3] = FW_PARAM_PFVF(CQ_END);
2463 		param[4] = FW_PARAM_PFVF(OCQ_START);
2464 		param[5] = FW_PARAM_PFVF(OCQ_END);
2465 		rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
2466 		if (rc != 0) {
2467 			device_printf(sc->dev,
2468 			    "failed to query RDMA parameters(2): %d.\n", rc);
2469 			return (rc);
2470 		}
2471 		sc->vres.qp.start = val[0];
2472 		sc->vres.qp.size = val[1] - val[0] + 1;
2473 		sc->vres.cq.start = val[2];
2474 		sc->vres.cq.size = val[3] - val[2] + 1;
2475 		sc->vres.ocq.start = val[4];
2476 		sc->vres.ocq.size = val[5] - val[4] + 1;
2477 	}
2478 	if (caps.iscsicaps) {
2479 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2480 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2481 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2482 		if (rc != 0) {
2483 			device_printf(sc->dev,
2484 			    "failed to query iSCSI parameters: %d.\n", rc);
2485 			return (rc);
2486 		}
2487 		sc->vres.iscsi.start = val[0];
2488 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2489 	}
2490 
2491 	/*
2492 	 * We've got the params we wanted to query via the firmware.  Now grab
2493 	 * some others directly from the chip.
2494 	 */
2495 	rc = t4_read_chip_settings(sc);
2496 
2497 	return (rc);
2498 }
2499 
2500 static int
2501 set_params__post_init(struct adapter *sc)
2502 {
2503 	uint32_t param, val;
2504 
2505 	/* ask for encapsulated CPLs */
2506 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2507 	val = 1;
2508 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2509 
2510 	return (0);
2511 }
2512 
2513 #undef FW_PARAM_PFVF
2514 #undef FW_PARAM_DEV
2515 
2516 static void
2517 t4_set_desc(struct adapter *sc)
2518 {
2519 	char buf[128];
2520 	struct adapter_params *p = &sc->params;
2521 
2522 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2523 	    p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2524 	    p->vpd.ec);
2525 
2526 	device_set_desc_copy(sc->dev, buf);
2527 }
2528 
/*
 * (Re)build the port's ifmedia list from its port type and the transceiver
 * module currently plugged in.  Called with the media list emptied first, so
 * it is safe to run again after a module change.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	ifmedia_removeall(media);

	/* All supported media are full-duplex Ethernet. */
	m = IFM_ETHER | IFM_FDX;
	/* Stash port/module type in the media entry's data word. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		/*
		 * NOTE(review): unlike the other cases this one never calls
		 * ifmedia_set, so no current media is selected for BT_XFI --
		 * confirm whether that is intentional.
		 */
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* 10G media depends on the plugged-in module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: no module, no duplex. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* 40G media depends on the plugged-in module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;
		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2646 
2647 #define FW_MAC_EXACT_CHUNK	7
2648 
2649 /*
2650  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2651  * indicates which parameters should be programmed (the rest are left alone).
2652  */
2653 static int
2654 update_mac_settings(struct port_info *pi, int flags)
2655 {
2656 	int rc;
2657 	struct ifnet *ifp = pi->ifp;
2658 	struct adapter *sc = pi->adapter;
2659 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2660 
2661 	ASSERT_SYNCHRONIZED_OP(sc);
2662 	KASSERT(flags, ("%s: not told what to update.", __func__));
2663 
2664 	if (flags & XGMAC_MTU)
2665 		mtu = ifp->if_mtu;
2666 
2667 	if (flags & XGMAC_PROMISC)
2668 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2669 
2670 	if (flags & XGMAC_ALLMULTI)
2671 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2672 
2673 	if (flags & XGMAC_VLANEX)
2674 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2675 
2676 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2677 	    vlanex, false);
2678 	if (rc) {
2679 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2680 		return (rc);
2681 	}
2682 
2683 	if (flags & XGMAC_UCADDR) {
2684 		uint8_t ucaddr[ETHER_ADDR_LEN];
2685 
2686 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2687 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2688 		    ucaddr, true, true);
2689 		if (rc < 0) {
2690 			rc = -rc;
2691 			if_printf(ifp, "change_mac failed: %d\n", rc);
2692 			return (rc);
2693 		} else {
2694 			pi->xact_addr_filt = rc;
2695 			rc = 0;
2696 		}
2697 	}
2698 
2699 	if (flags & XGMAC_MCADDRS) {
2700 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2701 		int del = 1;
2702 		uint64_t hash = 0;
2703 		struct ifmultiaddr *ifma;
2704 		int i = 0, j;
2705 
2706 		if_maddr_rlock(ifp);
2707 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2708 			if (ifma->ifma_addr->sa_family != AF_LINK)
2709 				continue;
2710 			mcaddr[i++] =
2711 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2712 
2713 			if (i == FW_MAC_EXACT_CHUNK) {
2714 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2715 				    del, i, mcaddr, NULL, &hash, 0);
2716 				if (rc < 0) {
2717 					rc = -rc;
2718 					for (j = 0; j < i; j++) {
2719 						if_printf(ifp,
2720 						    "failed to add mc address"
2721 						    " %02x:%02x:%02x:"
2722 						    "%02x:%02x:%02x rc=%d\n",
2723 						    mcaddr[j][0], mcaddr[j][1],
2724 						    mcaddr[j][2], mcaddr[j][3],
2725 						    mcaddr[j][4], mcaddr[j][5],
2726 						    rc);
2727 					}
2728 					goto mcfail;
2729 				}
2730 				del = 0;
2731 				i = 0;
2732 			}
2733 		}
2734 		if (i > 0) {
2735 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2736 			    del, i, mcaddr, NULL, &hash, 0);
2737 			if (rc < 0) {
2738 				rc = -rc;
2739 				for (j = 0; j < i; j++) {
2740 					if_printf(ifp,
2741 					    "failed to add mc address"
2742 					    " %02x:%02x:%02x:"
2743 					    "%02x:%02x:%02x rc=%d\n",
2744 					    mcaddr[j][0], mcaddr[j][1],
2745 					    mcaddr[j][2], mcaddr[j][3],
2746 					    mcaddr[j][4], mcaddr[j][5],
2747 					    rc);
2748 				}
2749 				goto mcfail;
2750 			}
2751 		}
2752 
2753 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2754 		if (rc != 0)
2755 			if_printf(ifp, "failed to set mc address hash: %d", rc);
2756 mcfail:
2757 		if_maddr_runlock(ifp);
2758 	}
2759 
2760 	return (rc);
2761 }
2762 
2763 int
2764 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2765     char *wmesg)
2766 {
2767 	int rc, pri;
2768 
2769 #ifdef WITNESS
2770 	/* the caller thinks it's ok to sleep, but is it really? */
2771 	if (flags & SLEEP_OK)
2772 		pause("t4slptst", 1);
2773 #endif
2774 
2775 	if (INTR_OK)
2776 		pri = PCATCH;
2777 	else
2778 		pri = 0;
2779 
2780 	ADAPTER_LOCK(sc);
2781 	for (;;) {
2782 
2783 		if (pi && IS_DOOMED(pi)) {
2784 			rc = ENXIO;
2785 			goto done;
2786 		}
2787 
2788 		if (!IS_BUSY(sc)) {
2789 			rc = 0;
2790 			break;
2791 		}
2792 
2793 		if (!(flags & SLEEP_OK)) {
2794 			rc = EBUSY;
2795 			goto done;
2796 		}
2797 
2798 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2799 			rc = EINTR;
2800 			goto done;
2801 		}
2802 	}
2803 
2804 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2805 	SET_BUSY(sc);
2806 #ifdef INVARIANTS
2807 	sc->last_op = wmesg;
2808 	sc->last_op_thr = curthread;
2809 #endif
2810 
2811 done:
2812 	if (!(flags & HOLD_LOCK) || rc)
2813 		ADAPTER_UNLOCK(sc);
2814 
2815 	return (rc);
2816 }
2817 
2818 void
2819 end_synchronized_op(struct adapter *sc, int flags)
2820 {
2821 
2822 	if (flags & LOCK_HELD)
2823 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2824 	else
2825 		ADAPTER_LOCK(sc);
2826 
2827 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2828 	CLR_BUSY(sc);
2829 	wakeup(&sc->flags);
2830 	ADAPTER_UNLOCK(sc);
2831 }
2832 
/*
 * Bring a port up: perform one-time adapter and port initialization if not
 * done yet, program the MAC, start the link, and enable the virtual
 * interface.  Caller must hold the synchronized op (begin_synchronized_op).
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start this port's 1Hz housekeeping tick. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);	/* undo any partial bring-up */

	return (rc);
}
2885 
2886 /*
2887  * Idempotent.
2888  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	/* Mark the port closed; clear IFF_DRV_RUNNING under the port lock. */
	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Report link down to the stack. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	t4_os_link_changed(sc, pi->port_id, 0);

	return (0);
}
2922 
2923 /*
2924  * It is ok for this function to fail midway and return right away.  t4_detach
2925  * will walk the entire sc->irq list and clean up whatever is valid.
2926  */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses resource id 0; MSI/MSI-X resource ids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1) {
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		/* One interrupt for everything. */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queues will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* One vector per NIC rx queue, named "<port>.<q>". */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* Offload rx queues use a comma: "<port>,<q>". */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
3026 
3027 static int
3028 adapter_full_init(struct adapter *sc)
3029 {
3030 	int rc, i;
3031 
3032 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3033 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3034 	    ("%s: FULL_INIT_DONE already", __func__));
3035 
3036 	/*
3037 	 * queues that belong to the adapter (not any particular port).
3038 	 */
3039 	rc = t4_setup_adapter_queues(sc);
3040 	if (rc != 0)
3041 		goto done;
3042 
3043 	for (i = 0; i < nitems(sc->tq); i++) {
3044 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3045 		    taskqueue_thread_enqueue, &sc->tq[i]);
3046 		if (sc->tq[i] == NULL) {
3047 			device_printf(sc->dev,
3048 			    "failed to allocate task queue %d\n", i);
3049 			rc = ENOMEM;
3050 			goto done;
3051 		}
3052 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3053 		    device_get_nameunit(sc->dev), i);
3054 	}
3055 
3056 	t4_intr_enable(sc);
3057 	sc->flags |= FULL_INIT_DONE;
3058 done:
3059 	if (rc != 0)
3060 		adapter_full_uninit(sc);
3061 
3062 	return (rc);
3063 }
3064 
3065 static int
3066 adapter_full_uninit(struct adapter *sc)
3067 {
3068 	int i;
3069 
3070 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3071 
3072 	t4_teardown_adapter_queues(sc);
3073 
3074 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3075 		taskqueue_free(sc->tq[i]);
3076 		sc->tq[i] = NULL;
3077 	}
3078 
3079 	sc->flags &= ~FULL_INIT_DONE;
3080 
3081 	return (0);
3082 }
3083 
/*
 * Bring a single port fully up: create its sysctl context, allocate its
 * tx/rx/fl queues, and program the RSS indirection table with the
 * absolute IDs of the port's rx queues.  Sets PORT_INIT_DONE on success;
 * on failure, port_full_uninit() undoes whatever was set up.
 */
static int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  The table entries are the absolute
	 * queue IDs of the port's rx queues.  M_WAITOK: malloc cannot fail.
	 */
	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
	    M_ZERO | M_WAITOK);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	/* t4_config_rss_range returns a negative fw error; flip the sign. */
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	free(rss, M_CXGBE);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);

	return (rc);
}
3130 
3131 /*
3132  * Idempotent.
3133  */
3134 static int
3135 port_full_uninit(struct port_info *pi)
3136 {
3137 	struct adapter *sc = pi->adapter;
3138 	int i;
3139 	struct sge_rxq *rxq;
3140 	struct sge_txq *txq;
3141 #ifdef TCP_OFFLOAD
3142 	struct sge_ofld_rxq *ofld_rxq;
3143 	struct sge_wrq *ofld_txq;
3144 #endif
3145 
3146 	if (pi->flags & PORT_INIT_DONE) {
3147 
3148 		/* Need to quiesce queues.  XXX: ctrl queues? */
3149 
3150 		for_each_txq(pi, i, txq) {
3151 			quiesce_eq(sc, &txq->eq);
3152 		}
3153 
3154 #ifdef TCP_OFFLOAD
3155 		for_each_ofld_txq(pi, i, ofld_txq) {
3156 			quiesce_eq(sc, &ofld_txq->eq);
3157 		}
3158 #endif
3159 
3160 		for_each_rxq(pi, i, rxq) {
3161 			quiesce_iq(sc, &rxq->iq);
3162 			quiesce_fl(sc, &rxq->fl);
3163 		}
3164 
3165 #ifdef TCP_OFFLOAD
3166 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3167 			quiesce_iq(sc, &ofld_rxq->iq);
3168 			quiesce_fl(sc, &ofld_rxq->fl);
3169 		}
3170 #endif
3171 	}
3172 
3173 	t4_teardown_port_queues(pi);
3174 	pi->flags &= ~PORT_INIT_DONE;
3175 
3176 	return (0);
3177 }
3178 
/*
 * Quiesce an egress queue before teardown: mark it doomed, wait out any
 * pending credit flush, then drain the tx callout and tx task so no code
 * is left running against the queue.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	/* Author notes these two steps are best-effort, not airtight. */
	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3198 
3199 static void
3200 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3201 {
3202 	(void) sc;	/* unused */
3203 
3204 	/* Synchronize with the interrupt handler */
3205 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3206 		pause("iqfree", 1);
3207 }
3208 
/*
 * Quiesce a freelist: mark it doomed while holding both the adapter's
 * starving-freelist lock and the fl lock, then drain the starvation
 * callout.  By then the fl must no longer be on the starving list
 * (asserted below).
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3222 
3223 static int
3224 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3225     driver_intr_t *handler, void *arg, char *name)
3226 {
3227 	int rc;
3228 
3229 	irq->rid = rid;
3230 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3231 	    RF_SHAREABLE | RF_ACTIVE);
3232 	if (irq->res == NULL) {
3233 		device_printf(sc->dev,
3234 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3235 		return (ENOMEM);
3236 	}
3237 
3238 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3239 	    NULL, handler, arg, &irq->tag);
3240 	if (rc != 0) {
3241 		device_printf(sc->dev,
3242 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3243 		    rid, name, rc);
3244 	} else if (name)
3245 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3246 
3247 	return (rc);
3248 }
3249 
3250 static int
3251 t4_free_irq(struct adapter *sc, struct irq *irq)
3252 {
3253 	if (irq->tag)
3254 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3255 	if (irq->res)
3256 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3257 
3258 	bzero(irq, sizeof(*irq));
3259 
3260 	return (0);
3261 }
3262 
3263 static void
3264 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3265     unsigned int end)
3266 {
3267 	uint32_t *p = (uint32_t *)(buf + start);
3268 
3269 	for ( ; start <= end; start += sizeof(uint32_t))
3270 		*p++ = t4_read_reg(sc, start);
3271 }
3272 
/*
 * Fill in a register dump for the chip.  The range tables below are
 * pairs of {first, last} register addresses (inclusive); each range is
 * copied into 'buf' at its own offset by reg_block_dump().  The T4 and
 * T5 chips have different register maps, hence two tables.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* T4 register ranges: {start, end} pairs, addresses inclusive. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* T5 register ranges: same {start, end} pair layout as above. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30040,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a04,
		0x30a0c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x31600,
		0x31608, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e04, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32208, 0x3223c,
		0x32248, 0x3227c,
		0x32288, 0x322bc,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34040,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a04,
		0x34a0c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x35600,
		0x35608, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e04, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36208, 0x3623c,
		0x36248, 0x3627c,
		0x36288, 0x362bc,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38040,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a04,
		0x38a0c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x39600,
		0x39608, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e04, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a208, 0x3a23c,
		0x3a248, 0x3a27c,
		0x3a288, 0x3a2bc,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c040,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca04,
		0x3ca0c, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d600,
		0x3d608, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de04, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e208, 0x3e23c,
		0x3e248, 0x3e27c,
		0x3e288, 0x3e2bc,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41300, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49300, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Pick the range table that matches the chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	/* Dump version: chip id in the low bits, revision shifted above. */
	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
3948 
/*
 * Periodic per-port callout: refresh the hardware MAC statistics and
 * mirror them into the ifnet counters, then reschedule itself.  Stops
 * rescheduling (without rearming the callout) once the interface is no
 * longer running.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	/* Pull the latest MAC stats for this port's tx channel. */
	t4_get_port_stats(pi->adapter, pi->tx_chan, s);

	/*
	 * Pause frames are subtracted out of the frame/byte counters
	 * (64 bytes per pause frame, presumably — see the * 64 below).
	 */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	/* Input drops: all per-channel overflow and truncation counters. */
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;

	/* Output drops: MAC drops plus each tx queue's buf_ring drops. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	/* Run again in one second. */
	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
3988 
3989 static void
3990 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
3991 {
3992 	struct ifnet *vlan;
3993 
3994 	if (arg != ifp || ifp->if_type != IFT_ETHER)
3995 		return;
3996 
3997 	vlan = VLAN_DEVAT(ifp, vid);
3998 	VLAN_SETCOOKIE(vlan, ifp);
3999 }
4000 
/*
 * Default CPL message handler, installed by t4_register_cpl_handler()
 * whenever no real handler is registered for an opcode.  Reaching it
 * means the hardware delivered a CPL nobody asked for: panic under
 * INVARIANTS, otherwise log and drop the payload.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);	/* nobody else will free the payload */
#endif
	return (EDOOFUS);
}
4015 
4016 int
4017 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4018 {
4019 	uintptr_t *loc, new;
4020 
4021 	if (opcode >= nitems(sc->cpl_handler))
4022 		return (EINVAL);
4023 
4024 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4025 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4026 	atomic_store_rel_ptr(loc, new);
4027 
4028 	return (0);
4029 }
4030 
/*
 * Default async-notification handler, installed by
 * t4_register_an_handler() when no real handler is registered.
 * Panic under INVARIANTS, otherwise just log the event.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4043 
4044 int
4045 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4046 {
4047 	uintptr_t *loc, new;
4048 
4049 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4050 	loc = (uintptr_t *) &sc->an_handler;
4051 	atomic_store_rel_ptr(loc, new);
4052 
4053 	return (0);
4054 }
4055 
/*
 * Default firmware-message handler, installed by
 * t4_register_fw_msg_handler() when no real handler is registered for a
 * message type.  Panic under INVARIANTS, otherwise just log the type.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	/* Recover the enclosing cpl_fw6_msg from its data[] payload. */
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4069 
4070 int
4071 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4072 {
4073 	uintptr_t *loc, new;
4074 
4075 	if (type >= nitems(sc->fw_msg_handler))
4076 		return (EINVAL);
4077 
4078 	/*
4079 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4080 	 * handler dispatch table.  Reject any attempt to install a handler for
4081 	 * this subtype.
4082 	 */
4083 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4084 		return (EINVAL);
4085 
4086 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4087 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4088 	atomic_store_rel_ptr(loc, new);
4089 
4090 	return (0);
4091 }
4092 
4093 static int
4094 t4_sysctls(struct adapter *sc)
4095 {
4096 	struct sysctl_ctx_list *ctx;
4097 	struct sysctl_oid *oid;
4098 	struct sysctl_oid_list *children, *c0;
4099 	static char *caps[] = {
4100 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4101 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4102 		"\20\1TOE",				/* caps[2] toecaps */
4103 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4104 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4105 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4106 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4107 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4108 	};
4109 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4110 
4111 	ctx = device_get_sysctl_ctx(sc->dev);
4112 
4113 	/*
4114 	 * dev.t4nex.X.
4115 	 */
4116 	oid = device_get_sysctl_tree(sc->dev);
4117 	c0 = children = SYSCTL_CHILDREN(oid);
4118 
4119 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4120 	    sc->params.nports, "# of ports");
4121 
4122 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4123 	    NULL, chip_rev(sc), "chip hardware revision");
4124 
4125 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4126 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4127 
4128 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4129 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4130 
4131 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4132 	    sc->cfcsum, "config file checksum");
4133 
4134 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4135 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4136 	    sysctl_bitfield, "A", "available doorbells");
4137 
4138 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4139 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4140 	    sysctl_bitfield, "A", "available link capabilities");
4141 
4142 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4143 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4144 	    sysctl_bitfield, "A", "available NIC capabilities");
4145 
4146 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4147 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4148 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4149 
4150 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4151 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4152 	    sysctl_bitfield, "A", "available RDMA capabilities");
4153 
4154 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4155 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4156 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4157 
4158 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4159 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4160 	    sysctl_bitfield, "A", "available FCoE capabilities");
4161 
4162 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4163 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4164 
4165 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4166 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4167 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4168 	    "interrupt holdoff timer values (us)");
4169 
4170 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4171 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4172 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4173 	    "interrupt holdoff packet counter values");
4174 
4175 #ifdef SBUF_DRAIN
4176 	/*
4177 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4178 	 */
4179 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4180 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4181 	    "logs and miscellaneous information");
4182 	children = SYSCTL_CHILDREN(oid);
4183 
4184 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4185 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4186 	    sysctl_cctrl, "A", "congestion control");
4187 
4188 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4189 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4190 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4191 
4192 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4193 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4194 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4195 
4196 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4197 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4198 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4199 
4200 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4201 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4202 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4203 
4204 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4205 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4206 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4207 
4208 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4209 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4210 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4211 
4212 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4213 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4214 	    sysctl_cim_la, "A", "CIM logic analyzer");
4215 
4216 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4217 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4218 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4219 
4220 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4221 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4222 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4223 
4224 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4225 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4226 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4227 
4228 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4229 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4230 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4231 
4232 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4233 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4234 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4235 
4236 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4237 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4238 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4239 
4240 	if (is_t5(sc)) {
4241 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4242 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4243 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4244 
4245 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4246 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4247 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4248 	}
4249 
4250 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4251 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4252 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4253 
4254 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4255 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4256 	    sysctl_cpl_stats, "A", "CPL statistics");
4257 
4258 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4259 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4260 	    sysctl_ddp_stats, "A", "DDP statistics");
4261 
4262 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4263 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4264 	    sysctl_devlog, "A", "firmware's device log");
4265 
4266 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4267 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4268 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4269 
4270 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4271 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4272 	    sysctl_hw_sched, "A", "hardware scheduler ");
4273 
4274 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4275 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4276 	    sysctl_l2t, "A", "hardware L2 table");
4277 
4278 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4279 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4280 	    sysctl_lb_stats, "A", "loopback statistics");
4281 
4282 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4283 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4284 	    sysctl_meminfo, "A", "memory regions");
4285 
4286 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4287 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4288 	    sysctl_path_mtus, "A", "path MTUs");
4289 
4290 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4291 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4292 	    sysctl_pm_stats, "A", "PM statistics");
4293 
4294 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4295 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4296 	    sysctl_rdma_stats, "A", "RDMA statistics");
4297 
4298 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4299 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4300 	    sysctl_tcp_stats, "A", "TCP statistics");
4301 
4302 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4303 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4304 	    sysctl_tids, "A", "TID information");
4305 
4306 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4307 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4308 	    sysctl_tp_err_stats, "A", "TP error statistics");
4309 
4310 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4311 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4312 	    sysctl_tx_rate, "A", "Tx rate");
4313 
4314 	if (is_t5(sc)) {
4315 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4316 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4317 		    sysctl_wcwr_stats, "A", "write combined work requests");
4318 	}
4319 #endif
4320 
4321 #ifdef TCP_OFFLOAD
4322 	if (is_offload(sc)) {
4323 		/*
4324 		 * dev.t4nex.X.toe.
4325 		 */
4326 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4327 		    NULL, "TOE parameters");
4328 		children = SYSCTL_CHILDREN(oid);
4329 
4330 		sc->tt.sndbuf = 256 * 1024;
4331 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4332 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4333 
4334 		sc->tt.ddp = 0;
4335 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4336 		    &sc->tt.ddp, 0, "DDP allowed");
4337 
4338 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4339 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4340 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4341 
4342 		sc->tt.ddp_thres =
4343 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4344 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4345 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4346 	}
4347 #endif
4348 
4349 
4350 	return (0);
4351 }
4352 
/*
 * Register the per-port sysctl tree (dev.cxgbe.X.*): queue counts/indices,
 * the holdoff and queue-size knobs, and a dev.cxgbe.X.stats.* node with one
 * OID per MPS port-statistics register.  Returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	/* Read-only queue layout for this port. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		/* Offload (TOE) queue layout, present only on TOE-capable HW. */
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	/* Tunables; the handlers validate and apply the new values. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Each of these OIDs reads a 64-bit register live via
 * sysctl_handle_t4_reg64; the register offset is passed as arg2.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	/* MPS Tx port statistics registers. */
	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	/* MPS Rx port statistics; these are also indexed by pi->tx_chan. */
	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Software-maintained counters exported directly from pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4566 
4567 static int
4568 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4569 {
4570 	int rc, *i;
4571 	struct sbuf sb;
4572 
4573 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4574 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4575 		sbuf_printf(&sb, "%d ", *i);
4576 	sbuf_trim(&sb);
4577 	sbuf_finish(&sb);
4578 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4579 	sbuf_delete(&sb);
4580 	return (rc);
4581 }
4582 
4583 static int
4584 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4585 {
4586 	int rc;
4587 	struct sbuf *sb;
4588 
4589 	rc = sysctl_wire_old_buffer(req, 0);
4590 	if (rc != 0)
4591 		return(rc);
4592 
4593 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4594 	if (sb == NULL)
4595 		return (ENOMEM);
4596 
4597 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4598 	rc = sbuf_finish(sb);
4599 	sbuf_delete(sb);
4600 
4601 	return (rc);
4602 }
4603 
/*
 * Get/set the interrupt holdoff timer index for all of the port's rx queues.
 * A new value is pushed out to live queues immediately.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);	/* error, or this was just a read */

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Preserve packet-count coalescing if pktc_idx is in use (!= -1). */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* plain store on platforms without an 8-bit atomic store */
		rxq->iq.intr_params = v;
#endif
	}
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4640 
4641 static int
4642 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4643 {
4644 	struct port_info *pi = arg1;
4645 	struct adapter *sc = pi->adapter;
4646 	int idx, rc;
4647 
4648 	idx = pi->pktc_idx;
4649 
4650 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4651 	if (rc != 0 || req->newptr == NULL)
4652 		return (rc);
4653 
4654 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4655 		return (EINVAL);
4656 
4657 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4658 	    "t4pktc");
4659 	if (rc)
4660 		return (rc);
4661 
4662 	if (pi->flags & PORT_INIT_DONE)
4663 		rc = EBUSY; /* cannot be changed once the queues are created */
4664 	else
4665 		pi->pktc_idx = idx;
4666 
4667 	end_synchronized_op(sc, LOCK_HELD);
4668 	return (rc);
4669 }
4670 
4671 static int
4672 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4673 {
4674 	struct port_info *pi = arg1;
4675 	struct adapter *sc = pi->adapter;
4676 	int qsize, rc;
4677 
4678 	qsize = pi->qsize_rxq;
4679 
4680 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4681 	if (rc != 0 || req->newptr == NULL)
4682 		return (rc);
4683 
4684 	if (qsize < 128 || (qsize & 7))
4685 		return (EINVAL);
4686 
4687 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4688 	    "t4rxqs");
4689 	if (rc)
4690 		return (rc);
4691 
4692 	if (pi->flags & PORT_INIT_DONE)
4693 		rc = EBUSY; /* cannot be changed once the queues are created */
4694 	else
4695 		pi->qsize_rxq = qsize;
4696 
4697 	end_synchronized_op(sc, LOCK_HELD);
4698 	return (rc);
4699 }
4700 
4701 static int
4702 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4703 {
4704 	struct port_info *pi = arg1;
4705 	struct adapter *sc = pi->adapter;
4706 	int qsize, rc;
4707 
4708 	qsize = pi->qsize_txq;
4709 
4710 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4711 	if (rc != 0 || req->newptr == NULL)
4712 		return (rc);
4713 
4714 	/* bufring size must be powerof2 */
4715 	if (qsize < 128 || !powerof2(qsize))
4716 		return (EINVAL);
4717 
4718 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4719 	    "t4txqs");
4720 	if (rc)
4721 		return (rc);
4722 
4723 	if (pi->flags & PORT_INIT_DONE)
4724 		rc = EBUSY; /* cannot be changed once the queues are created */
4725 	else
4726 		pi->qsize_txq = qsize;
4727 
4728 	end_synchronized_op(sc, LOCK_HELD);
4729 	return (rc);
4730 }
4731 
4732 static int
4733 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4734 {
4735 	struct adapter *sc = arg1;
4736 	int reg = arg2;
4737 	uint64_t val;
4738 
4739 	val = t4_read_reg64(sc, reg);
4740 
4741 	return (sysctl_handle_64(oidp, &val, 0, req));
4742 }
4743 
4744 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table.  For each congestion control window
 * two rows are printed: the 16 per-MTU increment values, then a_wnd and the
 * decrement factor selected by b_wnd.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* b_wnd[i] is an index (0-7) into this table. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4782 
/*
 * Names of the CIM queues, in the same order as the arg2 values passed to
 * sysctl_cim_ibq_obq (IBQs first, then OBQs).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
4788 
4789 static int
4790 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4791 {
4792 	struct adapter *sc = arg1;
4793 	struct sbuf *sb;
4794 	int rc, i, n, qid = arg2;
4795 	uint32_t *buf, *p;
4796 	char *qtype;
4797 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4798 
4799 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4800 	    ("%s: bad qid %d\n", __func__, qid));
4801 
4802 	if (qid < CIM_NUM_IBQ) {
4803 		/* inbound queue */
4804 		qtype = "IBQ";
4805 		n = 4 * CIM_IBQ_SIZE;
4806 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4807 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4808 	} else {
4809 		/* outbound queue */
4810 		qtype = "OBQ";
4811 		qid -= CIM_NUM_IBQ;
4812 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4813 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4814 		rc = t4_read_cim_obq(sc, qid, buf, n);
4815 	}
4816 
4817 	if (rc < 0) {
4818 		rc = -rc;
4819 		goto done;
4820 	}
4821 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
4822 
4823 	rc = sysctl_wire_old_buffer(req, 0);
4824 	if (rc != 0)
4825 		goto done;
4826 
4827 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4828 	if (sb == NULL) {
4829 		rc = ENOMEM;
4830 		goto done;
4831 	}
4832 
4833 	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
4834 	for (i = 0, p = buf; i < n; i += 16, p += 4)
4835 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4836 		    p[2], p[3]);
4837 
4838 	rc = sbuf_finish(sb);
4839 	sbuf_delete(sb);
4840 done:
4841 	free(buf, M_CXGBE);
4842 	return (rc);
4843 }
4844 
4845 static int
4846 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4847 {
4848 	struct adapter *sc = arg1;
4849 	u_int cfg;
4850 	struct sbuf *sb;
4851 	uint32_t *buf, *p;
4852 	int rc;
4853 
4854 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4855 	if (rc != 0)
4856 		return (rc);
4857 
4858 	rc = sysctl_wire_old_buffer(req, 0);
4859 	if (rc != 0)
4860 		return (rc);
4861 
4862 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4863 	if (sb == NULL)
4864 		return (ENOMEM);
4865 
4866 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4867 	    M_ZERO | M_WAITOK);
4868 
4869 	rc = -t4_cim_read_la(sc, buf, NULL);
4870 	if (rc != 0)
4871 		goto done;
4872 
4873 	sbuf_printf(sb, "Status   Data      PC%s",
4874 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
4875 	    "     LS0Stat  LS0Addr             LS0Data");
4876 
4877 	KASSERT((sc->params.cim_la_size & 7) == 0,
4878 	    ("%s: p will walk off the end of buf", __func__));
4879 
4880 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4881 		if (cfg & F_UPDBGLACAPTPCONLY) {
4882 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
4883 			    p[6], p[7]);
4884 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
4885 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4886 			    p[4] & 0xff, p[5] >> 8);
4887 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
4888 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4889 			    p[1] & 0xf, p[2] >> 4);
4890 		} else {
4891 			sbuf_printf(sb,
4892 			    "\n  %02x   %x%07x %x%07x %08x %08x "
4893 			    "%08x%08x%08x%08x",
4894 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4895 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4896 			    p[6], p[7]);
4897 		}
4898 	}
4899 
4900 	rc = sbuf_finish(sb);
4901 	sbuf_delete(sb);
4902 done:
4903 	free(buf, M_CXGBE);
4904 	return (rc);
4905 }
4906 
4907 static int
4908 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
4909 {
4910 	struct adapter *sc = arg1;
4911 	struct sbuf *sb;
4912 	int rc, i;
4913 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
4914 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
4915 	uint16_t thres[CIM_NUM_IBQ];
4916 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
4917 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
4918 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
4919 
4920 	if (is_t4(sc)) {
4921 		cim_num_obq = CIM_NUM_OBQ;
4922 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
4923 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
4924 	} else {
4925 		cim_num_obq = CIM_NUM_OBQ_T5;
4926 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
4927 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
4928 	}
4929 	nq = CIM_NUM_IBQ + cim_num_obq;
4930 
4931 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
4932 	if (rc == 0)
4933 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
4934 	if (rc != 0)
4935 		return (rc);
4936 
4937 	t4_read_cimq_cfg(sc, base, size, thres);
4938 
4939 	rc = sysctl_wire_old_buffer(req, 0);
4940 	if (rc != 0)
4941 		return (rc);
4942 
4943 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4944 	if (sb == NULL)
4945 		return (ENOMEM);
4946 
4947 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
4948 
4949 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
4950 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
4951 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
4952 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
4953 		    G_QUEREMFLITS(p[2]) * 16);
4954 	for ( ; i < nq; i++, p += 4, wr += 2)
4955 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
4956 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
4957 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
4958 		    G_QUEREMFLITS(p[2]) * 16);
4959 
4960 	rc = sbuf_finish(sb);
4961 	sbuf_delete(sb);
4962 
4963 	return (rc);
4964 }
4965 
4966 static int
4967 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
4968 {
4969 	struct adapter *sc = arg1;
4970 	struct sbuf *sb;
4971 	int rc;
4972 	struct tp_cpl_stats stats;
4973 
4974 	rc = sysctl_wire_old_buffer(req, 0);
4975 	if (rc != 0)
4976 		return (rc);
4977 
4978 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4979 	if (sb == NULL)
4980 		return (ENOMEM);
4981 
4982 	t4_tp_get_cpl_stats(sc, &stats);
4983 
4984 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
4985 	    "channel 3\n");
4986 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
4987 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
4988 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
4989 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
4990 
4991 	rc = sbuf_finish(sb);
4992 	sbuf_delete(sb);
4993 
4994 	return (rc);
4995 }
4996 
4997 static int
4998 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
4999 {
5000 	struct adapter *sc = arg1;
5001 	struct sbuf *sb;
5002 	int rc;
5003 	struct tp_usm_stats stats;
5004 
5005 	rc = sysctl_wire_old_buffer(req, 0);
5006 	if (rc != 0)
5007 		return(rc);
5008 
5009 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5010 	if (sb == NULL)
5011 		return (ENOMEM);
5012 
5013 	t4_get_usm_stats(sc, &stats);
5014 
5015 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5016 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5017 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5018 
5019 	rc = sbuf_finish(sb);
5020 	sbuf_delete(sb);
5021 
5022 	return (rc);
5023 }
5024 
/* Printable names for the firmware devlog level field, indexed by value. */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5033 
/* Printable names for the firmware devlog facility field, indexed by value. */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5059 
/*
 * Dump the firmware's device log.  The log region in adapter memory is read
 * in one shot, the oldest entry is located by timestamp, and the entries are
 * then printed in order, wrapping around the end of the buffer.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	/* Fall back to hard-coded log location/size if none was discovered. */
	if (dparams->start == 0) {
		dparams->memtype = 0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
	    (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byte-swap every used entry and remember the index of the one with
	 * the smallest (i.e. oldest) timestamp.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring from the oldest entry, wrapping at nentries. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/*
		 * NOTE(review): e->fmt is a format string read from adapter
		 * memory (firmware-supplied) and is passed to sbuf_printf
		 * unchecked; it is assumed trusted.
		 */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5145 
5146 static int
5147 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5148 {
5149 	struct adapter *sc = arg1;
5150 	struct sbuf *sb;
5151 	int rc;
5152 	struct tp_fcoe_stats stats[4];
5153 
5154 	rc = sysctl_wire_old_buffer(req, 0);
5155 	if (rc != 0)
5156 		return (rc);
5157 
5158 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5159 	if (sb == NULL)
5160 		return (ENOMEM);
5161 
5162 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5163 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5164 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5165 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5166 
5167 	sbuf_printf(sb, "                   channel 0        channel 1        "
5168 	    "channel 2        channel 3\n");
5169 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5170 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5171 	    stats[3].octetsDDP);
5172 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5173 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5174 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5175 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5176 	    stats[3].framesDrop);
5177 
5178 	rc = sbuf_finish(sb);
5179 	sbuf_delete(sb);
5180 
5181 	return (rc);
5182 }
5183 
5184 static int
5185 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5186 {
5187 	struct adapter *sc = arg1;
5188 	struct sbuf *sb;
5189 	int rc, i;
5190 	unsigned int map, kbps, ipg, mode;
5191 	unsigned int pace_tab[NTX_SCHED];
5192 
5193 	rc = sysctl_wire_old_buffer(req, 0);
5194 	if (rc != 0)
5195 		return (rc);
5196 
5197 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5198 	if (sb == NULL)
5199 		return (ENOMEM);
5200 
5201 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5202 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5203 	t4_read_pace_tbl(sc, pace_tab);
5204 
5205 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5206 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5207 
5208 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5209 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5210 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5211 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5212 		if (kbps)
5213 			sbuf_printf(sb, "%9u     ", kbps);
5214 		else
5215 			sbuf_printf(sb, " disabled     ");
5216 
5217 		if (ipg)
5218 			sbuf_printf(sb, "%13u        ", ipg);
5219 		else
5220 			sbuf_printf(sb, "     disabled        ");
5221 
5222 		if (pace_tab[i])
5223 			sbuf_printf(sb, "%10u", pace_tab[i]);
5224 		else
5225 			sbuf_printf(sb, "  disabled");
5226 	}
5227 
5228 	rc = sbuf_finish(sb);
5229 	sbuf_delete(sb);
5230 
5231 	return (rc);
5232 }
5233 
/*
 * Report loopback port statistics, two loopback ports per column pair.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/*
	 * Must stay in sync with the counter fields of struct lb_port_stats:
	 * the print loop below walks the struct as a flat array of uint64_t
	 * starting at 'octets', one name per counter.
	 */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Two loopback ports at a time (columns side by side). */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		/* Walk the counters in declaration order; see stat_name[]. */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5281 
/*
 * One region of adapter memory for the meminfo sysctl: the address span
 * [base, limit] plus an index into a region-name table.  An idx >=
 * nitems(region) marks an entry to hide (e.g. an address-space hole).
 */
struct mem_desc {
	unsigned int base;	/* first address of the region */
	unsigned int limit;	/* last address (inclusive); 0 = derive later */
	unsigned int idx;	/* index into the name table */
};
5287 
5288 static int
5289 mem_desc_cmp(const void *a, const void *b)
5290 {
5291 	return ((const struct mem_desc *)a)->base -
5292 	       ((const struct mem_desc *)b)->base;
5293 }
5294 
/*
 * Append one "name from-to [size]" line to the sbuf; empty regions
 * (to == from - 1) are suppressed.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	const unsigned int len = to - from + 1;

	if (len == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, len);
}
5308 
5309 static int
5310 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5311 {
5312 	struct adapter *sc = arg1;
5313 	struct sbuf *sb;
5314 	int rc, i, n;
5315 	uint32_t lo, hi, used, alloc;
5316 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5317 	static const char *region[] = {
5318 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5319 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5320 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5321 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5322 		"RQUDP region:", "PBL region:", "TXPBL region:",
5323 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5324 		"On-chip queues:"
5325 	};
5326 	struct mem_desc avail[4];
5327 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5328 	struct mem_desc *md = mem;
5329 
5330 	rc = sysctl_wire_old_buffer(req, 0);
5331 	if (rc != 0)
5332 		return (rc);
5333 
5334 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5335 	if (sb == NULL)
5336 		return (ENOMEM);
5337 
5338 	for (i = 0; i < nitems(mem); i++) {
5339 		mem[i].limit = 0;
5340 		mem[i].idx = i;
5341 	}
5342 
5343 	/* Find and sort the populated memory ranges */
5344 	i = 0;
5345 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5346 	if (lo & F_EDRAM0_ENABLE) {
5347 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5348 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5349 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5350 		avail[i].idx = 0;
5351 		i++;
5352 	}
5353 	if (lo & F_EDRAM1_ENABLE) {
5354 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5355 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5356 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5357 		avail[i].idx = 1;
5358 		i++;
5359 	}
5360 	if (lo & F_EXT_MEM_ENABLE) {
5361 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5362 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5363 		avail[i].limit = avail[i].base +
5364 		    (G_EXT_MEM_SIZE(hi) << 20);
5365 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5366 		i++;
5367 	}
5368 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5369 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5370 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5371 		avail[i].limit = avail[i].base +
5372 		    (G_EXT_MEM1_SIZE(hi) << 20);
5373 		avail[i].idx = 4;
5374 		i++;
5375 	}
5376 	if (!i)                                    /* no memory available */
5377 		return 0;
5378 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5379 
5380 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5381 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5382 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5383 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5384 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5385 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5386 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5387 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5388 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5389 
5390 	/* the next few have explicit upper bounds */
5391 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5392 	md->limit = md->base - 1 +
5393 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5394 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5395 	md++;
5396 
5397 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5398 	md->limit = md->base - 1 +
5399 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5400 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5401 	md++;
5402 
5403 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5404 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5405 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5406 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5407 	} else {
5408 		md->base = 0;
5409 		md->idx = nitems(region);  /* hide it */
5410 	}
5411 	md++;
5412 
5413 #define ulp_region(reg) \
5414 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5415 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5416 
5417 	ulp_region(RX_ISCSI);
5418 	ulp_region(RX_TDDP);
5419 	ulp_region(TX_TPT);
5420 	ulp_region(RX_STAG);
5421 	ulp_region(RX_RQ);
5422 	ulp_region(RX_RQUDP);
5423 	ulp_region(RX_PBL);
5424 	ulp_region(TX_PBL);
5425 #undef ulp_region
5426 
5427 	md->base = 0;
5428 	md->idx = nitems(region);
5429 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5430 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5431 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5432 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5433 	}
5434 	md++;
5435 
5436 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5437 	md->limit = md->base + sc->tids.ntids - 1;
5438 	md++;
5439 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5440 	md->limit = md->base + sc->tids.ntids - 1;
5441 	md++;
5442 
5443 	md->base = sc->vres.ocq.start;
5444 	if (sc->vres.ocq.size)
5445 		md->limit = md->base + sc->vres.ocq.size - 1;
5446 	else
5447 		md->idx = nitems(region);  /* hide it */
5448 	md++;
5449 
5450 	/* add any address-space holes, there can be up to 3 */
5451 	for (n = 0; n < i - 1; n++)
5452 		if (avail[n].limit < avail[n + 1].base)
5453 			(md++)->base = avail[n].limit;
5454 	if (avail[n].limit)
5455 		(md++)->base = avail[n].limit;
5456 
5457 	n = md - mem;
5458 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5459 
5460 	for (lo = 0; lo < i; lo++)
5461 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5462 				avail[lo].limit - 1);
5463 
5464 	sbuf_printf(sb, "\n");
5465 	for (i = 0; i < n; i++) {
5466 		if (mem[i].idx >= nitems(region))
5467 			continue;                        /* skip holes */
5468 		if (!mem[i].limit)
5469 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5470 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5471 				mem[i].limit);
5472 	}
5473 
5474 	sbuf_printf(sb, "\n");
5475 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5476 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5477 	mem_region_show(sb, "uP RAM:", lo, hi);
5478 
5479 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5480 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5481 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5482 
5483 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5484 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5485 		   G_PMRXMAXPAGE(lo),
5486 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5487 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5488 
5489 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5490 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5491 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5492 		   G_PMTXMAXPAGE(lo),
5493 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5494 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5495 	sbuf_printf(sb, "%u p-structs\n",
5496 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5497 
5498 	for (i = 0; i < 4; i++) {
5499 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5500 		if (is_t4(sc)) {
5501 			used = G_USED(lo);
5502 			alloc = G_ALLOC(lo);
5503 		} else {
5504 			used = G_T5_USED(lo);
5505 			alloc = G_T5_ALLOC(lo);
5506 		}
5507 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5508 			   i, used, alloc);
5509 	}
5510 	for (i = 0; i < 4; i++) {
5511 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5512 		if (is_t4(sc)) {
5513 			used = G_USED(lo);
5514 			alloc = G_ALLOC(lo);
5515 		} else {
5516 			used = G_T5_USED(lo);
5517 			alloc = G_T5_ALLOC(lo);
5518 		}
5519 		sbuf_printf(sb,
5520 			   "\nLoopback %d using %u pages out of %u allocated",
5521 			   i, used, alloc);
5522 	}
5523 
5524 	rc = sbuf_finish(sb);
5525 	sbuf_delete(sb);
5526 
5527 	return (rc);
5528 }
5529 
5530 static int
5531 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5532 {
5533 	struct adapter *sc = arg1;
5534 	struct sbuf *sb;
5535 	int rc;
5536 	uint16_t mtus[NMTUS];
5537 
5538 	rc = sysctl_wire_old_buffer(req, 0);
5539 	if (rc != 0)
5540 		return (rc);
5541 
5542 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5543 	if (sb == NULL)
5544 		return (ENOMEM);
5545 
5546 	t4_read_mtu_tbl(sc, mtus, NULL);
5547 
5548 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5549 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5550 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5551 	    mtus[14], mtus[15]);
5552 
5553 	rc = sbuf_finish(sb);
5554 	sbuf_delete(sb);
5555 
5556 	return (rc);
5557 }
5558 
5559 static int
5560 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5561 {
5562 	struct adapter *sc = arg1;
5563 	struct sbuf *sb;
5564 	int rc, i;
5565 	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5566 	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5567 	static const char *pm_stats[] = {
5568 		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5569 	};
5570 
5571 	rc = sysctl_wire_old_buffer(req, 0);
5572 	if (rc != 0)
5573 		return (rc);
5574 
5575 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5576 	if (sb == NULL)
5577 		return (ENOMEM);
5578 
5579 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5580 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5581 
5582 	sbuf_printf(sb, "                Tx count            Tx cycles    "
5583 	    "Rx count            Rx cycles");
5584 	for (i = 0; i < PM_NSTATS; i++)
5585 		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5586 		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5587 
5588 	rc = sbuf_finish(sb);
5589 	sbuf_delete(sb);
5590 
5591 	return (rc);
5592 }
5593 
5594 static int
5595 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5596 {
5597 	struct adapter *sc = arg1;
5598 	struct sbuf *sb;
5599 	int rc;
5600 	struct tp_rdma_stats stats;
5601 
5602 	rc = sysctl_wire_old_buffer(req, 0);
5603 	if (rc != 0)
5604 		return (rc);
5605 
5606 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5607 	if (sb == NULL)
5608 		return (ENOMEM);
5609 
5610 	t4_tp_get_rdma_stats(sc, &stats);
5611 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5612 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5613 
5614 	rc = sbuf_finish(sb);
5615 	sbuf_delete(sb);
5616 
5617 	return (rc);
5618 }
5619 
5620 static int
5621 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5622 {
5623 	struct adapter *sc = arg1;
5624 	struct sbuf *sb;
5625 	int rc;
5626 	struct tp_tcp_stats v4, v6;
5627 
5628 	rc = sysctl_wire_old_buffer(req, 0);
5629 	if (rc != 0)
5630 		return (rc);
5631 
5632 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5633 	if (sb == NULL)
5634 		return (ENOMEM);
5635 
5636 	t4_tp_get_tcp_stats(sc, &v4, &v6);
5637 	sbuf_printf(sb,
5638 	    "                                IP                 IPv6\n");
5639 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
5640 	    v4.tcpOutRsts, v6.tcpOutRsts);
5641 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
5642 	    v4.tcpInSegs, v6.tcpInSegs);
5643 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
5644 	    v4.tcpOutSegs, v6.tcpOutSegs);
5645 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
5646 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
5647 
5648 	rc = sbuf_finish(sb);
5649 	sbuf_delete(sb);
5650 
5651 	return (rc);
5652 }
5653 
/*
 * Report the TID (connection identifier) ranges and usage: active TIDs,
 * regular/hashed TIDs, server TIDs, filter TIDs, and hardware counts of
 * IPv4/IPv6 users.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * Hash filtering enabled: the TID space may be split
			 * into a regular range below the server index and a
			 * hashed range starting at the hash base.
			 */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	/* Hardware's own view of active IPv4/IPv6 TID usage. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5713 
/*
 * Report TP error statistics: per-channel MAC/header/TCP input errors
 * and tunnel/offload drop counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	/* These last two are adapter-wide, not per-channel. */
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5766 
5767 static int
5768 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
5769 {
5770 	struct adapter *sc = arg1;
5771 	struct sbuf *sb;
5772 	int rc;
5773 	u64 nrate[NCHAN], orate[NCHAN];
5774 
5775 	rc = sysctl_wire_old_buffer(req, 0);
5776 	if (rc != 0)
5777 		return (rc);
5778 
5779 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5780 	if (sb == NULL)
5781 		return (ENOMEM);
5782 
5783 	t4_get_chan_txrate(sc, nrate, orate);
5784 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
5785 		 "channel 3\n");
5786 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
5787 	    nrate[0], nrate[1], nrate[2], nrate[3]);
5788 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
5789 	    orate[0], orate[1], orate[2], orate[3]);
5790 
5791 	rc = sbuf_finish(sb);
5792 	sbuf_delete(sb);
5793 
5794 	return (rc);
5795 }
5796 
5797 static int
5798 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
5799 {
5800 	struct adapter *sc = arg1;
5801 	struct sbuf *sb;
5802 	int rc, v;
5803 
5804 	rc = sysctl_wire_old_buffer(req, 0);
5805 	if (rc != 0)
5806 		return (rc);
5807 
5808 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5809 	if (sb == NULL)
5810 		return (ENOMEM);
5811 
5812 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
5813 	if (G_STATSOURCE_T5(v) == 7) {
5814 		if (G_STATMODE(v) == 0) {
5815 			sbuf_printf(sb, "total %d, incomplete %d",
5816 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
5817 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
5818 		} else if (G_STATMODE(v) == 1) {
5819 			sbuf_printf(sb, "total %d, data overflow %d",
5820 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
5821 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
5822 		}
5823 	}
5824 	rc = sbuf_finish(sb);
5825 	sbuf_delete(sb);
5826 
5827 	return (rc);
5828 }
5829 #endif
5830 
/*
 * Transmit on a tx queue: take the mbuf stashed in txq->m (presumably
 * left over from an earlier attempt -- confirm against t4_eth_tx) or
 * dequeue the next frame from the queue's buf_ring, and hand it to
 * t4_eth_tx.  Caller must hold the txq lock.
 */
static inline void
txq_start(struct ifnet *ifp, struct sge_txq *txq)
{
	struct buf_ring *br;
	struct mbuf *m;

	TXQ_LOCK_ASSERT_OWNED(txq);

	br = txq->br;
	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
	if (m)
		t4_eth_tx(ifp, txq, m);
}
5844 
5845 void
5846 t4_tx_callout(void *arg)
5847 {
5848 	struct sge_eq *eq = arg;
5849 	struct adapter *sc;
5850 
5851 	if (EQ_TRYLOCK(eq) == 0)
5852 		goto reschedule;
5853 
5854 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
5855 		EQ_UNLOCK(eq);
5856 reschedule:
5857 		if (__predict_true(!(eq->flags && EQ_DOOMED)))
5858 			callout_schedule(&eq->tx_callout, 1);
5859 		return;
5860 	}
5861 
5862 	EQ_LOCK_ASSERT_OWNED(eq);
5863 
5864 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
5865 
5866 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
5867 			struct sge_txq *txq = arg;
5868 			struct port_info *pi = txq->ifp->if_softc;
5869 
5870 			sc = pi->adapter;
5871 		} else {
5872 			struct sge_wrq *wrq = arg;
5873 
5874 			sc = wrq->adapter;
5875 		}
5876 
5877 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
5878 	}
5879 
5880 	EQ_UNLOCK(eq);
5881 }
5882 
5883 void
5884 t4_tx_task(void *arg, int count)
5885 {
5886 	struct sge_eq *eq = arg;
5887 
5888 	EQ_LOCK(eq);
5889 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
5890 		struct sge_txq *txq = arg;
5891 		txq_start(txq->ifp, txq);
5892 	} else {
5893 		struct sge_wrq *wrq = arg;
5894 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
5895 	}
5896 	EQ_UNLOCK(eq);
5897 }
5898 
5899 static uint32_t
5900 fconf_to_mode(uint32_t fconf)
5901 {
5902 	uint32_t mode;
5903 
5904 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
5905 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
5906 
5907 	if (fconf & F_FRAGMENTATION)
5908 		mode |= T4_FILTER_IP_FRAGMENT;
5909 
5910 	if (fconf & F_MPSHITTYPE)
5911 		mode |= T4_FILTER_MPS_HIT_TYPE;
5912 
5913 	if (fconf & F_MACMATCH)
5914 		mode |= T4_FILTER_MAC_IDX;
5915 
5916 	if (fconf & F_ETHERTYPE)
5917 		mode |= T4_FILTER_ETH_TYPE;
5918 
5919 	if (fconf & F_PROTOCOL)
5920 		mode |= T4_FILTER_IP_PROTO;
5921 
5922 	if (fconf & F_TOS)
5923 		mode |= T4_FILTER_IP_TOS;
5924 
5925 	if (fconf & F_VLAN)
5926 		mode |= T4_FILTER_VLAN;
5927 
5928 	if (fconf & F_VNIC_ID)
5929 		mode |= T4_FILTER_VNIC;
5930 
5931 	if (fconf & F_PORT)
5932 		mode |= T4_FILTER_PORT;
5933 
5934 	if (fconf & F_FCOE)
5935 		mode |= T4_FILTER_FCoE;
5936 
5937 	return (mode);
5938 }
5939 
5940 static uint32_t
5941 mode_to_fconf(uint32_t mode)
5942 {
5943 	uint32_t fconf = 0;
5944 
5945 	if (mode & T4_FILTER_IP_FRAGMENT)
5946 		fconf |= F_FRAGMENTATION;
5947 
5948 	if (mode & T4_FILTER_MPS_HIT_TYPE)
5949 		fconf |= F_MPSHITTYPE;
5950 
5951 	if (mode & T4_FILTER_MAC_IDX)
5952 		fconf |= F_MACMATCH;
5953 
5954 	if (mode & T4_FILTER_ETH_TYPE)
5955 		fconf |= F_ETHERTYPE;
5956 
5957 	if (mode & T4_FILTER_IP_PROTO)
5958 		fconf |= F_PROTOCOL;
5959 
5960 	if (mode & T4_FILTER_IP_TOS)
5961 		fconf |= F_TOS;
5962 
5963 	if (mode & T4_FILTER_VLAN)
5964 		fconf |= F_VLAN;
5965 
5966 	if (mode & T4_FILTER_VNIC)
5967 		fconf |= F_VNIC_ID;
5968 
5969 	if (mode & T4_FILTER_PORT)
5970 		fconf |= F_PORT;
5971 
5972 	if (mode & T4_FILTER_FCoE)
5973 		fconf |= F_FCOE;
5974 
5975 	return (fconf);
5976 }
5977 
/*
 * Compute the TP_VLAN_PRI_MAP fields (fconf bits) that a filter
 * specification requires: a field is needed if the spec matches on it
 * through either its value or its mask.
 */
static uint32_t
fspec_to_fconf(struct t4_filter_specification *fs)
{
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.vnic_vld || fs->mask.vnic_vld)
		fconf |= F_VNIC_ID;

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	return (fconf);
}
6015 
/*
 * Read the current global filter mode from the hardware (TP_VLAN_PRI_MAP
 * via the TP indirect register interface), resync the cached copy if it
 * has drifted, and return the mode as T4_FILTER_* bits.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* The cached copy should always match the hardware; warn if not. */
	if (sc->filter_mode != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
		sc->filter_mode = fconf;
	}

	*mode = fconf_to_mode(sc->filter_mode);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6041 
/*
 * Set the global filter mode.  Refused (EBUSY) while any filters are
 * installed or TCP offload is active, since changing the mode would
 * invalidate them.  Currently always fails with ENOTSUP because the
 * firmware-side implementation is not yet enabled (see "notyet" below).
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6079 
/*
 * Read a filter's hit count from its TCB in adapter memory: position
 * memory window 0 over the filter's TCB and read the 64-bit counter at
 * offset 16.  The value is stored big-endian in adapter memory.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	hits = t4_read_reg64(sc, mw_base + off + 16);

	return (be64toh(hits));
}
6093 
/*
 * Return the first valid filter at or after t->idx, filling in t with
 * its index, L2T/SMT indices, hit count (UINT64_MAX when hit counting
 * is disabled for the filter), and specification.  t->idx is set to
 * 0xffffffff when no such filter exists.  Always returns 0 once the
 * synchronized op has been entered.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	/* Linear scan from the requested index for the next valid entry. */
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6132 
/*
 * Install a hardware filter described by t.  Validates the request
 * against the adapter's capabilities and the global filter mode,
 * allocates the filter table on first use, sends the filter work
 * request, and then sleeps until the firmware reply marks the filter
 * valid (or the sleep is interrupted, in which case EINPROGRESS is
 * returned and the operation completes asynchronously).
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	/*
	 * NOTE(review): "t->idx + 4 >= nfilters" also rejects idx + 4 ==
	 * nfilters, i.e. an IPv6 filter occupying the last 4 slots --
	 * confirm whether that is intentional.
	 */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Lazily allocate the filter table on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter occupies 4 consecutive slots; an IPv4 filter just
	 * one (the loop breaks after the first iteration below).
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/* Wait for the firmware reply to clear f->pending. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6246 
/*
 * Delete the filter at t->idx.  Sends a delete work request to the firmware
 * and then sleeps until the reply (see t4_filter_rpl) marks the filter as
 * gone.  On success t->fs is filled in with the deleted filter's spec as
 * extra information for the caller.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	/* Serialize against other configuration operations on the adapter. */
	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	/* No filter table, no filters in use, or index out of range. */
	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* An add/delete for this filter is already in flight. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	/* Locked filters may not be deleted via this interface. */
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/*
	 * The work request was sent (or the filter wasn't valid and there is
	 * nothing to wait for).  Sleep until t4_filter_rpl() clears
	 * f->pending and wakes up sleepers on ftid_tab.  A successful delete
	 * leaves f->valid == 0; anything else is an error.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			/* Interruptible sleep; a signal leaves the delete
			 * in progress in the background. */
			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
6314 
6315 static void
6316 clear_filter(struct filter_entry *f)
6317 {
6318 	if (f->l2t)
6319 		t4_l2t_release(f->l2t);
6320 
6321 	bzero(f, sizeof (*f));
6322 }
6323 
/*
 * Build and transmit a FW_FILTER_WR work request that programs the filter
 * at index fidx (spec already stored in the filter table by the caller).
 * Marks the filter pending; completion is reported via t4_filter_rpl().
 * Returns 0 if the work request was queued, or an errno on failure.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware filter tid = base + index within our region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* All multi-byte fields in the WR are big-endian. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	/* Action/steering flags; VLAN rewrite = remove + insert. */
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	/* Match values and their masks, straight from the filter spec. */
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies are steered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	/* The WR's "local" side is our dip/dport, "foreign" is sip/sport. */
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Mark in-flight before handing the WR to hardware. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
6426 
/*
 * Build and transmit a filter-delete work request for the filter at fidx.
 * Marks the filter pending; t4_filter_rpl() finishes the delete when the
 * firmware replies.  Returns 0 if queued, ENOMEM if no WR could be
 * allocated.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* Hardware filter tid = base + index within our region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Reply is steered to the firmware event queue. */
	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	t4_wrq_tx(sc, wr);
	return (0);
}
6449 
/*
 * Handler for filter work request replies from the firmware.  Completes the
 * add/delete started by set_filter_wr()/del_filter_wr(): updates the filter
 * entry's state and wakes up any thread sleeping on ftid_tab.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only handle tids that fall inside our filter region. */
	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted (or failed) filter: clear it and drop the
			 * in-use count taken in set_filter_wr(). */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake any thread waiting in set_filter()/del_filter(). */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
6489 
6490 static int
6491 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
6492 {
6493 	int rc;
6494 
6495 	if (cntxt->cid > M_CTXTQID)
6496 		return (EINVAL);
6497 
6498 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
6499 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
6500 		return (EINVAL);
6501 
6502 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
6503 	if (rc)
6504 		return (rc);
6505 
6506 	if (sc->flags & FW_OK) {
6507 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
6508 		    &cntxt->data[0]);
6509 		if (rc == 0)
6510 			goto done;
6511 	}
6512 
6513 	/*
6514 	 * Read via firmware failed or wasn't even attempted.  Read directly via
6515 	 * the backdoor.
6516 	 */
6517 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
6518 done:
6519 	end_synchronized_op(sc, 0);
6520 	return (rc);
6521 }
6522 
6523 static int
6524 load_fw(struct adapter *sc, struct t4_data *fw)
6525 {
6526 	int rc;
6527 	uint8_t *fw_data;
6528 
6529 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
6530 	if (rc)
6531 		return (rc);
6532 
6533 	if (sc->flags & FULL_INIT_DONE) {
6534 		rc = EBUSY;
6535 		goto done;
6536 	}
6537 
6538 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
6539 	if (fw_data == NULL) {
6540 		rc = ENOMEM;
6541 		goto done;
6542 	}
6543 
6544 	rc = copyin(fw->data, fw_data, fw->len);
6545 	if (rc == 0)
6546 		rc = -t4_load_fw(sc, fw_data, fw->len);
6547 
6548 	free(fw_data, M_CXGBE);
6549 done:
6550 	end_synchronized_op(sc, 0);
6551 	return (rc);
6552 }
6553 
/*
 * Copy a range of adapter memory out to the user buffer in mr->data, going
 * through PCIe memory window 'win'.  The range is validated first; the copy
 * proceeds one window aperture at a time.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	/* NOTE(review): the word-at-a-time loop below assumes mr->addr and
	 * mr->len are 4-byte aligned — presumably enforced here; confirm in
	 * validate_mem_range(). */
	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* Bounce buffer: at most one window aperture's worth at a time. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;		/* userland destination */

	while (remaining) {
		/* Slide the window over addr; off is addr's offset within. */
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		/* Reset the bounce buffer and advance to the next chunk. */
		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
6594 
6595 static int
6596 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
6597 {
6598 	int rc;
6599 
6600 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
6601 		return (EINVAL);
6602 
6603 	if (i2cd->len > 1) {
6604 		/* XXX: need fw support for longer reads in one go */
6605 		return (ENOTSUP);
6606 	}
6607 
6608 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
6609 	if (rc)
6610 		return (rc);
6611 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
6612 	    i2cd->offset, &i2cd->data[0]);
6613 	end_synchronized_op(sc, 0);
6614 
6615 	return (rc);
6616 }
6617 
6618 int
6619 t4_os_find_pci_capability(struct adapter *sc, int cap)
6620 {
6621 	int i;
6622 
6623 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
6624 }
6625 
6626 int
6627 t4_os_pci_save_state(struct adapter *sc)
6628 {
6629 	device_t dev;
6630 	struct pci_devinfo *dinfo;
6631 
6632 	dev = sc->dev;
6633 	dinfo = device_get_ivars(dev);
6634 
6635 	pci_cfg_save(dev, dinfo, 0);
6636 	return (0);
6637 }
6638 
6639 int
6640 t4_os_pci_restore_state(struct adapter *sc)
6641 {
6642 	device_t dev;
6643 	struct pci_devinfo *dinfo;
6644 
6645 	dev = sc->dev;
6646 	dinfo = device_get_ivars(dev);
6647 
6648 	pci_cfg_restore(dev, dinfo);
6649 	return (0);
6650 }
6651 
6652 void
6653 t4_os_portmod_changed(const struct adapter *sc, int idx)
6654 {
6655 	struct port_info *pi = sc->port[idx];
6656 	static const char *mod_str[] = {
6657 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
6658 	};
6659 
6660 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
6661 		if_printf(pi->ifp, "transceiver unplugged.\n");
6662 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
6663 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
6664 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
6665 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
6666 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
6667 		if_printf(pi->ifp, "%s transceiver inserted.\n",
6668 		    mod_str[pi->mod_type]);
6669 	} else {
6670 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
6671 		    pi->mod_type);
6672 	}
6673 }
6674 
6675 void
6676 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
6677 {
6678 	struct port_info *pi = sc->port[idx];
6679 	struct ifnet *ifp = pi->ifp;
6680 
6681 	if (link_stat) {
6682 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
6683 		if_link_state_change(ifp, LINK_STATE_UP);
6684 	} else
6685 		if_link_state_change(ifp, LINK_STATE_DOWN);
6686 }
6687 
6688 void
6689 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
6690 {
6691 	struct adapter *sc;
6692 
6693 	mtx_lock(&t4_list_lock);
6694 	SLIST_FOREACH(sc, &t4_list, link) {
6695 		/*
6696 		 * func should not make any assumptions about what state sc is
6697 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
6698 		 */
6699 		func(sc, arg);
6700 	}
6701 	mtx_unlock(&t4_list_lock);
6702 }
6703 
/* cdev open: nothing to do, the ioctl handler does all the work. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
6709 
/* cdev close: nothing to clean up. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
6715 
/*
 * ioctl handler for the adapter's control device.  All operations require
 * PRIV_DRIVER privilege.  Most commands are dispatched to dedicated helper
 * functions; register access and statistics clearing are handled inline.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		/* Read a single 4- or 8-byte register. */
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Address must be 4-byte aligned and within the BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		/* Write a single 4- or 8-byte register. */
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry high bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		/* Dump the full register set to the caller's buffer. */
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		/* Reset MAC and software queue statistics for one port. */
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* And the port's control queue statistics. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	default:
		rc = EINVAL;
	}

	return (rc);
}
6863 
6864 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload on a port.  Enabling brings the adapter up
 * if needed and activates the TOM upper-layer driver on first use; state is
 * tracked per-port in sc->offload_map.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The adapter must be fully up before TOE can be enabled. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port: nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* Activate the TOM ULD on first enable. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port: nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
6913 
6914 /*
6915  * Add an upper layer driver to the global list.
6916  */
6917 int
6918 t4_register_uld(struct uld_info *ui)
6919 {
6920 	int rc = 0;
6921 	struct uld_info *u;
6922 
6923 	mtx_lock(&t4_uld_list_lock);
6924 	SLIST_FOREACH(u, &t4_uld_list, link) {
6925 	    if (u->uld_id == ui->uld_id) {
6926 		    rc = EEXIST;
6927 		    goto done;
6928 	    }
6929 	}
6930 
6931 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
6932 	ui->refcount = 0;
6933 done:
6934 	mtx_unlock(&t4_uld_list_lock);
6935 	return (rc);
6936 }
6937 
6938 int
6939 t4_unregister_uld(struct uld_info *ui)
6940 {
6941 	int rc = EINVAL;
6942 	struct uld_info *u;
6943 
6944 	mtx_lock(&t4_uld_list_lock);
6945 
6946 	SLIST_FOREACH(u, &t4_uld_list, link) {
6947 	    if (u == ui) {
6948 		    if (ui->refcount > 0) {
6949 			    rc = EBUSY;
6950 			    goto done;
6951 		    }
6952 
6953 		    SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
6954 		    rc = 0;
6955 		    goto done;
6956 	    }
6957 	}
6958 done:
6959 	mtx_unlock(&t4_uld_list_lock);
6960 	return (rc);
6961 }
6962 
6963 int
6964 t4_activate_uld(struct adapter *sc, int id)
6965 {
6966 	int rc = EAGAIN;
6967 	struct uld_info *ui;
6968 
6969 	ASSERT_SYNCHRONIZED_OP(sc);
6970 
6971 	mtx_lock(&t4_uld_list_lock);
6972 
6973 	SLIST_FOREACH(ui, &t4_uld_list, link) {
6974 		if (ui->uld_id == id) {
6975 			rc = ui->activate(sc);
6976 			if (rc == 0)
6977 				ui->refcount++;
6978 			goto done;
6979 		}
6980 	}
6981 done:
6982 	mtx_unlock(&t4_uld_list_lock);
6983 
6984 	return (rc);
6985 }
6986 
6987 int
6988 t4_deactivate_uld(struct adapter *sc, int id)
6989 {
6990 	int rc = EINVAL;
6991 	struct uld_info *ui;
6992 
6993 	ASSERT_SYNCHRONIZED_OP(sc);
6994 
6995 	mtx_lock(&t4_uld_list_lock);
6996 
6997 	SLIST_FOREACH(ui, &t4_uld_list, link) {
6998 		if (ui->uld_id == id) {
6999 			rc = ui->deactivate(sc);
7000 			if (rc == 0)
7001 				ui->refcount--;
7002 			goto done;
7003 		}
7004 	}
7005 done:
7006 	mtx_unlock(&t4_uld_list_lock);
7007 
7008 	return (rc);
7009 }
7010 #endif
7011 
7012 /*
7013  * Come up with reasonable defaults for some of the tunables, provided they're
7014  * not set by the user (in which case we'll use the values as is).
7015  */
7016 static void
7017 tweak_tunables(void)
7018 {
7019 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7020 
7021 	if (t4_ntxq10g < 1)
7022 		t4_ntxq10g = min(nc, NTXQ_10G);
7023 
7024 	if (t4_ntxq1g < 1)
7025 		t4_ntxq1g = min(nc, NTXQ_1G);
7026 
7027 	if (t4_nrxq10g < 1)
7028 		t4_nrxq10g = min(nc, NRXQ_10G);
7029 
7030 	if (t4_nrxq1g < 1)
7031 		t4_nrxq1g = min(nc, NRXQ_1G);
7032 
7033 #ifdef TCP_OFFLOAD
7034 	if (t4_nofldtxq10g < 1)
7035 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7036 
7037 	if (t4_nofldtxq1g < 1)
7038 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7039 
7040 	if (t4_nofldrxq10g < 1)
7041 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7042 
7043 	if (t4_nofldrxq1g < 1)
7044 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7045 
7046 	if (t4_toecaps_allowed == -1)
7047 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7048 #else
7049 	if (t4_toecaps_allowed == -1)
7050 		t4_toecaps_allowed = 0;
7051 #endif
7052 
7053 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7054 		t4_tmr_idx_10g = TMR_IDX_10G;
7055 
7056 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7057 		t4_pktc_idx_10g = PKTC_IDX_10G;
7058 
7059 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7060 		t4_tmr_idx_1g = TMR_IDX_1G;
7061 
7062 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7063 		t4_pktc_idx_1g = PKTC_IDX_1G;
7064 
7065 	if (t4_qsize_txq < 128)
7066 		t4_qsize_txq = 128;
7067 
7068 	if (t4_qsize_rxq < 128)
7069 		t4_qsize_rxq = 128;
7070 	while (t4_qsize_rxq & 7)
7071 		t4_qsize_rxq++;
7072 
7073 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7074 }
7075 
/*
 * Module load/unload handler shared by the t4nex and t5nex drivers.  The
 * 'loaded' counter ensures the global state is initialized exactly once on
 * the first load and torn down only on the last unload.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load initializes global state. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears down global state. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
7122 
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver (PCI attachment); mod_event handles load/unload. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);

/* T5 nexus driver; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);

/* Per-port network interface drivers attached under the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
7137