xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 4fd0d10e0fe684211328bc148edf89a792425b39)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
70 /* T4 bus driver interface */
71 static int t4_probe(device_t);
72 static int t4_attach(device_t);
73 static int t4_detach(device_t);
74 static device_method_t t4_methods[] = {
75 	DEVMETHOD(device_probe,		t4_probe),
76 	DEVMETHOD(device_attach,	t4_attach),
77 	DEVMETHOD(device_detach,	t4_detach),
78 
79 	DEVMETHOD_END
80 };
81 static driver_t t4_driver = {
82 	"t4nex",
83 	t4_methods,
84 	sizeof(struct adapter)
85 };
86 
87 
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 	DEVMETHOD(device_probe,		cxgbe_probe),
94 	DEVMETHOD(device_attach,	cxgbe_attach),
95 	DEVMETHOD(device_detach,	cxgbe_detach),
96 	{ 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99 	"cxgbe",
100 	cxgbe_methods,
101 	sizeof(struct port_info)
102 };
103 
104 static d_ioctl_t t4_ioctl;
105 static d_open_t t4_open;
106 static d_close_t t4_close;
107 
108 static struct cdevsw t4_cdevsw = {
109        .d_version = D_VERSION,
110        .d_flags = 0,
111        .d_open = t4_open,
112        .d_close = t4_close,
113        .d_ioctl = t4_ioctl,
114        .d_name = "t4nex",
115 };
116 
117 /* T5 bus driver interface */
118 static int t5_probe(device_t);
119 static device_method_t t5_methods[] = {
120 	DEVMETHOD(device_probe,		t5_probe),
121 	DEVMETHOD(device_attach,	t4_attach),
122 	DEVMETHOD(device_detach,	t4_detach),
123 
124 	DEVMETHOD_END
125 };
126 static driver_t t5_driver = {
127 	"t5nex",
128 	t5_methods,
129 	sizeof(struct adapter)
130 };
131 
132 
133 /* T5 port (cxl) interface */
134 static driver_t cxl_driver = {
135 	"cxl",
136 	cxgbe_methods,
137 	sizeof(struct port_info)
138 };
139 
140 static struct cdevsw t5_cdevsw = {
141        .d_version = D_VERSION,
142        .d_flags = 0,
143        .d_open = t4_open,
144        .d_close = t4_close,
145        .d_ioctl = t4_ioctl,
146        .d_name = "t5nex",
147 };
148 
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156 
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158 
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct sx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct sx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169 
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180 
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187 
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191 
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195 
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199 
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204 
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208 
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212 
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217 
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224 
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228 
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232 
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236 
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242 
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245 
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251 
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF	"default"
256 #define FLASH_CF	"flash"
257 #define UWIRE_CF	"uwire"
258 #define FPGA_CF		"fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261 
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268 
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so that the
271  * chip resources aren't wasted on features that will not be used.
272  */
273 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275 
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278 
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281 
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284 
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287 
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290 
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293 
294 struct intrs_and_queues {
295 	int intr_type;		/* INTx, MSI, or MSI-X */
296 	int nirq;		/* Number of vectors */
297 	int intr_flags;
298 	int ntxq10g;		/* # of NIC txq's for each 10G port */
299 	int nrxq10g;		/* # of NIC rxq's for each 10G port */
300 	int ntxq1g;		/* # of NIC txq's for each 1G port */
301 	int nrxq1g;		/* # of NIC rxq's for each 1G port */
302 #ifdef TCP_OFFLOAD
303 	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
304 	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
305 	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
306 	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
307 #endif
308 };
309 
310 struct filter_entry {
311         uint32_t valid:1;	/* filter allocated and valid */
312         uint32_t locked:1;	/* filter is administratively locked */
313         uint32_t pending:1;	/* filter action is pending firmware reply */
314 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
315 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
316 
317         struct t4_filter_specification fs;
318 };
319 
320 enum {
321 	XGMAC_MTU	= (1 << 0),
322 	XGMAC_PROMISC	= (1 << 1),
323 	XGMAC_ALLMULTI	= (1 << 2),
324 	XGMAC_VLANEX	= (1 << 3),
325 	XGMAC_UCADDR	= (1 << 4),
326 	XGMAC_MCADDRS	= (1 << 5),
327 
328 	XGMAC_ALL	= 0xffff
329 };
330 
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336     uint32_t *);
337 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338 static uint32_t position_memwin(struct adapter *, int, uint32_t);
339 static int cfg_itype_and_nqueues(struct adapter *, int, int,
340     struct intrs_and_queues *);
341 static int prep_firmware(struct adapter *);
342 static int partition_resources(struct adapter *, const struct firmware *,
343     const char *);
344 static int get_params__pre_init(struct adapter *);
345 static int get_params__post_init(struct adapter *);
346 static int set_params__post_init(struct adapter *);
347 static void t4_set_desc(struct adapter *);
348 static void build_medialist(struct port_info *);
349 static int update_mac_settings(struct port_info *, int);
350 static int cxgbe_init_synchronized(struct port_info *);
351 static int cxgbe_uninit_synchronized(struct port_info *);
352 static int setup_intr_handlers(struct adapter *);
353 static int adapter_full_init(struct adapter *);
354 static int adapter_full_uninit(struct adapter *);
355 static int port_full_init(struct port_info *);
356 static int port_full_uninit(struct port_info *);
357 static void quiesce_eq(struct adapter *, struct sge_eq *);
358 static void quiesce_iq(struct adapter *, struct sge_iq *);
359 static void quiesce_fl(struct adapter *, struct sge_fl *);
360 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361     driver_intr_t *, void *, char *);
362 static int t4_free_irq(struct adapter *, struct irq *);
363 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364     unsigned int);
365 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366 static void cxgbe_tick(void *);
367 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369     struct mbuf *);
370 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371 static int fw_msg_not_handled(struct adapter *, const __be64 *);
372 static int t4_sysctls(struct adapter *);
373 static int cxgbe_sysctls(struct port_info *);
374 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
377 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
378 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
379 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
380 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
381 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
382 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
383 #ifdef SBUF_DRAIN
384 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
385 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
389 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
390 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
391 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
393 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
394 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
395 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
396 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
397 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
398 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
399 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
400 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
401 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
404 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
405 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
406 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
407 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
408 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
409 #endif
410 static inline void txq_start(struct ifnet *, struct sge_txq *);
411 static uint32_t fconf_to_mode(uint32_t);
412 static uint32_t mode_to_fconf(uint32_t);
413 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
414 static int get_filter_mode(struct adapter *, uint32_t *);
415 static int set_filter_mode(struct adapter *, uint32_t);
416 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
417 static int get_filter(struct adapter *, struct t4_filter *);
418 static int set_filter(struct adapter *, struct t4_filter *);
419 static int del_filter(struct adapter *, struct t4_filter *);
420 static void clear_filter(struct filter_entry *);
421 static int set_filter_wr(struct adapter *, int);
422 static int del_filter_wr(struct adapter *, int);
423 static int get_sge_context(struct adapter *, struct t4_sge_context *);
424 static int load_fw(struct adapter *, struct t4_data *);
425 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
426 static int read_i2c(struct adapter *, struct t4_i2c_data *);
427 #ifdef TCP_OFFLOAD
428 static int toe_capability(struct port_info *, int);
429 #endif
430 static int mod_event(module_t, int, void *);
431 
432 struct {
433 	uint16_t device;
434 	char *desc;
435 } t4_pciids[] = {
436 	{0xa000, "Chelsio Terminator 4 FPGA"},
437 	{0x4400, "Chelsio T440-dbg"},
438 	{0x4401, "Chelsio T420-CR"},
439 	{0x4402, "Chelsio T422-CR"},
440 	{0x4403, "Chelsio T440-CR"},
441 	{0x4404, "Chelsio T420-BCH"},
442 	{0x4405, "Chelsio T440-BCH"},
443 	{0x4406, "Chelsio T440-CH"},
444 	{0x4407, "Chelsio T420-SO"},
445 	{0x4408, "Chelsio T420-CX"},
446 	{0x4409, "Chelsio T420-BT"},
447 	{0x440a, "Chelsio T404-BT"},
448 	{0x440e, "Chelsio T440-LP-CR"},
449 }, t5_pciids[] = {
450 	{0xb000, "Chelsio Terminator 5 FPGA"},
451 	{0x5400, "Chelsio T580-dbg"},
452 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
453 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
454 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
455 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
456 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
457 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
458 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
459 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
460 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
461 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
462 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
463 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
464 #ifdef notyet
465 	{0x5404,  "Chelsio T520-BCH"},
466 	{0x5405,  "Chelsio T540-BCH"},
467 	{0x5406,  "Chelsio T540-CH"},
468 	{0x5408,  "Chelsio T520-CX"},
469 	{0x540b,  "Chelsio B520-SR"},
470 	{0x540c,  "Chelsio B504-BT"},
471 	{0x540f,  "Chelsio Amsterdam"},
472 	{0x5413,  "Chelsio T580-CHR"},
473 #endif
474 };
475 
476 #ifdef TCP_OFFLOAD
477 /*
478  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
479  * exactly the same for both rxq and ofld_rxq.
480  */
481 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
482 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
483 #endif
484 
485 /* No easy way to include t4_msg.h before adapter.h so we check this way */
486 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
487 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
488 
489 static int
490 t4_probe(device_t dev)
491 {
492 	int i;
493 	uint16_t v = pci_get_vendor(dev);
494 	uint16_t d = pci_get_device(dev);
495 	uint8_t f = pci_get_function(dev);
496 
497 	if (v != PCI_VENDOR_ID_CHELSIO)
498 		return (ENXIO);
499 
500 	/* Attach only to PF0 of the FPGA */
501 	if (d == 0xa000 && f != 0)
502 		return (ENXIO);
503 
504 	for (i = 0; i < nitems(t4_pciids); i++) {
505 		if (d == t4_pciids[i].device) {
506 			device_set_desc(dev, t4_pciids[i].desc);
507 			return (BUS_PROBE_DEFAULT);
508 		}
509 	}
510 
511 	return (ENXIO);
512 }
513 
514 static int
515 t5_probe(device_t dev)
516 {
517 	int i;
518 	uint16_t v = pci_get_vendor(dev);
519 	uint16_t d = pci_get_device(dev);
520 	uint8_t f = pci_get_function(dev);
521 
522 	if (v != PCI_VENDOR_ID_CHELSIO)
523 		return (ENXIO);
524 
525 	/* Attach only to PF0 of the FPGA */
526 	if (d == 0xb000 && f != 0)
527 		return (ENXIO);
528 
529 	for (i = 0; i < nitems(t5_pciids); i++) {
530 		if (d == t5_pciids[i].device) {
531 			device_set_desc(dev, t5_pciids[i].desc);
532 			return (BUS_PROBE_DEFAULT);
533 		}
534 	}
535 
536 	return (ENXIO);
537 }
538 
539 static int
540 t4_attach(device_t dev)
541 {
542 	struct adapter *sc;
543 	int rc = 0, i, n10g, n1g, rqidx, tqidx;
544 	struct intrs_and_queues iaq;
545 	struct sge *s;
546 #ifdef TCP_OFFLOAD
547 	int ofld_rqidx, ofld_tqidx;
548 #endif
549 
550 	sc = device_get_softc(dev);
551 	sc->dev = dev;
552 
553 	pci_enable_busmaster(dev);
554 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
555 		uint32_t v;
556 
557 		pci_set_max_read_req(dev, 4096);
558 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
559 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
560 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
561 	}
562 
563 	sc->traceq = -1;
564 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
565 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
566 	    device_get_nameunit(dev));
567 
568 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
569 	    device_get_nameunit(dev));
570 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
571 	sx_xlock(&t4_list_lock);
572 	SLIST_INSERT_HEAD(&t4_list, sc, link);
573 	sx_xunlock(&t4_list_lock);
574 
575 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
576 	TAILQ_INIT(&sc->sfl);
577 	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
578 
579 	rc = map_bars_0_and_4(sc);
580 	if (rc != 0)
581 		goto done; /* error message displayed already */
582 
583 	/*
584 	 * This is the real PF# to which we're attaching.  Works from within PCI
585 	 * passthrough environments too, where pci_get_function() could return a
586 	 * different PF# depending on the passthrough configuration.  We need to
587 	 * use the real PF# in all our communication with the firmware.
588 	 */
589 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
590 	sc->mbox = sc->pf;
591 
592 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
593 	sc->an_handler = an_not_handled;
594 	for (i = 0; i < nitems(sc->cpl_handler); i++)
595 		sc->cpl_handler[i] = cpl_not_handled;
596 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
597 		sc->fw_msg_handler[i] = fw_msg_not_handled;
598 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
599 	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
600 	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
601 	t4_init_sge_cpl_handlers(sc);
602 
603 	/* Prepare the adapter for operation */
604 	rc = -t4_prep_adapter(sc);
605 	if (rc != 0) {
606 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
607 		goto done;
608 	}
609 
610 	/*
611 	 * Do this really early, with the memory windows set up even before the
612 	 * character device.  The userland tool's register i/o and mem read
613 	 * will work even in "recovery mode".
614 	 */
615 	setup_memwin(sc);
616 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
617 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
618 	    device_get_nameunit(dev));
619 	if (sc->cdev == NULL)
620 		device_printf(dev, "failed to create nexus char device.\n");
621 	else
622 		sc->cdev->si_drv1 = sc;
623 
624 	/* Go no further if recovery mode has been requested. */
625 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
626 		device_printf(dev, "recovery mode.\n");
627 		goto done;
628 	}
629 
630 	/* Prepare the firmware for operation */
631 	rc = prep_firmware(sc);
632 	if (rc != 0)
633 		goto done; /* error message displayed already */
634 
635 	rc = get_params__post_init(sc);
636 	if (rc != 0)
637 		goto done; /* error message displayed already */
638 
639 	rc = set_params__post_init(sc);
640 	if (rc != 0)
641 		goto done; /* error message displayed already */
642 
643 	rc = map_bar_2(sc);
644 	if (rc != 0)
645 		goto done; /* error message displayed already */
646 
647 	rc = t4_create_dma_tag(sc);
648 	if (rc != 0)
649 		goto done; /* error message displayed already */
650 
651 	/*
652 	 * First pass over all the ports - allocate VIs and initialize some
653 	 * basic parameters like mac address, port type, etc.  We also figure
654 	 * out whether a port is 10G or 1G and use that information when
655 	 * calculating how many interrupts to attempt to allocate.
656 	 */
657 	n10g = n1g = 0;
658 	for_each_port(sc, i) {
659 		struct port_info *pi;
660 
661 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
662 		sc->port[i] = pi;
663 
664 		/* These must be set before t4_port_init */
665 		pi->adapter = sc;
666 		pi->port_id = i;
667 
668 		/* Allocate the vi and initialize parameters like mac addr */
669 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
670 		if (rc != 0) {
671 			device_printf(dev, "unable to initialize port %d: %d\n",
672 			    i, rc);
673 			free(pi, M_CXGBE);
674 			sc->port[i] = NULL;
675 			goto done;
676 		}
677 
678 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
679 		    device_get_nameunit(dev), i);
680 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
681 		sc->chan_map[pi->tx_chan] = i;
682 
683 		if (is_10G_port(pi) || is_40G_port(pi)) {
684 			n10g++;
685 			pi->tmr_idx = t4_tmr_idx_10g;
686 			pi->pktc_idx = t4_pktc_idx_10g;
687 		} else {
688 			n1g++;
689 			pi->tmr_idx = t4_tmr_idx_1g;
690 			pi->pktc_idx = t4_pktc_idx_1g;
691 		}
692 
693 		pi->xact_addr_filt = -1;
694 		pi->linkdnrc = -1;
695 
696 		pi->qsize_rxq = t4_qsize_rxq;
697 		pi->qsize_txq = t4_qsize_txq;
698 
699 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
700 		if (pi->dev == NULL) {
701 			device_printf(dev,
702 			    "failed to add device for port %d.\n", i);
703 			rc = ENXIO;
704 			goto done;
705 		}
706 		device_set_softc(pi->dev, pi);
707 	}
708 
709 	/*
710 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
711 	 */
712 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
713 	if (rc != 0)
714 		goto done; /* error message displayed already */
715 
716 	sc->intr_type = iaq.intr_type;
717 	sc->intr_count = iaq.nirq;
718 	sc->flags |= iaq.intr_flags;
719 
720 	s = &sc->sge;
721 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
722 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
723 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
724 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
725 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
726 
727 #ifdef TCP_OFFLOAD
728 	if (is_offload(sc)) {
729 
730 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
731 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
732 		s->neq += s->nofldtxq + s->nofldrxq;
733 		s->niq += s->nofldrxq;
734 
735 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
736 		    M_CXGBE, M_ZERO | M_WAITOK);
737 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
738 		    M_CXGBE, M_ZERO | M_WAITOK);
739 	}
740 #endif
741 
742 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
743 	    M_ZERO | M_WAITOK);
744 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
745 	    M_ZERO | M_WAITOK);
746 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
747 	    M_ZERO | M_WAITOK);
748 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
749 	    M_ZERO | M_WAITOK);
750 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
751 	    M_ZERO | M_WAITOK);
752 
753 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
754 	    M_ZERO | M_WAITOK);
755 
756 	t4_init_l2t(sc, M_WAITOK);
757 
758 	/*
759 	 * Second pass over the ports.  This time we know the number of rx and
760 	 * tx queues that each port should get.
761 	 */
762 	rqidx = tqidx = 0;
763 #ifdef TCP_OFFLOAD
764 	ofld_rqidx = ofld_tqidx = 0;
765 #endif
766 	for_each_port(sc, i) {
767 		struct port_info *pi = sc->port[i];
768 
769 		if (pi == NULL)
770 			continue;
771 
772 		pi->first_rxq = rqidx;
773 		pi->first_txq = tqidx;
774 		if (is_10G_port(pi) || is_40G_port(pi)) {
775 			pi->nrxq = iaq.nrxq10g;
776 			pi->ntxq = iaq.ntxq10g;
777 		} else {
778 			pi->nrxq = iaq.nrxq1g;
779 			pi->ntxq = iaq.ntxq1g;
780 		}
781 
782 		rqidx += pi->nrxq;
783 		tqidx += pi->ntxq;
784 
785 #ifdef TCP_OFFLOAD
786 		if (is_offload(sc)) {
787 			pi->first_ofld_rxq = ofld_rqidx;
788 			pi->first_ofld_txq = ofld_tqidx;
789 			if (is_10G_port(pi) || is_40G_port(pi)) {
790 				pi->nofldrxq = iaq.nofldrxq10g;
791 				pi->nofldtxq = iaq.nofldtxq10g;
792 			} else {
793 				pi->nofldrxq = iaq.nofldrxq1g;
794 				pi->nofldtxq = iaq.nofldtxq1g;
795 			}
796 			ofld_rqidx += pi->nofldrxq;
797 			ofld_tqidx += pi->nofldtxq;
798 		}
799 #endif
800 	}
801 
802 	rc = setup_intr_handlers(sc);
803 	if (rc != 0) {
804 		device_printf(dev,
805 		    "failed to setup interrupt handlers: %d\n", rc);
806 		goto done;
807 	}
808 
809 	rc = bus_generic_attach(dev);
810 	if (rc != 0) {
811 		device_printf(dev,
812 		    "failed to attach all child ports: %d\n", rc);
813 		goto done;
814 	}
815 
816 	device_printf(dev,
817 	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
818 	    sc->params.pci.width, sc->params.nports, sc->intr_count,
819 	    sc->intr_type == INTR_MSIX ? "MSI-X" :
820 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
821 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
822 
823 	t4_set_desc(sc);
824 
825 done:
826 	if (rc != 0 && sc->cdev) {
827 		/* cdev was created and so cxgbetool works; recover that way. */
828 		device_printf(dev,
829 		    "error during attach, adapter is now in recovery mode.\n");
830 		rc = 0;
831 	}
832 
833 	if (rc != 0)
834 		t4_detach(dev);
835 	else
836 		t4_sysctls(sc);
837 
838 	return (rc);
839 }
840 
841 /*
842  * Idempotent
843  */
844 static int
845 t4_detach(device_t dev)
846 {
847 	struct adapter *sc;
848 	struct port_info *pi;
849 	int i, rc;
850 
851 	sc = device_get_softc(dev);
852 
853 	if (sc->flags & FULL_INIT_DONE)
854 		t4_intr_disable(sc);
855 
856 	if (sc->cdev) {
857 		destroy_dev(sc->cdev);
858 		sc->cdev = NULL;
859 	}
860 
861 	rc = bus_generic_detach(dev);
862 	if (rc) {
863 		device_printf(dev,
864 		    "failed to detach child devices: %d\n", rc);
865 		return (rc);
866 	}
867 
868 	for (i = 0; i < sc->intr_count; i++)
869 		t4_free_irq(sc, &sc->irq[i]);
870 
871 	for (i = 0; i < MAX_NPORTS; i++) {
872 		pi = sc->port[i];
873 		if (pi) {
874 			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
875 			if (pi->dev)
876 				device_delete_child(dev, pi->dev);
877 
878 			mtx_destroy(&pi->pi_lock);
879 			free(pi, M_CXGBE);
880 		}
881 	}
882 
883 	if (sc->flags & FULL_INIT_DONE)
884 		adapter_full_uninit(sc);
885 
886 	if (sc->flags & FW_OK)
887 		t4_fw_bye(sc, sc->mbox);
888 
889 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
890 		pci_release_msi(dev);
891 
892 	if (sc->regs_res)
893 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
894 		    sc->regs_res);
895 
896 	if (sc->udbs_res)
897 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
898 		    sc->udbs_res);
899 
900 	if (sc->msix_res)
901 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
902 		    sc->msix_res);
903 
904 	if (sc->l2t)
905 		t4_free_l2t(sc->l2t);
906 
907 #ifdef TCP_OFFLOAD
908 	free(sc->sge.ofld_rxq, M_CXGBE);
909 	free(sc->sge.ofld_txq, M_CXGBE);
910 #endif
911 	free(sc->irq, M_CXGBE);
912 	free(sc->sge.rxq, M_CXGBE);
913 	free(sc->sge.txq, M_CXGBE);
914 	free(sc->sge.ctrlq, M_CXGBE);
915 	free(sc->sge.iqmap, M_CXGBE);
916 	free(sc->sge.eqmap, M_CXGBE);
917 	free(sc->tids.ftid_tab, M_CXGBE);
918 	t4_destroy_dma_tag(sc);
919 	if (mtx_initialized(&sc->sc_lock)) {
920 		sx_xlock(&t4_list_lock);
921 		SLIST_REMOVE(&t4_list, sc, adapter, link);
922 		sx_xunlock(&t4_list_lock);
923 		mtx_destroy(&sc->sc_lock);
924 	}
925 
926 	if (mtx_initialized(&sc->tids.ftid_lock))
927 		mtx_destroy(&sc->tids.ftid_lock);
928 	if (mtx_initialized(&sc->sfl_lock))
929 		mtx_destroy(&sc->sfl_lock);
930 	if (mtx_initialized(&sc->ifp_lock))
931 		mtx_destroy(&sc->ifp_lock);
932 
933 	bzero(sc, sizeof(*sc));
934 
935 	return (0);
936 }
937 
938 
939 static int
940 cxgbe_probe(device_t dev)
941 {
942 	char buf[128];
943 	struct port_info *pi = device_get_softc(dev);
944 
945 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
946 	device_set_desc_copy(dev, buf);
947 
948 	return (BUS_PROBE_DEFAULT);
949 }
950 
951 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
952     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
953     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
954 #define T4_CAP_ENABLE (T4_CAP)
955 
956 static int
957 cxgbe_attach(device_t dev)
958 {
959 	struct port_info *pi = device_get_softc(dev);
960 	struct ifnet *ifp;
961 
962 	/* Allocate an ifnet and set it up */
963 	ifp = if_alloc(IFT_ETHER);
964 	if (ifp == NULL) {
965 		device_printf(dev, "Cannot allocate ifnet\n");
966 		return (ENOMEM);
967 	}
968 	pi->ifp = ifp;
969 	ifp->if_softc = pi;
970 
971 	callout_init(&pi->tick, CALLOUT_MPSAFE);
972 
973 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
974 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
975 
976 	ifp->if_init = cxgbe_init;
977 	ifp->if_ioctl = cxgbe_ioctl;
978 	ifp->if_transmit = cxgbe_transmit;
979 	ifp->if_qflush = cxgbe_qflush;
980 
981 	ifp->if_capabilities = T4_CAP;
982 #ifdef TCP_OFFLOAD
983 	if (is_offload(pi->adapter))
984 		ifp->if_capabilities |= IFCAP_TOE;
985 #endif
986 	ifp->if_capenable = T4_CAP_ENABLE;
987 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
988 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
989 
990 	/* Initialize ifmedia for this port */
991 	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
992 	    cxgbe_media_status);
993 	build_medialist(pi);
994 
995 	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
996 	    EVENTHANDLER_PRI_ANY);
997 
998 	ether_ifattach(ifp, pi->hw_addr);
999 
1000 #ifdef TCP_OFFLOAD
1001 	if (is_offload(pi->adapter)) {
1002 		device_printf(dev,
1003 		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
1004 		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
1005 	} else
1006 #endif
1007 		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
1008 
1009 	cxgbe_sysctls(pi);
1010 
1011 	return (0);
1012 }
1013 
1014 static int
1015 cxgbe_detach(device_t dev)
1016 {
1017 	struct port_info *pi = device_get_softc(dev);
1018 	struct adapter *sc = pi->adapter;
1019 	struct ifnet *ifp = pi->ifp;
1020 
1021 	/* Tell if_ioctl and if_init that the port is going away */
1022 	ADAPTER_LOCK(sc);
1023 	SET_DOOMED(pi);
1024 	wakeup(&sc->flags);
1025 	while (IS_BUSY(sc))
1026 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1027 	SET_BUSY(sc);
1028 #ifdef INVARIANTS
1029 	sc->last_op = "t4detach";
1030 	sc->last_op_thr = curthread;
1031 #endif
1032 	ADAPTER_UNLOCK(sc);
1033 
1034 	if (pi->flags & HAS_TRACEQ) {
1035 		sc->traceq = -1;	/* cloner should not create ifnet */
1036 		t4_tracer_port_detach(sc);
1037 	}
1038 
1039 	if (pi->vlan_c)
1040 		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1041 
1042 	PORT_LOCK(pi);
1043 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1044 	callout_stop(&pi->tick);
1045 	PORT_UNLOCK(pi);
1046 	callout_drain(&pi->tick);
1047 
1048 	/* Let detach proceed even if these fail. */
1049 	cxgbe_uninit_synchronized(pi);
1050 	port_full_uninit(pi);
1051 
1052 	ifmedia_removeall(&pi->media);
1053 	ether_ifdetach(pi->ifp);
1054 	if_free(pi->ifp);
1055 
1056 	ADAPTER_LOCK(sc);
1057 	CLR_BUSY(sc);
1058 	wakeup(&sc->flags);
1059 	ADAPTER_UNLOCK(sc);
1060 
1061 	return (0);
1062 }
1063 
1064 static void
1065 cxgbe_init(void *arg)
1066 {
1067 	struct port_info *pi = arg;
1068 	struct adapter *sc = pi->adapter;
1069 
1070 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1071 		return;
1072 	cxgbe_init_synchronized(pi);
1073 	end_synchronized_op(sc, 0);
1074 }
1075 
1076 static int
1077 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1078 {
1079 	int rc = 0, mtu, flags;
1080 	struct port_info *pi = ifp->if_softc;
1081 	struct adapter *sc = pi->adapter;
1082 	struct ifreq *ifr = (struct ifreq *)data;
1083 	uint32_t mask;
1084 
1085 	switch (cmd) {
1086 	case SIOCSIFMTU:
1087 		mtu = ifr->ifr_mtu;
1088 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1089 			return (EINVAL);
1090 
1091 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1092 		if (rc)
1093 			return (rc);
1094 		ifp->if_mtu = mtu;
1095 		if (pi->flags & PORT_INIT_DONE) {
1096 			t4_update_fl_bufsize(ifp);
1097 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1098 				rc = update_mac_settings(pi, XGMAC_MTU);
1099 		}
1100 		end_synchronized_op(sc, 0);
1101 		break;
1102 
1103 	case SIOCSIFFLAGS:
1104 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1105 		if (rc)
1106 			return (rc);
1107 
1108 		if (ifp->if_flags & IFF_UP) {
1109 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1110 				flags = pi->if_flags;
1111 				if ((ifp->if_flags ^ flags) &
1112 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1113 					rc = update_mac_settings(pi,
1114 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1115 				}
1116 			} else
1117 				rc = cxgbe_init_synchronized(pi);
1118 			pi->if_flags = ifp->if_flags;
1119 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1120 			rc = cxgbe_uninit_synchronized(pi);
1121 		end_synchronized_op(sc, 0);
1122 		break;
1123 
1124 	case SIOCADDMULTI:
1125 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1126 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1127 		if (rc)
1128 			return (rc);
1129 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1130 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
1131 		end_synchronized_op(sc, LOCK_HELD);
1132 		break;
1133 
1134 	case SIOCSIFCAP:
1135 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1136 		if (rc)
1137 			return (rc);
1138 
1139 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1140 		if (mask & IFCAP_TXCSUM) {
1141 			ifp->if_capenable ^= IFCAP_TXCSUM;
1142 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1143 
1144 			if (IFCAP_TSO4 & ifp->if_capenable &&
1145 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1146 				ifp->if_capenable &= ~IFCAP_TSO4;
1147 				if_printf(ifp,
1148 				    "tso4 disabled due to -txcsum.\n");
1149 			}
1150 		}
1151 		if (mask & IFCAP_TXCSUM_IPV6) {
1152 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1153 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1154 
1155 			if (IFCAP_TSO6 & ifp->if_capenable &&
1156 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1157 				ifp->if_capenable &= ~IFCAP_TSO6;
1158 				if_printf(ifp,
1159 				    "tso6 disabled due to -txcsum6.\n");
1160 			}
1161 		}
1162 		if (mask & IFCAP_RXCSUM)
1163 			ifp->if_capenable ^= IFCAP_RXCSUM;
1164 		if (mask & IFCAP_RXCSUM_IPV6)
1165 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1166 
1167 		/*
1168 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1169 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1170 		 * sending a TSO request our way, so it's sufficient to toggle
1171 		 * IFCAP_TSOx only.
1172 		 */
1173 		if (mask & IFCAP_TSO4) {
1174 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1175 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1176 				if_printf(ifp, "enable txcsum first.\n");
1177 				rc = EAGAIN;
1178 				goto fail;
1179 			}
1180 			ifp->if_capenable ^= IFCAP_TSO4;
1181 		}
1182 		if (mask & IFCAP_TSO6) {
1183 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1184 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1185 				if_printf(ifp, "enable txcsum6 first.\n");
1186 				rc = EAGAIN;
1187 				goto fail;
1188 			}
1189 			ifp->if_capenable ^= IFCAP_TSO6;
1190 		}
1191 		if (mask & IFCAP_LRO) {
1192 #if defined(INET) || defined(INET6)
1193 			int i;
1194 			struct sge_rxq *rxq;
1195 
1196 			ifp->if_capenable ^= IFCAP_LRO;
1197 			for_each_rxq(pi, i, rxq) {
1198 				if (ifp->if_capenable & IFCAP_LRO)
1199 					rxq->iq.flags |= IQ_LRO_ENABLED;
1200 				else
1201 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1202 			}
1203 #endif
1204 		}
1205 #ifdef TCP_OFFLOAD
1206 		if (mask & IFCAP_TOE) {
1207 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1208 
1209 			rc = toe_capability(pi, enable);
1210 			if (rc != 0)
1211 				goto fail;
1212 
1213 			ifp->if_capenable ^= mask;
1214 		}
1215 #endif
1216 		if (mask & IFCAP_VLAN_HWTAGGING) {
1217 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1218 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1219 				rc = update_mac_settings(pi, XGMAC_VLANEX);
1220 		}
1221 		if (mask & IFCAP_VLAN_MTU) {
1222 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1223 
1224 			/* Need to find out how to disable auto-mtu-inflation */
1225 		}
1226 		if (mask & IFCAP_VLAN_HWTSO)
1227 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1228 		if (mask & IFCAP_VLAN_HWCSUM)
1229 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1230 
1231 #ifdef VLAN_CAPABILITIES
1232 		VLAN_CAPABILITIES(ifp);
1233 #endif
1234 fail:
1235 		end_synchronized_op(sc, 0);
1236 		break;
1237 
1238 	case SIOCSIFMEDIA:
1239 	case SIOCGIFMEDIA:
1240 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1241 		break;
1242 
1243 	default:
1244 		rc = ether_ioctl(ifp, cmd, data);
1245 	}
1246 
1247 	return (rc);
1248 }
1249 
1250 static int
1251 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1252 {
1253 	struct port_info *pi = ifp->if_softc;
1254 	struct adapter *sc = pi->adapter;
1255 	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1256 	struct buf_ring *br;
1257 	int rc;
1258 
1259 	M_ASSERTPKTHDR(m);
1260 
1261 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1262 		m_freem(m);
1263 		return (ENETDOWN);
1264 	}
1265 
1266 	if (m->m_flags & M_FLOWID)
1267 		txq += (m->m_pkthdr.flowid % pi->ntxq);
1268 	br = txq->br;
1269 
1270 	if (TXQ_TRYLOCK(txq) == 0) {
1271 		struct sge_eq *eq = &txq->eq;
1272 
1273 		/*
1274 		 * It is possible that t4_eth_tx finishes up and releases the
1275 		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
1276 		 * need to make sure that this mbuf doesn't just sit there in
1277 		 * the drbr.
1278 		 */
1279 
1280 		rc = drbr_enqueue(ifp, br, m);
1281 		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1282 		    !(eq->flags & EQ_DOOMED))
1283 			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1284 		return (rc);
1285 	}
1286 
1287 	/*
1288 	 * txq->m is the mbuf that is held up due to a temporary shortage of
1289 	 * resources and it should be put on the wire first.  Then what's in
1290 	 * drbr and finally the mbuf that was just passed in to us.
1291 	 *
1292 	 * Return code should indicate the fate of the mbuf that was passed in
1293 	 * this time.
1294 	 */
1295 
1296 	TXQ_LOCK_ASSERT_OWNED(txq);
1297 	if (drbr_needs_enqueue(ifp, br) || txq->m) {
1298 
1299 		/* Queued for transmission. */
1300 
1301 		rc = drbr_enqueue(ifp, br, m);
1302 		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1303 		(void) t4_eth_tx(ifp, txq, m);
1304 		TXQ_UNLOCK(txq);
1305 		return (rc);
1306 	}
1307 
1308 	/* Direct transmission. */
1309 	rc = t4_eth_tx(ifp, txq, m);
1310 	if (rc != 0 && txq->m)
1311 		rc = 0;	/* held, will be transmitted soon (hopefully) */
1312 
1313 	TXQ_UNLOCK(txq);
1314 	return (rc);
1315 }
1316 
1317 static void
1318 cxgbe_qflush(struct ifnet *ifp)
1319 {
1320 	struct port_info *pi = ifp->if_softc;
1321 	struct sge_txq *txq;
1322 	int i;
1323 	struct mbuf *m;
1324 
1325 	/* queues do not exist if !PORT_INIT_DONE. */
1326 	if (pi->flags & PORT_INIT_DONE) {
1327 		for_each_txq(pi, i, txq) {
1328 			TXQ_LOCK(txq);
1329 			m_freem(txq->m);
1330 			txq->m = NULL;
1331 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1332 				m_freem(m);
1333 			TXQ_UNLOCK(txq);
1334 		}
1335 	}
1336 	if_qflush(ifp);
1337 }
1338 
1339 static int
1340 cxgbe_media_change(struct ifnet *ifp)
1341 {
1342 	struct port_info *pi = ifp->if_softc;
1343 
1344 	device_printf(pi->dev, "%s unimplemented.\n", __func__);
1345 
1346 	return (EOPNOTSUPP);
1347 }
1348 
1349 static void
1350 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1351 {
1352 	struct port_info *pi = ifp->if_softc;
1353 	struct ifmedia_entry *cur = pi->media.ifm_cur;
1354 	int speed = pi->link_cfg.speed;
1355 	int data = (pi->port_type << 8) | pi->mod_type;
1356 
1357 	if (cur->ifm_data != data) {
1358 		build_medialist(pi);
1359 		cur = pi->media.ifm_cur;
1360 	}
1361 
1362 	ifmr->ifm_status = IFM_AVALID;
1363 	if (!pi->link_cfg.link_ok)
1364 		return;
1365 
1366 	ifmr->ifm_status |= IFM_ACTIVE;
1367 
1368 	/* active and current will differ iff current media is autoselect. */
1369 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1370 		return;
1371 
1372 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1373 	if (speed == SPEED_10000)
1374 		ifmr->ifm_active |= IFM_10G_T;
1375 	else if (speed == SPEED_1000)
1376 		ifmr->ifm_active |= IFM_1000_T;
1377 	else if (speed == SPEED_100)
1378 		ifmr->ifm_active |= IFM_100_TX;
1379 	else if (speed == SPEED_10)
1380 		ifmr->ifm_active |= IFM_10_T;
1381 	else
1382 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1383 			    speed));
1384 }
1385 
1386 void
1387 t4_fatal_err(struct adapter *sc)
1388 {
1389 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1390 	t4_intr_disable(sc);
1391 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1392 	    device_get_nameunit(sc->dev));
1393 }
1394 
1395 static int
1396 map_bars_0_and_4(struct adapter *sc)
1397 {
1398 	sc->regs_rid = PCIR_BAR(0);
1399 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1400 	    &sc->regs_rid, RF_ACTIVE);
1401 	if (sc->regs_res == NULL) {
1402 		device_printf(sc->dev, "cannot map registers.\n");
1403 		return (ENXIO);
1404 	}
1405 	sc->bt = rman_get_bustag(sc->regs_res);
1406 	sc->bh = rman_get_bushandle(sc->regs_res);
1407 	sc->mmio_len = rman_get_size(sc->regs_res);
1408 	setbit(&sc->doorbells, DOORBELL_KDB);
1409 
1410 	sc->msix_rid = PCIR_BAR(4);
1411 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1412 	    &sc->msix_rid, RF_ACTIVE);
1413 	if (sc->msix_res == NULL) {
1414 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1415 		return (ENXIO);
1416 	}
1417 
1418 	return (0);
1419 }
1420 
1421 static int
1422 map_bar_2(struct adapter *sc)
1423 {
1424 
1425 	/*
1426 	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
1427 	 * to map it if RDMA is disabled.
1428 	 */
1429 	if (is_t4(sc) && sc->rdmacaps == 0)
1430 		return (0);
1431 
1432 	sc->udbs_rid = PCIR_BAR(2);
1433 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1434 	    &sc->udbs_rid, RF_ACTIVE);
1435 	if (sc->udbs_res == NULL) {
1436 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
1437 		return (ENXIO);
1438 	}
1439 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
1440 
1441 	if (is_t5(sc)) {
1442 		setbit(&sc->doorbells, DOORBELL_UDB);
1443 #if defined(__i386__) || defined(__amd64__)
1444 		if (t5_write_combine) {
1445 			int rc;
1446 
1447 			/*
1448 			 * Enable write combining on BAR2.  This is the
1449 			 * userspace doorbell BAR and is split into 128B
1450 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
1451 			 * with an egress queue.  The first 64B has the doorbell
1452 			 * and the second 64B can be used to submit a tx work
1453 			 * request with an implicit doorbell.
1454 			 */
1455 
1456 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1457 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1458 			if (rc == 0) {
1459 				clrbit(&sc->doorbells, DOORBELL_UDB);
1460 				setbit(&sc->doorbells, DOORBELL_WCWR);
1461 				setbit(&sc->doorbells, DOORBELL_UDBWC);
1462 			} else {
1463 				device_printf(sc->dev,
1464 				    "couldn't enable write combining: %d\n",
1465 				    rc);
1466 			}
1467 
1468 			t4_write_reg(sc, A_SGE_STAT_CFG,
1469 			    V_STATSOURCE_T5(7) | V_STATMODE(0));
1470 		}
1471 #endif
1472 	}
1473 
1474 	return (0);
1475 }
1476 
1477 static const struct memwin t4_memwin[] = {
1478 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1479 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1480 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1481 };
1482 
1483 static const struct memwin t5_memwin[] = {
1484 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1485 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1486 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1487 };
1488 
1489 static void
1490 setup_memwin(struct adapter *sc)
1491 {
1492 	const struct memwin *mw;
1493 	int i, n;
1494 	uint32_t bar0;
1495 
1496 	if (is_t4(sc)) {
1497 		/*
1498 		 * Read low 32b of bar0 indirectly via the hardware backdoor
1499 		 * mechanism.  Works from within PCI passthrough environments
1500 		 * too, where rman_get_start() can return a different value.  We
1501 		 * need to program the T4 memory window decoders with the actual
1502 		 * addresses that will be coming across the PCIe link.
1503 		 */
1504 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1505 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1506 
1507 		mw = &t4_memwin[0];
1508 		n = nitems(t4_memwin);
1509 	} else {
1510 		/* T5 uses the relative offset inside the PCIe BAR */
1511 		bar0 = 0;
1512 
1513 		mw = &t5_memwin[0];
1514 		n = nitems(t5_memwin);
1515 	}
1516 
1517 	for (i = 0; i < n; i++, mw++) {
1518 		t4_write_reg(sc,
1519 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1520 		    (mw->base + bar0) | V_BIR(0) |
1521 		    V_WINDOW(ilog2(mw->aperture) - 10));
1522 	}
1523 
1524 	/* flush */
1525 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1526 }
1527 
1528 /*
1529  * Verify that the memory range specified by the addr/len pair is valid and lies
1530  * entirely within a single region (EDCx or MCx).
1531  */
1532 static int
1533 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1534 {
1535 	uint32_t em, addr_len, maddr, mlen;
1536 
1537 	/* Memory can only be accessed in naturally aligned 4 byte units */
1538 	if (addr & 3 || len & 3 || len == 0)
1539 		return (EINVAL);
1540 
1541 	/* Enabled memories */
1542 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1543 	if (em & F_EDRAM0_ENABLE) {
1544 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1545 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1546 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1547 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1548 		    addr + len <= maddr + mlen)
1549 			return (0);
1550 	}
1551 	if (em & F_EDRAM1_ENABLE) {
1552 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1553 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1554 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1555 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1556 		    addr + len <= maddr + mlen)
1557 			return (0);
1558 	}
1559 	if (em & F_EXT_MEM_ENABLE) {
1560 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1561 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1562 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1563 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1564 		    addr + len <= maddr + mlen)
1565 			return (0);
1566 	}
1567 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1568 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1569 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1570 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1571 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1572 		    addr + len <= maddr + mlen)
1573 			return (0);
1574 	}
1575 
1576 	return (EFAULT);
1577 }
1578 
1579 /*
1580  * Verify that the memory range specified by the memtype/offset/len pair is
1581  * valid and lies entirely within the memtype specified.  The global address of
1582  * the start of the range is returned in addr.
1583  */
1584 static int
1585 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1586     uint32_t *addr)
1587 {
1588 	uint32_t em, addr_len, maddr, mlen;
1589 
1590 	/* Memory can only be accessed in naturally aligned 4 byte units */
1591 	if (off & 3 || len & 3 || len == 0)
1592 		return (EINVAL);
1593 
1594 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1595 	switch (mtype) {
1596 	case MEM_EDC0:
1597 		if (!(em & F_EDRAM0_ENABLE))
1598 			return (EINVAL);
1599 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1600 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1601 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1602 		break;
1603 	case MEM_EDC1:
1604 		if (!(em & F_EDRAM1_ENABLE))
1605 			return (EINVAL);
1606 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1607 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1608 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1609 		break;
1610 	case MEM_MC:
1611 		if (!(em & F_EXT_MEM_ENABLE))
1612 			return (EINVAL);
1613 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1614 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1615 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1616 		break;
1617 	case MEM_MC1:
1618 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1619 			return (EINVAL);
1620 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1621 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1622 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1623 		break;
1624 	default:
1625 		return (EINVAL);
1626 	}
1627 
1628 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1629 		*addr = maddr + off;	/* global address */
1630 		return (0);
1631 	}
1632 
1633 	return (EFAULT);
1634 }
1635 
1636 static void
1637 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1638 {
1639 	const struct memwin *mw;
1640 
1641 	if (is_t4(sc)) {
1642 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1643 		    ("%s: incorrect memwin# (%d)", __func__, win));
1644 		mw = &t4_memwin[win];
1645 	} else {
1646 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1647 		    ("%s: incorrect memwin# (%d)", __func__, win));
1648 		mw = &t5_memwin[win];
1649 	}
1650 
1651 	if (base != NULL)
1652 		*base = mw->base;
1653 	if (aperture != NULL)
1654 		*aperture = mw->aperture;
1655 }
1656 
1657 /*
1658  * Positions the memory window such that it can be used to access the specified
1659  * address in the chip's address space.  The return value is the offset of addr
1660  * from the start of the window.
1661  */
1662 static uint32_t
1663 position_memwin(struct adapter *sc, int n, uint32_t addr)
1664 {
1665 	uint32_t start, pf;
1666 	uint32_t reg;
1667 
1668 	KASSERT(n >= 0 && n <= 3,
1669 	    ("%s: invalid window %d.", __func__, n));
1670 	KASSERT((addr & 3) == 0,
1671 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1672 
1673 	if (is_t4(sc)) {
1674 		pf = 0;
1675 		start = addr & ~0xf;	/* start must be 16B aligned */
1676 	} else {
1677 		pf = V_PFNUM(sc->pf);
1678 		start = addr & ~0x7f;	/* start must be 128B aligned */
1679 	}
1680 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1681 
1682 	t4_write_reg(sc, reg, start | pf);
1683 	t4_read_reg(sc, reg);
1684 
1685 	return (addr - start);
1686 }
1687 
1688 static int
1689 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1690     struct intrs_and_queues *iaq)
1691 {
1692 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1693 	int nofldrxq10g = 0, nofldrxq1g = 0;
1694 
1695 	bzero(iaq, sizeof(*iaq));
1696 
1697 	iaq->ntxq10g = t4_ntxq10g;
1698 	iaq->ntxq1g = t4_ntxq1g;
1699 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1700 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1701 #ifdef TCP_OFFLOAD
1702 	if (is_offload(sc)) {
1703 		iaq->nofldtxq10g = t4_nofldtxq10g;
1704 		iaq->nofldtxq1g = t4_nofldtxq1g;
1705 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1706 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1707 	}
1708 #endif
1709 
1710 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1711 
1712 		if ((itype & t4_intr_types) == 0)
1713 			continue;	/* not allowed */
1714 
1715 		if (itype == INTR_MSIX)
1716 			navail = pci_msix_count(sc->dev);
1717 		else if (itype == INTR_MSI)
1718 			navail = pci_msi_count(sc->dev);
1719 		else
1720 			navail = 1;
1721 restart:
1722 		if (navail == 0)
1723 			continue;
1724 
1725 		iaq->intr_type = itype;
1726 		iaq->intr_flags = 0;
1727 
1728 		/*
1729 		 * Best option: an interrupt vector for errors, one for the
1730 		 * firmware event queue, and one each for each rxq (NIC as well
1731 		 * as offload).
1732 		 */
1733 		iaq->nirq = T4_EXTRA_INTR;
1734 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1735 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1736 		if (iaq->nirq <= navail &&
1737 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1738 			iaq->intr_flags |= INTR_DIRECT;
1739 			goto allocate;
1740 		}
1741 
1742 		/*
1743 		 * Second best option: an interrupt vector for errors, one for
1744 		 * the firmware event queue, and one each for either NIC or
1745 		 * offload rxq's.
1746 		 */
1747 		iaq->nirq = T4_EXTRA_INTR;
1748 		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1749 		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1750 		if (iaq->nirq <= navail &&
1751 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
1752 			goto allocate;
1753 
1754 		/*
1755 		 * Next best option: an interrupt vector for errors, one for the
1756 		 * firmware event queue, and at least one per port.  At this
1757 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
1758 		 * what's available to us.
1759 		 */
1760 		iaq->nirq = T4_EXTRA_INTR;
1761 		iaq->nirq += n10g + n1g;
1762 		if (iaq->nirq <= navail) {
1763 			int leftover = navail - iaq->nirq;
1764 
1765 			if (n10g > 0) {
1766 				int target = max(nrxq10g, nofldrxq10g);
1767 
1768 				n = 1;
1769 				while (n < target && leftover >= n10g) {
1770 					leftover -= n10g;
1771 					iaq->nirq += n10g;
1772 					n++;
1773 				}
1774 				iaq->nrxq10g = min(n, nrxq10g);
1775 #ifdef TCP_OFFLOAD
1776 				if (is_offload(sc))
1777 					iaq->nofldrxq10g = min(n, nofldrxq10g);
1778 #endif
1779 			}
1780 
1781 			if (n1g > 0) {
1782 				int target = max(nrxq1g, nofldrxq1g);
1783 
1784 				n = 1;
1785 				while (n < target && leftover >= n1g) {
1786 					leftover -= n1g;
1787 					iaq->nirq += n1g;
1788 					n++;
1789 				}
1790 				iaq->nrxq1g = min(n, nrxq1g);
1791 #ifdef TCP_OFFLOAD
1792 				if (is_offload(sc))
1793 					iaq->nofldrxq1g = min(n, nofldrxq1g);
1794 #endif
1795 			}
1796 
1797 			if (itype != INTR_MSI || powerof2(iaq->nirq))
1798 				goto allocate;
1799 		}
1800 
1801 		/*
1802 		 * Least desirable option: one interrupt vector for everything.
1803 		 */
1804 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1805 #ifdef TCP_OFFLOAD
1806 		if (is_offload(sc))
1807 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1808 #endif
1809 
1810 allocate:
1811 		navail = iaq->nirq;
1812 		rc = 0;
1813 		if (itype == INTR_MSIX)
1814 			rc = pci_alloc_msix(sc->dev, &navail);
1815 		else if (itype == INTR_MSI)
1816 			rc = pci_alloc_msi(sc->dev, &navail);
1817 
1818 		if (rc == 0) {
1819 			if (navail == iaq->nirq)
1820 				return (0);
1821 
1822 			/*
1823 			 * Didn't get the number requested.  Use whatever number
1824 			 * the kernel is willing to allocate (it's in navail).
1825 			 */
1826 			device_printf(sc->dev, "fewer vectors than requested, "
1827 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1828 			    itype, iaq->nirq, navail);
1829 			pci_release_msi(sc->dev);
1830 			goto restart;
1831 		}
1832 
1833 		device_printf(sc->dev,
1834 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1835 		    itype, rc, iaq->nirq, navail);
1836 	}
1837 
1838 	device_printf(sc->dev,
1839 	    "failed to find a usable interrupt type.  "
1840 	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1841 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1842 
1843 	return (ENXIO);
1844 }
1845 
1846 #define FW_VERSION(chip) ( \
1847     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1848     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1849     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1850     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1851 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1852 
1853 struct fw_info {
1854 	uint8_t chip;
1855 	char *kld_name;
1856 	char *fw_mod_name;
1857 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1858 } fw_info[] = {
1859 	{
1860 		.chip = CHELSIO_T4,
1861 		.kld_name = "t4fw_cfg",
1862 		.fw_mod_name = "t4fw",
1863 		.fw_hdr = {
1864 			.chip = FW_HDR_CHIP_T4,
1865 			.fw_ver = htobe32_const(FW_VERSION(T4)),
1866 			.intfver_nic = FW_INTFVER(T4, NIC),
1867 			.intfver_vnic = FW_INTFVER(T4, VNIC),
1868 			.intfver_ofld = FW_INTFVER(T4, OFLD),
1869 			.intfver_ri = FW_INTFVER(T4, RI),
1870 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1871 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1872 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1873 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1874 		},
1875 	}, {
1876 		.chip = CHELSIO_T5,
1877 		.kld_name = "t5fw_cfg",
1878 		.fw_mod_name = "t5fw",
1879 		.fw_hdr = {
1880 			.chip = FW_HDR_CHIP_T5,
1881 			.fw_ver = htobe32_const(FW_VERSION(T5)),
1882 			.intfver_nic = FW_INTFVER(T5, NIC),
1883 			.intfver_vnic = FW_INTFVER(T5, VNIC),
1884 			.intfver_ofld = FW_INTFVER(T5, OFLD),
1885 			.intfver_ri = FW_INTFVER(T5, RI),
1886 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1887 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1888 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1889 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1890 		},
1891 	}
1892 };
1893 
1894 static struct fw_info *
1895 find_fw_info(int chip)
1896 {
1897 	int i;
1898 
1899 	for (i = 0; i < nitems(fw_info); i++) {
1900 		if (fw_info[i].chip == chip)
1901 			return (&fw_info[i]);
1902 	}
1903 	return (NULL);
1904 }
1905 
1906 /*
1907  * Is the given firmware API compatible with the one the driver was compiled
1908  * with?
1909  */
1910 static int
1911 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1912 {
1913 
1914 	/* short circuit if it's the exact same firmware version */
1915 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1916 		return (1);
1917 
1918 	/*
1919 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1920 	 * features that are supported in the driver.
1921 	 */
1922 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1923 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1924 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1925 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1926 		return (1);
1927 #undef SAME_INTF
1928 
1929 	return (0);
1930 }
1931 
1932 /*
1933  * The firmware in the KLD is usable, but should it be installed?  This routine
1934  * explains itself in detail if it indicates the KLD firmware should be
1935  * installed.
1936  */
1937 static int
1938 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1939 {
1940 	const char *reason;
1941 
1942 	if (!card_fw_usable) {
1943 		reason = "incompatible or unusable";
1944 		goto install;
1945 	}
1946 
1947 	if (k > c) {
1948 		reason = "older than the version bundled with this driver";
1949 		goto install;
1950 	}
1951 
1952 	if (t4_fw_install == 2 && k != c) {
1953 		reason = "different than the version bundled with this driver";
1954 		goto install;
1955 	}
1956 
1957 	return (0);
1958 
1959 install:
1960 	if (t4_fw_install == 0) {
1961 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1962 		    "but the driver is prohibited from installing a different "
1963 		    "firmware on the card.\n",
1964 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1965 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1966 
1967 		return (0);
1968 	}
1969 
1970 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1971 	    "installing firmware %u.%u.%u.%u on card.\n",
1972 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1973 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1974 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1975 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1976 
1977 	return (1);
1978 }
1979 /*
1980  * Establish contact with the firmware and determine if we are the master driver
1981  * or not, and whether we are responsible for chip initialization.
1982  */
1983 static int
1984 prep_firmware(struct adapter *sc)
1985 {
1986 	const struct firmware *fw = NULL, *default_cfg;
1987 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1988 	enum dev_state state;
1989 	struct fw_info *fw_info;
1990 	struct fw_hdr *card_fw;		/* fw on the card */
1991 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
1992 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
1993 					   against */
1994 
1995 	/* Contact firmware. */
1996 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1997 	if (rc < 0 || state == DEV_STATE_ERR) {
1998 		rc = -rc;
1999 		device_printf(sc->dev,
2000 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2001 		return (rc);
2002 	}
2003 	pf = rc;
2004 	if (pf == sc->mbox)
2005 		sc->flags |= MASTER_PF;
2006 	else if (state == DEV_STATE_UNINIT) {
2007 		/*
2008 		 * We didn't get to be the master so we definitely won't be
2009 		 * configuring the chip.  It's a bug if someone else hasn't
2010 		 * configured it already.
2011 		 */
2012 		device_printf(sc->dev, "couldn't be master(%d), "
2013 		    "device not already initialized either(%d).\n", rc, state);
2014 		return (EDOOFUS);
2015 	}
2016 
2017 	/* This is the firmware whose headers the driver was compiled against */
2018 	fw_info = find_fw_info(chip_id(sc));
2019 	if (fw_info == NULL) {
2020 		device_printf(sc->dev,
2021 		    "unable to look up firmware information for chip %d.\n",
2022 		    chip_id(sc));
2023 		return (EINVAL);
2024 	}
2025 	drv_fw = &fw_info->fw_hdr;
2026 
2027 	/*
2028 	 * The firmware KLD contains many modules.  The KLD name is also the
2029 	 * name of the module that contains the default config file.
2030 	 */
2031 	default_cfg = firmware_get(fw_info->kld_name);
2032 
2033 	/* Read the header of the firmware on the card */
2034 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2035 	rc = -t4_read_flash(sc, FLASH_FW_START,
2036 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2037 	if (rc == 0)
2038 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2039 	else {
2040 		device_printf(sc->dev,
2041 		    "Unable to read card's firmware header: %d\n", rc);
2042 		card_fw_usable = 0;
2043 	}
2044 
2045 	/* This is the firmware in the KLD */
2046 	fw = firmware_get(fw_info->fw_mod_name);
2047 	if (fw != NULL) {
2048 		kld_fw = (const void *)fw->data;
2049 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2050 	} else {
2051 		kld_fw = NULL;
2052 		kld_fw_usable = 0;
2053 	}
2054 
2055 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2056 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2057 		/*
2058 		 * Common case: the firmware on the card is an exact match and
2059 		 * the KLD is an exact match too, or the KLD is
2060 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2061 		 * here -- use cxgbetool loadfw if you want to reinstall the
2062 		 * same firmware as the one on the card.
2063 		 */
2064 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2065 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2066 	    be32toh(card_fw->fw_ver))) {
2067 
2068 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2069 		if (rc != 0) {
2070 			device_printf(sc->dev,
2071 			    "failed to install firmware: %d\n", rc);
2072 			goto done;
2073 		}
2074 
2075 		/* Installed successfully, update the cached header too. */
2076 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2077 		card_fw_usable = 1;
2078 		need_fw_reset = 0;	/* already reset as part of load_fw */
2079 	}
2080 
2081 	if (!card_fw_usable) {
2082 		uint32_t d, c, k;
2083 
2084 		d = ntohl(drv_fw->fw_ver);
2085 		c = ntohl(card_fw->fw_ver);
2086 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2087 
2088 		device_printf(sc->dev, "Cannot find a usable firmware: "
2089 		    "fw_install %d, chip state %d, "
2090 		    "driver compiled with %d.%d.%d.%d, "
2091 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2092 		    t4_fw_install, state,
2093 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2094 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2095 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2096 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2097 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2098 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2099 		rc = EINVAL;
2100 		goto done;
2101 	}
2102 
2103 	/* We're using whatever's on the card and it's known to be good. */
2104 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2105 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2106 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2107 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2108 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2109 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2110 	t4_get_tp_version(sc, &sc->params.tp_vers);
2111 
2112 	/* Reset device */
2113 	if (need_fw_reset &&
2114 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2115 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2116 		if (rc != ETIMEDOUT && rc != EIO)
2117 			t4_fw_bye(sc, sc->mbox);
2118 		goto done;
2119 	}
2120 	sc->flags |= FW_OK;
2121 
2122 	rc = get_params__pre_init(sc);
2123 	if (rc != 0)
2124 		goto done; /* error message displayed already */
2125 
2126 	/* Partition adapter resources as specified in the config file. */
2127 	if (state == DEV_STATE_UNINIT) {
2128 
2129 		KASSERT(sc->flags & MASTER_PF,
2130 		    ("%s: trying to change chip settings when not master.",
2131 		    __func__));
2132 
2133 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2134 		if (rc != 0)
2135 			goto done;	/* error message displayed already */
2136 
2137 		t4_tweak_chip_settings(sc);
2138 
2139 		/* get basic stuff going */
2140 		rc = -t4_fw_initialize(sc, sc->mbox);
2141 		if (rc != 0) {
2142 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2143 			goto done;
2144 		}
2145 	} else {
2146 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2147 		sc->cfcsum = 0;
2148 	}
2149 
2150 done:
2151 	free(card_fw, M_CXGBE);
2152 	if (fw != NULL)
2153 		firmware_put(fw, FIRMWARE_UNLOAD);
2154 	if (default_cfg != NULL)
2155 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2156 
2157 	return (rc);
2158 }
2159 
2160 #define FW_PARAM_DEV(param) \
2161 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2162 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2163 #define FW_PARAM_PFVF(param) \
2164 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2165 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2166 
2167 /*
2168  * Partition chip resources for use between various PFs, VFs, etc.
2169  */
2170 static int
2171 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2172     const char *name_prefix)
2173 {
2174 	const struct firmware *cfg = NULL;
2175 	int rc = 0;
2176 	struct fw_caps_config_cmd caps;
2177 	uint32_t mtype, moff, finicsum, cfcsum;
2178 
2179 	/*
2180 	 * Figure out what configuration file to use.  Pick the default config
2181 	 * file for the card if the user hasn't specified one explicitly.
2182 	 */
2183 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2184 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2185 		/* Card specific overrides go here. */
2186 		if (pci_get_device(sc->dev) == 0x440a)
2187 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2188 		if (is_fpga(sc))
2189 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2190 	}
2191 
2192 	/*
2193 	 * We need to load another module if the profile is anything except
2194 	 * "default" or "flash".
2195 	 */
2196 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2197 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2198 		char s[32];
2199 
2200 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2201 		cfg = firmware_get(s);
2202 		if (cfg == NULL) {
2203 			if (default_cfg != NULL) {
2204 				device_printf(sc->dev,
2205 				    "unable to load module \"%s\" for "
2206 				    "configuration profile \"%s\", will use "
2207 				    "the default config file instead.\n",
2208 				    s, sc->cfg_file);
2209 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2210 				    "%s", DEFAULT_CF);
2211 			} else {
2212 				device_printf(sc->dev,
2213 				    "unable to load module \"%s\" for "
2214 				    "configuration profile \"%s\", will use "
2215 				    "the config file on the card's flash "
2216 				    "instead.\n", s, sc->cfg_file);
2217 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2218 				    "%s", FLASH_CF);
2219 			}
2220 		}
2221 	}
2222 
2223 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2224 	    default_cfg == NULL) {
2225 		device_printf(sc->dev,
2226 		    "default config file not available, will use the config "
2227 		    "file on the card's flash instead.\n");
2228 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2229 	}
2230 
2231 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2232 		u_int cflen, i, n;
2233 		const uint32_t *cfdata;
2234 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2235 
2236 		KASSERT(cfg != NULL || default_cfg != NULL,
2237 		    ("%s: no config to upload", __func__));
2238 
2239 		/*
2240 		 * Ask the firmware where it wants us to upload the config file.
2241 		 */
2242 		param = FW_PARAM_DEV(CF);
2243 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2244 		if (rc != 0) {
2245 			/* No support for config file?  Shouldn't happen. */
2246 			device_printf(sc->dev,
2247 			    "failed to query config file location: %d.\n", rc);
2248 			goto done;
2249 		}
2250 		mtype = G_FW_PARAMS_PARAM_Y(val);
2251 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2252 
2253 		/*
2254 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2255 		 * useless stuffing/comments at the end of the config file so
2256 		 * it's ok to simply throw away the last remaining bytes when
2257 		 * the config file is not an exact multiple of 4.  This also
2258 		 * helps with the validate_mt_off_len check.
2259 		 */
2260 		if (cfg != NULL) {
2261 			cflen = cfg->datasize & ~3;
2262 			cfdata = cfg->data;
2263 		} else {
2264 			cflen = default_cfg->datasize & ~3;
2265 			cfdata = default_cfg->data;
2266 		}
2267 
2268 		if (cflen > FLASH_CFG_MAX_SIZE) {
2269 			device_printf(sc->dev,
2270 			    "config file too long (%d, max allowed is %d).  "
2271 			    "Will try to use the config on the card, if any.\n",
2272 			    cflen, FLASH_CFG_MAX_SIZE);
2273 			goto use_config_on_flash;
2274 		}
2275 
2276 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2277 		if (rc != 0) {
2278 			device_printf(sc->dev,
2279 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2280 			    "Will try to use the config on the card, if any.\n",
2281 			    __func__, mtype, moff, cflen, rc);
2282 			goto use_config_on_flash;
2283 		}
2284 
2285 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2286 		while (cflen) {
2287 			off = position_memwin(sc, 2, addr);
2288 			n = min(cflen, mw_aperture - off);
2289 			for (i = 0; i < n; i += 4)
2290 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2291 			cflen -= n;
2292 			addr += n;
2293 		}
2294 	} else {
2295 use_config_on_flash:
2296 		mtype = FW_MEMTYPE_CF_FLASH;
2297 		moff = t4_flash_cfg_addr(sc);
2298 	}
2299 
2300 	bzero(&caps, sizeof(caps));
2301 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2302 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2303 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2304 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2305 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2306 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2307 	if (rc != 0) {
2308 		device_printf(sc->dev,
2309 		    "failed to pre-process config file: %d "
2310 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2311 		goto done;
2312 	}
2313 
2314 	finicsum = be32toh(caps.finicsum);
2315 	cfcsum = be32toh(caps.cfcsum);
2316 	if (finicsum != cfcsum) {
2317 		device_printf(sc->dev,
2318 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2319 		    finicsum, cfcsum);
2320 	}
2321 	sc->cfcsum = cfcsum;
2322 
2323 #define LIMIT_CAPS(x) do { \
2324 	caps.x &= htobe16(t4_##x##_allowed); \
2325 	sc->x = htobe16(caps.x); \
2326 } while (0)
2327 
2328 	/*
2329 	 * Let the firmware know what features will (not) be used so it can tune
2330 	 * things accordingly.
2331 	 */
2332 	LIMIT_CAPS(linkcaps);
2333 	LIMIT_CAPS(niccaps);
2334 	LIMIT_CAPS(toecaps);
2335 	LIMIT_CAPS(rdmacaps);
2336 	LIMIT_CAPS(iscsicaps);
2337 	LIMIT_CAPS(fcoecaps);
2338 #undef LIMIT_CAPS
2339 
2340 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2341 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2342 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2343 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2344 	if (rc != 0) {
2345 		device_printf(sc->dev,
2346 		    "failed to process config file: %d.\n", rc);
2347 	}
2348 done:
2349 	if (cfg != NULL)
2350 		firmware_put(cfg, FIRMWARE_UNLOAD);
2351 	return (rc);
2352 }
2353 
2354 /*
2355  * Retrieve parameters that are needed (or nice to have) very early.
2356  */
2357 static int
2358 get_params__pre_init(struct adapter *sc)
2359 {
2360 	int rc;
2361 	uint32_t param[2], val[2];
2362 	struct fw_devlog_cmd cmd;
2363 	struct devlog_params *dlog = &sc->params.devlog;
2364 
2365 	param[0] = FW_PARAM_DEV(PORTVEC);
2366 	param[1] = FW_PARAM_DEV(CCLK);
2367 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2368 	if (rc != 0) {
2369 		device_printf(sc->dev,
2370 		    "failed to query parameters (pre_init): %d.\n", rc);
2371 		return (rc);
2372 	}
2373 
2374 	sc->params.portvec = val[0];
2375 	sc->params.nports = bitcount32(val[0]);
2376 	sc->params.vpd.cclk = val[1];
2377 
2378 	/* Read device log parameters. */
2379 	bzero(&cmd, sizeof(cmd));
2380 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2381 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2382 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2383 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2384 	if (rc != 0) {
2385 		device_printf(sc->dev,
2386 		    "failed to get devlog parameters: %d.\n", rc);
2387 		bzero(dlog, sizeof (*dlog));
2388 		rc = 0;	/* devlog isn't critical for device operation */
2389 	} else {
2390 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2391 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2392 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2393 		dlog->size = be32toh(cmd.memsize_devlog);
2394 	}
2395 
2396 	return (rc);
2397 }
2398 
2399 /*
2400  * Retrieve various parameters that are of interest to the driver.  The device
2401  * has been initialized by the firmware at this point.
2402  */
2403 static int
2404 get_params__post_init(struct adapter *sc)
2405 {
2406 	int rc;
2407 	uint32_t param[7], val[7];
2408 	struct fw_caps_config_cmd caps;
2409 
2410 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2411 	param[1] = FW_PARAM_PFVF(EQ_START);
2412 	param[2] = FW_PARAM_PFVF(FILTER_START);
2413 	param[3] = FW_PARAM_PFVF(FILTER_END);
2414 	param[4] = FW_PARAM_PFVF(L2T_START);
2415 	param[5] = FW_PARAM_PFVF(L2T_END);
2416 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2417 	if (rc != 0) {
2418 		device_printf(sc->dev,
2419 		    "failed to query parameters (post_init): %d.\n", rc);
2420 		return (rc);
2421 	}
2422 
2423 	sc->sge.iq_start = val[0];
2424 	sc->sge.eq_start = val[1];
2425 	sc->tids.ftid_base = val[2];
2426 	sc->tids.nftids = val[3] - val[2] + 1;
2427 	sc->vres.l2t.start = val[4];
2428 	sc->vres.l2t.size = val[5] - val[4] + 1;
2429 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2430 	    ("%s: L2 table size (%u) larger than expected (%u)",
2431 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2432 
2433 	/* get capabilites */
2434 	bzero(&caps, sizeof(caps));
2435 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2436 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2437 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2438 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2439 	if (rc != 0) {
2440 		device_printf(sc->dev,
2441 		    "failed to get card capabilities: %d.\n", rc);
2442 		return (rc);
2443 	}
2444 
2445 	if (caps.toecaps) {
2446 		/* query offload-related parameters */
2447 		param[0] = FW_PARAM_DEV(NTID);
2448 		param[1] = FW_PARAM_PFVF(SERVER_START);
2449 		param[2] = FW_PARAM_PFVF(SERVER_END);
2450 		param[3] = FW_PARAM_PFVF(TDDP_START);
2451 		param[4] = FW_PARAM_PFVF(TDDP_END);
2452 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2453 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2454 		if (rc != 0) {
2455 			device_printf(sc->dev,
2456 			    "failed to query TOE parameters: %d.\n", rc);
2457 			return (rc);
2458 		}
2459 		sc->tids.ntids = val[0];
2460 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2461 		sc->tids.stid_base = val[1];
2462 		sc->tids.nstids = val[2] - val[1] + 1;
2463 		sc->vres.ddp.start = val[3];
2464 		sc->vres.ddp.size = val[4] - val[3] + 1;
2465 		sc->params.ofldq_wr_cred = val[5];
2466 		sc->params.offload = 1;
2467 	}
2468 	if (caps.rdmacaps) {
2469 		param[0] = FW_PARAM_PFVF(STAG_START);
2470 		param[1] = FW_PARAM_PFVF(STAG_END);
2471 		param[2] = FW_PARAM_PFVF(RQ_START);
2472 		param[3] = FW_PARAM_PFVF(RQ_END);
2473 		param[4] = FW_PARAM_PFVF(PBL_START);
2474 		param[5] = FW_PARAM_PFVF(PBL_END);
2475 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2476 		if (rc != 0) {
2477 			device_printf(sc->dev,
2478 			    "failed to query RDMA parameters(1): %d.\n", rc);
2479 			return (rc);
2480 		}
2481 		sc->vres.stag.start = val[0];
2482 		sc->vres.stag.size = val[1] - val[0] + 1;
2483 		sc->vres.rq.start = val[2];
2484 		sc->vres.rq.size = val[3] - val[2] + 1;
2485 		sc->vres.pbl.start = val[4];
2486 		sc->vres.pbl.size = val[5] - val[4] + 1;
2487 
2488 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2489 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2490 		param[2] = FW_PARAM_PFVF(CQ_START);
2491 		param[3] = FW_PARAM_PFVF(CQ_END);
2492 		param[4] = FW_PARAM_PFVF(OCQ_START);
2493 		param[5] = FW_PARAM_PFVF(OCQ_END);
2494 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2495 		if (rc != 0) {
2496 			device_printf(sc->dev,
2497 			    "failed to query RDMA parameters(2): %d.\n", rc);
2498 			return (rc);
2499 		}
2500 		sc->vres.qp.start = val[0];
2501 		sc->vres.qp.size = val[1] - val[0] + 1;
2502 		sc->vres.cq.start = val[2];
2503 		sc->vres.cq.size = val[3] - val[2] + 1;
2504 		sc->vres.ocq.start = val[4];
2505 		sc->vres.ocq.size = val[5] - val[4] + 1;
2506 	}
2507 	if (caps.iscsicaps) {
2508 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2509 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2510 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2511 		if (rc != 0) {
2512 			device_printf(sc->dev,
2513 			    "failed to query iSCSI parameters: %d.\n", rc);
2514 			return (rc);
2515 		}
2516 		sc->vres.iscsi.start = val[0];
2517 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2518 	}
2519 
2520 	/*
2521 	 * We've got the params we wanted to query via the firmware.  Now grab
2522 	 * some others directly from the chip.
2523 	 */
2524 	rc = t4_read_chip_settings(sc);
2525 
2526 	return (rc);
2527 }
2528 
2529 static int
2530 set_params__post_init(struct adapter *sc)
2531 {
2532 	uint32_t param, val;
2533 
2534 	/* ask for encapsulated CPLs */
2535 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2536 	val = 1;
2537 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2538 
2539 	return (0);
2540 }
2541 
2542 #undef FW_PARAM_PFVF
2543 #undef FW_PARAM_DEV
2544 
2545 static void
2546 t4_set_desc(struct adapter *sc)
2547 {
2548 	char buf[128];
2549 	struct adapter_params *p = &sc->params;
2550 
2551 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2552 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2553 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2554 
2555 	device_set_desc_copy(sc->dev, buf);
2556 }
2557 
2558 static void
2559 build_medialist(struct port_info *pi)
2560 {
2561 	struct ifmedia *media = &pi->media;
2562 	int data, m;
2563 
2564 	PORT_LOCK(pi);
2565 
2566 	ifmedia_removeall(media);
2567 
2568 	m = IFM_ETHER | IFM_FDX;
2569 	data = (pi->port_type << 8) | pi->mod_type;
2570 
2571 	switch(pi->port_type) {
2572 	case FW_PORT_TYPE_BT_XFI:
2573 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2574 		break;
2575 
2576 	case FW_PORT_TYPE_BT_XAUI:
2577 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2578 		/* fall through */
2579 
2580 	case FW_PORT_TYPE_BT_SGMII:
2581 		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2582 		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2583 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2584 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2585 		break;
2586 
2587 	case FW_PORT_TYPE_CX4:
2588 		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2589 		ifmedia_set(media, m | IFM_10G_CX4);
2590 		break;
2591 
2592 	case FW_PORT_TYPE_SFP:
2593 	case FW_PORT_TYPE_FIBER_XFI:
2594 	case FW_PORT_TYPE_FIBER_XAUI:
2595 		switch (pi->mod_type) {
2596 
2597 		case FW_PORT_MOD_TYPE_LR:
2598 			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2599 			ifmedia_set(media, m | IFM_10G_LR);
2600 			break;
2601 
2602 		case FW_PORT_MOD_TYPE_SR:
2603 			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2604 			ifmedia_set(media, m | IFM_10G_SR);
2605 			break;
2606 
2607 		case FW_PORT_MOD_TYPE_LRM:
2608 			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2609 			ifmedia_set(media, m | IFM_10G_LRM);
2610 			break;
2611 
2612 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2613 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2614 			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2615 			ifmedia_set(media, m | IFM_10G_TWINAX);
2616 			break;
2617 
2618 		case FW_PORT_MOD_TYPE_NONE:
2619 			m &= ~IFM_FDX;
2620 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2621 			ifmedia_set(media, m | IFM_NONE);
2622 			break;
2623 
2624 		case FW_PORT_MOD_TYPE_NA:
2625 		case FW_PORT_MOD_TYPE_ER:
2626 		default:
2627 			device_printf(pi->dev,
2628 			    "unknown port_type (%d), mod_type (%d)\n",
2629 			    pi->port_type, pi->mod_type);
2630 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2631 			ifmedia_set(media, m | IFM_UNKNOWN);
2632 			break;
2633 		}
2634 		break;
2635 
2636 	case FW_PORT_TYPE_QSFP:
2637 		switch (pi->mod_type) {
2638 
2639 		case FW_PORT_MOD_TYPE_LR:
2640 			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2641 			ifmedia_set(media, m | IFM_40G_LR4);
2642 			break;
2643 
2644 		case FW_PORT_MOD_TYPE_SR:
2645 			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2646 			ifmedia_set(media, m | IFM_40G_SR4);
2647 			break;
2648 
2649 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2650 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2651 			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2652 			ifmedia_set(media, m | IFM_40G_CR4);
2653 			break;
2654 
2655 		case FW_PORT_MOD_TYPE_NONE:
2656 			m &= ~IFM_FDX;
2657 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2658 			ifmedia_set(media, m | IFM_NONE);
2659 			break;
2660 
2661 		default:
2662 			device_printf(pi->dev,
2663 			    "unknown port_type (%d), mod_type (%d)\n",
2664 			    pi->port_type, pi->mod_type);
2665 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2666 			ifmedia_set(media, m | IFM_UNKNOWN);
2667 			break;
2668 		}
2669 		break;
2670 
2671 	default:
2672 		device_printf(pi->dev,
2673 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2674 		    pi->mod_type);
2675 		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2676 		ifmedia_set(media, m | IFM_UNKNOWN);
2677 		break;
2678 	}
2679 
2680 	PORT_UNLOCK(pi);
2681 }
2682 
2683 #define FW_MAC_EXACT_CHUNK	7
2684 
2685 /*
2686  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2687  * indicates which parameters should be programmed (the rest are left alone).
2688  */
2689 static int
2690 update_mac_settings(struct port_info *pi, int flags)
2691 {
2692 	int rc;
2693 	struct ifnet *ifp = pi->ifp;
2694 	struct adapter *sc = pi->adapter;
2695 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2696 
2697 	ASSERT_SYNCHRONIZED_OP(sc);
2698 	KASSERT(flags, ("%s: not told what to update.", __func__));
2699 
2700 	if (flags & XGMAC_MTU)
2701 		mtu = ifp->if_mtu;
2702 
2703 	if (flags & XGMAC_PROMISC)
2704 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2705 
2706 	if (flags & XGMAC_ALLMULTI)
2707 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2708 
2709 	if (flags & XGMAC_VLANEX)
2710 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2711 
2712 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2713 	    vlanex, false);
2714 	if (rc) {
2715 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2716 		return (rc);
2717 	}
2718 
2719 	if (flags & XGMAC_UCADDR) {
2720 		uint8_t ucaddr[ETHER_ADDR_LEN];
2721 
2722 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2723 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2724 		    ucaddr, true, true);
2725 		if (rc < 0) {
2726 			rc = -rc;
2727 			if_printf(ifp, "change_mac failed: %d\n", rc);
2728 			return (rc);
2729 		} else {
2730 			pi->xact_addr_filt = rc;
2731 			rc = 0;
2732 		}
2733 	}
2734 
2735 	if (flags & XGMAC_MCADDRS) {
2736 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2737 		int del = 1;
2738 		uint64_t hash = 0;
2739 		struct ifmultiaddr *ifma;
2740 		int i = 0, j;
2741 
2742 		if_maddr_rlock(ifp);
2743 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2744 			if (ifma->ifma_addr->sa_family != AF_LINK)
2745 				continue;
2746 			mcaddr[i++] =
2747 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2748 
2749 			if (i == FW_MAC_EXACT_CHUNK) {
2750 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2751 				    del, i, mcaddr, NULL, &hash, 0);
2752 				if (rc < 0) {
2753 					rc = -rc;
2754 					for (j = 0; j < i; j++) {
2755 						if_printf(ifp,
2756 						    "failed to add mc address"
2757 						    " %02x:%02x:%02x:"
2758 						    "%02x:%02x:%02x rc=%d\n",
2759 						    mcaddr[j][0], mcaddr[j][1],
2760 						    mcaddr[j][2], mcaddr[j][3],
2761 						    mcaddr[j][4], mcaddr[j][5],
2762 						    rc);
2763 					}
2764 					goto mcfail;
2765 				}
2766 				del = 0;
2767 				i = 0;
2768 			}
2769 		}
2770 		if (i > 0) {
2771 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2772 			    del, i, mcaddr, NULL, &hash, 0);
2773 			if (rc < 0) {
2774 				rc = -rc;
2775 				for (j = 0; j < i; j++) {
2776 					if_printf(ifp,
2777 					    "failed to add mc address"
2778 					    " %02x:%02x:%02x:"
2779 					    "%02x:%02x:%02x rc=%d\n",
2780 					    mcaddr[j][0], mcaddr[j][1],
2781 					    mcaddr[j][2], mcaddr[j][3],
2782 					    mcaddr[j][4], mcaddr[j][5],
2783 					    rc);
2784 				}
2785 				goto mcfail;
2786 			}
2787 		}
2788 
2789 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2790 		if (rc != 0)
2791 			if_printf(ifp, "failed to set mc address hash: %d", rc);
2792 mcfail:
2793 		if_maddr_runlock(ifp);
2794 	}
2795 
2796 	return (rc);
2797 }
2798 
2799 int
2800 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2801     char *wmesg)
2802 {
2803 	int rc, pri;
2804 
2805 #ifdef WITNESS
2806 	/* the caller thinks it's ok to sleep, but is it really? */
2807 	if (flags & SLEEP_OK)
2808 		pause("t4slptst", 1);
2809 #endif
2810 
2811 	if (INTR_OK)
2812 		pri = PCATCH;
2813 	else
2814 		pri = 0;
2815 
2816 	ADAPTER_LOCK(sc);
2817 	for (;;) {
2818 
2819 		if (pi && IS_DOOMED(pi)) {
2820 			rc = ENXIO;
2821 			goto done;
2822 		}
2823 
2824 		if (!IS_BUSY(sc)) {
2825 			rc = 0;
2826 			break;
2827 		}
2828 
2829 		if (!(flags & SLEEP_OK)) {
2830 			rc = EBUSY;
2831 			goto done;
2832 		}
2833 
2834 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2835 			rc = EINTR;
2836 			goto done;
2837 		}
2838 	}
2839 
2840 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2841 	SET_BUSY(sc);
2842 #ifdef INVARIANTS
2843 	sc->last_op = wmesg;
2844 	sc->last_op_thr = curthread;
2845 #endif
2846 
2847 done:
2848 	if (!(flags & HOLD_LOCK) || rc)
2849 		ADAPTER_UNLOCK(sc);
2850 
2851 	return (rc);
2852 }
2853 
2854 void
2855 end_synchronized_op(struct adapter *sc, int flags)
2856 {
2857 
2858 	if (flags & LOCK_HELD)
2859 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2860 	else
2861 		ADAPTER_LOCK(sc);
2862 
2863 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2864 	CLR_BUSY(sc);
2865 	wakeup(&sc->flags);
2866 	ADAPTER_UNLOCK(sc);
2867 }
2868 
2869 static int
2870 cxgbe_init_synchronized(struct port_info *pi)
2871 {
2872 	struct adapter *sc = pi->adapter;
2873 	struct ifnet *ifp = pi->ifp;
2874 	int rc = 0;
2875 
2876 	ASSERT_SYNCHRONIZED_OP(sc);
2877 
2878 	if (isset(&sc->open_device_map, pi->port_id)) {
2879 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2880 		    ("mismatch between open_device_map and if_drv_flags"));
2881 		return (0);	/* already running */
2882 	}
2883 
2884 	if (!(sc->flags & FULL_INIT_DONE) &&
2885 	    ((rc = adapter_full_init(sc)) != 0))
2886 		return (rc);	/* error message displayed already */
2887 
2888 	if (!(pi->flags & PORT_INIT_DONE) &&
2889 	    ((rc = port_full_init(pi)) != 0))
2890 		return (rc); /* error message displayed already */
2891 
2892 	rc = update_mac_settings(pi, XGMAC_ALL);
2893 	if (rc)
2894 		goto done;	/* error message displayed already */
2895 
2896 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2897 	if (rc != 0) {
2898 		if_printf(ifp, "start_link failed: %d\n", rc);
2899 		goto done;
2900 	}
2901 
2902 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2903 	if (rc != 0) {
2904 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2905 		goto done;
2906 	}
2907 
2908 	/*
2909 	 * The first iq of the first port to come up is used for tracing.
2910 	 */
2911 	if (sc->traceq < 0) {
2912 		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2913 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
2914 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2915 		    V_QUEUENUMBER(sc->traceq));
2916 		pi->flags |= HAS_TRACEQ;
2917 	}
2918 
2919 	/* all ok */
2920 	setbit(&sc->open_device_map, pi->port_id);
2921 	PORT_LOCK(pi);
2922 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2923 	PORT_UNLOCK(pi);
2924 
2925 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2926 done:
2927 	if (rc != 0)
2928 		cxgbe_uninit_synchronized(pi);
2929 
2930 	return (rc);
2931 }
2932 
2933 /*
2934  * Idempotent.
2935  */
2936 static int
2937 cxgbe_uninit_synchronized(struct port_info *pi)
2938 {
2939 	struct adapter *sc = pi->adapter;
2940 	struct ifnet *ifp = pi->ifp;
2941 	int rc;
2942 
2943 	ASSERT_SYNCHRONIZED_OP(sc);
2944 
2945 	/*
2946 	 * Disable the VI so that all its data in either direction is discarded
2947 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2948 	 * tick) intact as the TP can deliver negative advice or data that it's
2949 	 * holding in its RAM (for an offloaded connection) even after the VI is
2950 	 * disabled.
2951 	 */
2952 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2953 	if (rc) {
2954 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2955 		return (rc);
2956 	}
2957 
2958 	clrbit(&sc->open_device_map, pi->port_id);
2959 	PORT_LOCK(pi);
2960 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2961 	PORT_UNLOCK(pi);
2962 
2963 	pi->link_cfg.link_ok = 0;
2964 	pi->link_cfg.speed = 0;
2965 	pi->linkdnrc = -1;
2966 	t4_os_link_changed(sc, pi->port_id, 0, -1);
2967 
2968 	return (0);
2969 }
2970 
2971 /*
2972  * It is ok for this function to fail midway and return right away.  t4_detach
2973  * will walk the entire sc->irq list and clean up whatever is valid.
2974  */
2975 static int
2976 setup_intr_handlers(struct adapter *sc)
2977 {
2978 	int rc, rid, p, q;
2979 	char s[8];
2980 	struct irq *irq;
2981 	struct port_info *pi;
2982 	struct sge_rxq *rxq;
2983 #ifdef TCP_OFFLOAD
2984 	struct sge_ofld_rxq *ofld_rxq;
2985 #endif
2986 
2987 	/*
2988 	 * Setup interrupts.
2989 	 */
2990 	irq = &sc->irq[0];
2991 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
2992 	if (sc->intr_count == 1) {
2993 		KASSERT(!(sc->flags & INTR_DIRECT),
2994 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
2995 
2996 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2997 		if (rc != 0)
2998 			return (rc);
2999 	} else {
3000 		/* Multiple interrupts. */
3001 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3002 		    ("%s: too few intr.", __func__));
3003 
3004 		/* The first one is always error intr */
3005 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3006 		if (rc != 0)
3007 			return (rc);
3008 		irq++;
3009 		rid++;
3010 
3011 		/* The second one is always the firmware event queue */
3012 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3013 		    "evt");
3014 		if (rc != 0)
3015 			return (rc);
3016 		irq++;
3017 		rid++;
3018 
3019 		/*
3020 		 * Note that if INTR_DIRECT is not set then either the NIC rx
3021 		 * queues or (exclusive or) the TOE rx queueus will be taking
3022 		 * direct interrupts.
3023 		 *
3024 		 * There is no need to check for is_offload(sc) as nofldrxq
3025 		 * will be 0 if offload is disabled.
3026 		 */
3027 		for_each_port(sc, p) {
3028 			pi = sc->port[p];
3029 
3030 #ifdef TCP_OFFLOAD
3031 			/*
3032 			 * Skip over the NIC queues if they aren't taking direct
3033 			 * interrupts.
3034 			 */
3035 			if (!(sc->flags & INTR_DIRECT) &&
3036 			    pi->nofldrxq > pi->nrxq)
3037 				goto ofld_queues;
3038 #endif
3039 			rxq = &sc->sge.rxq[pi->first_rxq];
3040 			for (q = 0; q < pi->nrxq; q++, rxq++) {
3041 				snprintf(s, sizeof(s), "%d.%d", p, q);
3042 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3043 				    s);
3044 				if (rc != 0)
3045 					return (rc);
3046 				irq++;
3047 				rid++;
3048 			}
3049 
3050 #ifdef TCP_OFFLOAD
3051 			/*
3052 			 * Skip over the offload queues if they aren't taking
3053 			 * direct interrupts.
3054 			 */
3055 			if (!(sc->flags & INTR_DIRECT))
3056 				continue;
3057 ofld_queues:
3058 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3059 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3060 				snprintf(s, sizeof(s), "%d,%d", p, q);
3061 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3062 				    ofld_rxq, s);
3063 				if (rc != 0)
3064 					return (rc);
3065 				irq++;
3066 				rid++;
3067 			}
3068 #endif
3069 		}
3070 	}
3071 
3072 	return (0);
3073 }
3074 
3075 static int
3076 adapter_full_init(struct adapter *sc)
3077 {
3078 	int rc, i;
3079 
3080 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3081 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3082 	    ("%s: FULL_INIT_DONE already", __func__));
3083 
3084 	/*
3085 	 * queues that belong to the adapter (not any particular port).
3086 	 */
3087 	rc = t4_setup_adapter_queues(sc);
3088 	if (rc != 0)
3089 		goto done;
3090 
3091 	for (i = 0; i < nitems(sc->tq); i++) {
3092 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3093 		    taskqueue_thread_enqueue, &sc->tq[i]);
3094 		if (sc->tq[i] == NULL) {
3095 			device_printf(sc->dev,
3096 			    "failed to allocate task queue %d\n", i);
3097 			rc = ENOMEM;
3098 			goto done;
3099 		}
3100 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3101 		    device_get_nameunit(sc->dev), i);
3102 	}
3103 
3104 	t4_intr_enable(sc);
3105 	sc->flags |= FULL_INIT_DONE;
3106 done:
3107 	if (rc != 0)
3108 		adapter_full_uninit(sc);
3109 
3110 	return (rc);
3111 }
3112 
3113 static int
3114 adapter_full_uninit(struct adapter *sc)
3115 {
3116 	int i;
3117 
3118 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3119 
3120 	t4_teardown_adapter_queues(sc);
3121 
3122 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3123 		taskqueue_free(sc->tq[i]);
3124 		sc->tq[i] = NULL;
3125 	}
3126 
3127 	sc->flags &= ~FULL_INIT_DONE;
3128 
3129 	return (0);
3130 }
3131 
3132 static int
3133 port_full_init(struct port_info *pi)
3134 {
3135 	struct adapter *sc = pi->adapter;
3136 	struct ifnet *ifp = pi->ifp;
3137 	uint16_t *rss;
3138 	struct sge_rxq *rxq;
3139 	int rc, i;
3140 
3141 	ASSERT_SYNCHRONIZED_OP(sc);
3142 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3143 	    ("%s: PORT_INIT_DONE already", __func__));
3144 
3145 	sysctl_ctx_init(&pi->ctx);
3146 	pi->flags |= PORT_SYSCTL_CTX;
3147 
3148 	/*
3149 	 * Allocate tx/rx/fl queues for this port.
3150 	 */
3151 	rc = t4_setup_port_queues(pi);
3152 	if (rc != 0)
3153 		goto done;	/* error message displayed already */
3154 
3155 	/*
3156 	 * Setup RSS for this port.
3157 	 */
3158 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3159 	    M_ZERO | M_WAITOK);
3160 	for_each_rxq(pi, i, rxq) {
3161 		rss[i] = rxq->iq.abs_id;
3162 	}
3163 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3164 	    pi->rss_size, rss, pi->nrxq);
3165 	free(rss, M_CXGBE);
3166 	if (rc != 0) {
3167 		if_printf(ifp, "rss_config failed: %d\n", rc);
3168 		goto done;
3169 	}
3170 
3171 	pi->flags |= PORT_INIT_DONE;
3172 done:
3173 	if (rc != 0)
3174 		port_full_uninit(pi);
3175 
3176 	return (rc);
3177 }
3178 
3179 /*
3180  * Idempotent.
3181  */
3182 static int
3183 port_full_uninit(struct port_info *pi)
3184 {
3185 	struct adapter *sc = pi->adapter;
3186 	int i;
3187 	struct sge_rxq *rxq;
3188 	struct sge_txq *txq;
3189 #ifdef TCP_OFFLOAD
3190 	struct sge_ofld_rxq *ofld_rxq;
3191 	struct sge_wrq *ofld_txq;
3192 #endif
3193 
3194 	if (pi->flags & PORT_INIT_DONE) {
3195 
3196 		/* Need to quiesce queues.  XXX: ctrl queues? */
3197 
3198 		for_each_txq(pi, i, txq) {
3199 			quiesce_eq(sc, &txq->eq);
3200 		}
3201 
3202 #ifdef TCP_OFFLOAD
3203 		for_each_ofld_txq(pi, i, ofld_txq) {
3204 			quiesce_eq(sc, &ofld_txq->eq);
3205 		}
3206 #endif
3207 
3208 		for_each_rxq(pi, i, rxq) {
3209 			quiesce_iq(sc, &rxq->iq);
3210 			quiesce_fl(sc, &rxq->fl);
3211 		}
3212 
3213 #ifdef TCP_OFFLOAD
3214 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3215 			quiesce_iq(sc, &ofld_rxq->iq);
3216 			quiesce_fl(sc, &ofld_rxq->fl);
3217 		}
3218 #endif
3219 	}
3220 
3221 	t4_teardown_port_queues(pi);
3222 	pi->flags &= ~PORT_INIT_DONE;
3223 
3224 	return (0);
3225 }
3226 
3227 static void
3228 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3229 {
3230 	EQ_LOCK(eq);
3231 	eq->flags |= EQ_DOOMED;
3232 
3233 	/*
3234 	 * Wait for the response to a credit flush if one's
3235 	 * pending.
3236 	 */
3237 	while (eq->flags & EQ_CRFLUSHED)
3238 		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3239 	EQ_UNLOCK(eq);
3240 
3241 	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3242 	pause("callout", 10);		/* Still iffy */
3243 
3244 	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3245 }
3246 
3247 static void
3248 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3249 {
3250 	(void) sc;	/* unused */
3251 
3252 	/* Synchronize with the interrupt handler */
3253 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3254 		pause("iqfree", 1);
3255 }
3256 
3257 static void
3258 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3259 {
3260 	mtx_lock(&sc->sfl_lock);
3261 	FL_LOCK(fl);
3262 	fl->flags |= FL_DOOMED;
3263 	FL_UNLOCK(fl);
3264 	mtx_unlock(&sc->sfl_lock);
3265 
3266 	callout_drain(&sc->sfl_callout);
3267 	KASSERT((fl->flags & FL_STARVING) == 0,
3268 	    ("%s: still starving", __func__));
3269 }
3270 
3271 static int
3272 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3273     driver_intr_t *handler, void *arg, char *name)
3274 {
3275 	int rc;
3276 
3277 	irq->rid = rid;
3278 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3279 	    RF_SHAREABLE | RF_ACTIVE);
3280 	if (irq->res == NULL) {
3281 		device_printf(sc->dev,
3282 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3283 		return (ENOMEM);
3284 	}
3285 
3286 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3287 	    NULL, handler, arg, &irq->tag);
3288 	if (rc != 0) {
3289 		device_printf(sc->dev,
3290 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3291 		    rid, name, rc);
3292 	} else if (name)
3293 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3294 
3295 	return (rc);
3296 }
3297 
3298 static int
3299 t4_free_irq(struct adapter *sc, struct irq *irq)
3300 {
3301 	if (irq->tag)
3302 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3303 	if (irq->res)
3304 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3305 
3306 	bzero(irq, sizeof(*irq));
3307 
3308 	return (0);
3309 }
3310 
3311 static void
3312 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3313     unsigned int end)
3314 {
3315 	uint32_t *p = (uint32_t *)(buf + start);
3316 
3317 	for ( ; start <= end; start += sizeof(uint32_t))
3318 		*p++ = t4_read_reg(sc, start);
3319 }
3320 
3321 static void
3322 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3323 {
3324 	int i, n;
3325 	const unsigned int *reg_ranges;
3326 	static const unsigned int t4_reg_ranges[] = {
3327 		0x1008, 0x1108,
3328 		0x1180, 0x11b4,
3329 		0x11fc, 0x123c,
3330 		0x1300, 0x173c,
3331 		0x1800, 0x18fc,
3332 		0x3000, 0x30d8,
3333 		0x30e0, 0x5924,
3334 		0x5960, 0x59d4,
3335 		0x5a00, 0x5af8,
3336 		0x6000, 0x6098,
3337 		0x6100, 0x6150,
3338 		0x6200, 0x6208,
3339 		0x6240, 0x6248,
3340 		0x6280, 0x6338,
3341 		0x6370, 0x638c,
3342 		0x6400, 0x643c,
3343 		0x6500, 0x6524,
3344 		0x6a00, 0x6a38,
3345 		0x6a60, 0x6a78,
3346 		0x6b00, 0x6b84,
3347 		0x6bf0, 0x6c84,
3348 		0x6cf0, 0x6d84,
3349 		0x6df0, 0x6e84,
3350 		0x6ef0, 0x6f84,
3351 		0x6ff0, 0x7084,
3352 		0x70f0, 0x7184,
3353 		0x71f0, 0x7284,
3354 		0x72f0, 0x7384,
3355 		0x73f0, 0x7450,
3356 		0x7500, 0x7530,
3357 		0x7600, 0x761c,
3358 		0x7680, 0x76cc,
3359 		0x7700, 0x7798,
3360 		0x77c0, 0x77fc,
3361 		0x7900, 0x79fc,
3362 		0x7b00, 0x7c38,
3363 		0x7d00, 0x7efc,
3364 		0x8dc0, 0x8e1c,
3365 		0x8e30, 0x8e78,
3366 		0x8ea0, 0x8f6c,
3367 		0x8fc0, 0x9074,
3368 		0x90fc, 0x90fc,
3369 		0x9400, 0x9458,
3370 		0x9600, 0x96bc,
3371 		0x9800, 0x9808,
3372 		0x9820, 0x983c,
3373 		0x9850, 0x9864,
3374 		0x9c00, 0x9c6c,
3375 		0x9c80, 0x9cec,
3376 		0x9d00, 0x9d6c,
3377 		0x9d80, 0x9dec,
3378 		0x9e00, 0x9e6c,
3379 		0x9e80, 0x9eec,
3380 		0x9f00, 0x9f6c,
3381 		0x9f80, 0x9fec,
3382 		0xd004, 0xd03c,
3383 		0xdfc0, 0xdfe0,
3384 		0xe000, 0xea7c,
3385 		0xf000, 0x11190,
3386 		0x19040, 0x1906c,
3387 		0x19078, 0x19080,
3388 		0x1908c, 0x19124,
3389 		0x19150, 0x191b0,
3390 		0x191d0, 0x191e8,
3391 		0x19238, 0x1924c,
3392 		0x193f8, 0x19474,
3393 		0x19490, 0x194f8,
3394 		0x19800, 0x19f30,
3395 		0x1a000, 0x1a06c,
3396 		0x1a0b0, 0x1a120,
3397 		0x1a128, 0x1a138,
3398 		0x1a190, 0x1a1c4,
3399 		0x1a1fc, 0x1a1fc,
3400 		0x1e040, 0x1e04c,
3401 		0x1e284, 0x1e28c,
3402 		0x1e2c0, 0x1e2c0,
3403 		0x1e2e0, 0x1e2e0,
3404 		0x1e300, 0x1e384,
3405 		0x1e3c0, 0x1e3c8,
3406 		0x1e440, 0x1e44c,
3407 		0x1e684, 0x1e68c,
3408 		0x1e6c0, 0x1e6c0,
3409 		0x1e6e0, 0x1e6e0,
3410 		0x1e700, 0x1e784,
3411 		0x1e7c0, 0x1e7c8,
3412 		0x1e840, 0x1e84c,
3413 		0x1ea84, 0x1ea8c,
3414 		0x1eac0, 0x1eac0,
3415 		0x1eae0, 0x1eae0,
3416 		0x1eb00, 0x1eb84,
3417 		0x1ebc0, 0x1ebc8,
3418 		0x1ec40, 0x1ec4c,
3419 		0x1ee84, 0x1ee8c,
3420 		0x1eec0, 0x1eec0,
3421 		0x1eee0, 0x1eee0,
3422 		0x1ef00, 0x1ef84,
3423 		0x1efc0, 0x1efc8,
3424 		0x1f040, 0x1f04c,
3425 		0x1f284, 0x1f28c,
3426 		0x1f2c0, 0x1f2c0,
3427 		0x1f2e0, 0x1f2e0,
3428 		0x1f300, 0x1f384,
3429 		0x1f3c0, 0x1f3c8,
3430 		0x1f440, 0x1f44c,
3431 		0x1f684, 0x1f68c,
3432 		0x1f6c0, 0x1f6c0,
3433 		0x1f6e0, 0x1f6e0,
3434 		0x1f700, 0x1f784,
3435 		0x1f7c0, 0x1f7c8,
3436 		0x1f840, 0x1f84c,
3437 		0x1fa84, 0x1fa8c,
3438 		0x1fac0, 0x1fac0,
3439 		0x1fae0, 0x1fae0,
3440 		0x1fb00, 0x1fb84,
3441 		0x1fbc0, 0x1fbc8,
3442 		0x1fc40, 0x1fc4c,
3443 		0x1fe84, 0x1fe8c,
3444 		0x1fec0, 0x1fec0,
3445 		0x1fee0, 0x1fee0,
3446 		0x1ff00, 0x1ff84,
3447 		0x1ffc0, 0x1ffc8,
3448 		0x20000, 0x2002c,
3449 		0x20100, 0x2013c,
3450 		0x20190, 0x201c8,
3451 		0x20200, 0x20318,
3452 		0x20400, 0x20528,
3453 		0x20540, 0x20614,
3454 		0x21000, 0x21040,
3455 		0x2104c, 0x21060,
3456 		0x210c0, 0x210ec,
3457 		0x21200, 0x21268,
3458 		0x21270, 0x21284,
3459 		0x212fc, 0x21388,
3460 		0x21400, 0x21404,
3461 		0x21500, 0x21518,
3462 		0x2152c, 0x2153c,
3463 		0x21550, 0x21554,
3464 		0x21600, 0x21600,
3465 		0x21608, 0x21628,
3466 		0x21630, 0x2163c,
3467 		0x21700, 0x2171c,
3468 		0x21780, 0x2178c,
3469 		0x21800, 0x21c38,
3470 		0x21c80, 0x21d7c,
3471 		0x21e00, 0x21e04,
3472 		0x22000, 0x2202c,
3473 		0x22100, 0x2213c,
3474 		0x22190, 0x221c8,
3475 		0x22200, 0x22318,
3476 		0x22400, 0x22528,
3477 		0x22540, 0x22614,
3478 		0x23000, 0x23040,
3479 		0x2304c, 0x23060,
3480 		0x230c0, 0x230ec,
3481 		0x23200, 0x23268,
3482 		0x23270, 0x23284,
3483 		0x232fc, 0x23388,
3484 		0x23400, 0x23404,
3485 		0x23500, 0x23518,
3486 		0x2352c, 0x2353c,
3487 		0x23550, 0x23554,
3488 		0x23600, 0x23600,
3489 		0x23608, 0x23628,
3490 		0x23630, 0x2363c,
3491 		0x23700, 0x2371c,
3492 		0x23780, 0x2378c,
3493 		0x23800, 0x23c38,
3494 		0x23c80, 0x23d7c,
3495 		0x23e00, 0x23e04,
3496 		0x24000, 0x2402c,
3497 		0x24100, 0x2413c,
3498 		0x24190, 0x241c8,
3499 		0x24200, 0x24318,
3500 		0x24400, 0x24528,
3501 		0x24540, 0x24614,
3502 		0x25000, 0x25040,
3503 		0x2504c, 0x25060,
3504 		0x250c0, 0x250ec,
3505 		0x25200, 0x25268,
3506 		0x25270, 0x25284,
3507 		0x252fc, 0x25388,
3508 		0x25400, 0x25404,
3509 		0x25500, 0x25518,
3510 		0x2552c, 0x2553c,
3511 		0x25550, 0x25554,
3512 		0x25600, 0x25600,
3513 		0x25608, 0x25628,
3514 		0x25630, 0x2563c,
3515 		0x25700, 0x2571c,
3516 		0x25780, 0x2578c,
3517 		0x25800, 0x25c38,
3518 		0x25c80, 0x25d7c,
3519 		0x25e00, 0x25e04,
3520 		0x26000, 0x2602c,
3521 		0x26100, 0x2613c,
3522 		0x26190, 0x261c8,
3523 		0x26200, 0x26318,
3524 		0x26400, 0x26528,
3525 		0x26540, 0x26614,
3526 		0x27000, 0x27040,
3527 		0x2704c, 0x27060,
3528 		0x270c0, 0x270ec,
3529 		0x27200, 0x27268,
3530 		0x27270, 0x27284,
3531 		0x272fc, 0x27388,
3532 		0x27400, 0x27404,
3533 		0x27500, 0x27518,
3534 		0x2752c, 0x2753c,
3535 		0x27550, 0x27554,
3536 		0x27600, 0x27600,
3537 		0x27608, 0x27628,
3538 		0x27630, 0x2763c,
3539 		0x27700, 0x2771c,
3540 		0x27780, 0x2778c,
3541 		0x27800, 0x27c38,
3542 		0x27c80, 0x27d7c,
3543 		0x27e00, 0x27e04
3544 	};
3545 	static const unsigned int t5_reg_ranges[] = {
3546 		0x1008, 0x1148,
3547 		0x1180, 0x11b4,
3548 		0x11fc, 0x123c,
3549 		0x1280, 0x173c,
3550 		0x1800, 0x18fc,
3551 		0x3000, 0x3028,
3552 		0x3060, 0x30d8,
3553 		0x30e0, 0x30fc,
3554 		0x3140, 0x357c,
3555 		0x35a8, 0x35cc,
3556 		0x35ec, 0x35ec,
3557 		0x3600, 0x5624,
3558 		0x56cc, 0x575c,
3559 		0x580c, 0x5814,
3560 		0x5890, 0x58bc,
3561 		0x5940, 0x59dc,
3562 		0x59fc, 0x5a18,
3563 		0x5a60, 0x5a9c,
3564 		0x5b94, 0x5bfc,
3565 		0x6000, 0x6040,
3566 		0x6058, 0x614c,
3567 		0x7700, 0x7798,
3568 		0x77c0, 0x78fc,
3569 		0x7b00, 0x7c54,
3570 		0x7d00, 0x7efc,
3571 		0x8dc0, 0x8de0,
3572 		0x8df8, 0x8e84,
3573 		0x8ea0, 0x8f84,
3574 		0x8fc0, 0x90f8,
3575 		0x9400, 0x9470,
3576 		0x9600, 0x96f4,
3577 		0x9800, 0x9808,
3578 		0x9820, 0x983c,
3579 		0x9850, 0x9864,
3580 		0x9c00, 0x9c6c,
3581 		0x9c80, 0x9cec,
3582 		0x9d00, 0x9d6c,
3583 		0x9d80, 0x9dec,
3584 		0x9e00, 0x9e6c,
3585 		0x9e80, 0x9eec,
3586 		0x9f00, 0x9f6c,
3587 		0x9f80, 0xa020,
3588 		0xd004, 0xd03c,
3589 		0xdfc0, 0xdfe0,
3590 		0xe000, 0x11088,
3591 		0x1109c, 0x1117c,
3592 		0x11190, 0x11204,
3593 		0x19040, 0x1906c,
3594 		0x19078, 0x19080,
3595 		0x1908c, 0x19124,
3596 		0x19150, 0x191b0,
3597 		0x191d0, 0x191e8,
3598 		0x19238, 0x19290,
3599 		0x193f8, 0x19474,
3600 		0x19490, 0x194cc,
3601 		0x194f0, 0x194f8,
3602 		0x19c00, 0x19c60,
3603 		0x19c94, 0x19e10,
3604 		0x19e50, 0x19f34,
3605 		0x19f40, 0x19f50,
3606 		0x19f90, 0x19fe4,
3607 		0x1a000, 0x1a06c,
3608 		0x1a0b0, 0x1a120,
3609 		0x1a128, 0x1a138,
3610 		0x1a190, 0x1a1c4,
3611 		0x1a1fc, 0x1a1fc,
3612 		0x1e008, 0x1e00c,
3613 		0x1e040, 0x1e04c,
3614 		0x1e284, 0x1e290,
3615 		0x1e2c0, 0x1e2c0,
3616 		0x1e2e0, 0x1e2e0,
3617 		0x1e300, 0x1e384,
3618 		0x1e3c0, 0x1e3c8,
3619 		0x1e408, 0x1e40c,
3620 		0x1e440, 0x1e44c,
3621 		0x1e684, 0x1e690,
3622 		0x1e6c0, 0x1e6c0,
3623 		0x1e6e0, 0x1e6e0,
3624 		0x1e700, 0x1e784,
3625 		0x1e7c0, 0x1e7c8,
3626 		0x1e808, 0x1e80c,
3627 		0x1e840, 0x1e84c,
3628 		0x1ea84, 0x1ea90,
3629 		0x1eac0, 0x1eac0,
3630 		0x1eae0, 0x1eae0,
3631 		0x1eb00, 0x1eb84,
3632 		0x1ebc0, 0x1ebc8,
3633 		0x1ec08, 0x1ec0c,
3634 		0x1ec40, 0x1ec4c,
3635 		0x1ee84, 0x1ee90,
3636 		0x1eec0, 0x1eec0,
3637 		0x1eee0, 0x1eee0,
3638 		0x1ef00, 0x1ef84,
3639 		0x1efc0, 0x1efc8,
3640 		0x1f008, 0x1f00c,
3641 		0x1f040, 0x1f04c,
3642 		0x1f284, 0x1f290,
3643 		0x1f2c0, 0x1f2c0,
3644 		0x1f2e0, 0x1f2e0,
3645 		0x1f300, 0x1f384,
3646 		0x1f3c0, 0x1f3c8,
3647 		0x1f408, 0x1f40c,
3648 		0x1f440, 0x1f44c,
3649 		0x1f684, 0x1f690,
3650 		0x1f6c0, 0x1f6c0,
3651 		0x1f6e0, 0x1f6e0,
3652 		0x1f700, 0x1f784,
3653 		0x1f7c0, 0x1f7c8,
3654 		0x1f808, 0x1f80c,
3655 		0x1f840, 0x1f84c,
3656 		0x1fa84, 0x1fa90,
3657 		0x1fac0, 0x1fac0,
3658 		0x1fae0, 0x1fae0,
3659 		0x1fb00, 0x1fb84,
3660 		0x1fbc0, 0x1fbc8,
3661 		0x1fc08, 0x1fc0c,
3662 		0x1fc40, 0x1fc4c,
3663 		0x1fe84, 0x1fe90,
3664 		0x1fec0, 0x1fec0,
3665 		0x1fee0, 0x1fee0,
3666 		0x1ff00, 0x1ff84,
3667 		0x1ffc0, 0x1ffc8,
3668 		0x30000, 0x30030,
3669 		0x30100, 0x30144,
3670 		0x30190, 0x301d0,
3671 		0x30200, 0x30318,
3672 		0x30400, 0x3052c,
3673 		0x30540, 0x3061c,
3674 		0x30800, 0x30834,
3675 		0x308c0, 0x30908,
3676 		0x30910, 0x309ac,
3677 		0x30a00, 0x30a2c,
3678 		0x30a44, 0x30a50,
3679 		0x30a74, 0x30c24,
3680 		0x30d00, 0x30d00,
3681 		0x30d08, 0x30d14,
3682 		0x30d1c, 0x30d20,
3683 		0x30d3c, 0x30d50,
3684 		0x31200, 0x3120c,
3685 		0x31220, 0x31220,
3686 		0x31240, 0x31240,
3687 		0x31600, 0x3160c,
3688 		0x31a00, 0x31a1c,
3689 		0x31e00, 0x31e20,
3690 		0x31e38, 0x31e3c,
3691 		0x31e80, 0x31e80,
3692 		0x31e88, 0x31ea8,
3693 		0x31eb0, 0x31eb4,
3694 		0x31ec8, 0x31ed4,
3695 		0x31fb8, 0x32004,
3696 		0x32200, 0x32200,
3697 		0x32208, 0x32240,
3698 		0x32248, 0x32280,
3699 		0x32288, 0x322c0,
3700 		0x322c8, 0x322fc,
3701 		0x32600, 0x32630,
3702 		0x32a00, 0x32abc,
3703 		0x32b00, 0x32b70,
3704 		0x33000, 0x33048,
3705 		0x33060, 0x3309c,
3706 		0x330f0, 0x33148,
3707 		0x33160, 0x3319c,
3708 		0x331f0, 0x332e4,
3709 		0x332f8, 0x333e4,
3710 		0x333f8, 0x33448,
3711 		0x33460, 0x3349c,
3712 		0x334f0, 0x33548,
3713 		0x33560, 0x3359c,
3714 		0x335f0, 0x336e4,
3715 		0x336f8, 0x337e4,
3716 		0x337f8, 0x337fc,
3717 		0x33814, 0x33814,
3718 		0x3382c, 0x3382c,
3719 		0x33880, 0x3388c,
3720 		0x338e8, 0x338ec,
3721 		0x33900, 0x33948,
3722 		0x33960, 0x3399c,
3723 		0x339f0, 0x33ae4,
3724 		0x33af8, 0x33b10,
3725 		0x33b28, 0x33b28,
3726 		0x33b3c, 0x33b50,
3727 		0x33bf0, 0x33c10,
3728 		0x33c28, 0x33c28,
3729 		0x33c3c, 0x33c50,
3730 		0x33cf0, 0x33cfc,
3731 		0x34000, 0x34030,
3732 		0x34100, 0x34144,
3733 		0x34190, 0x341d0,
3734 		0x34200, 0x34318,
3735 		0x34400, 0x3452c,
3736 		0x34540, 0x3461c,
3737 		0x34800, 0x34834,
3738 		0x348c0, 0x34908,
3739 		0x34910, 0x349ac,
3740 		0x34a00, 0x34a2c,
3741 		0x34a44, 0x34a50,
3742 		0x34a74, 0x34c24,
3743 		0x34d00, 0x34d00,
3744 		0x34d08, 0x34d14,
3745 		0x34d1c, 0x34d20,
3746 		0x34d3c, 0x34d50,
3747 		0x35200, 0x3520c,
3748 		0x35220, 0x35220,
3749 		0x35240, 0x35240,
3750 		0x35600, 0x3560c,
3751 		0x35a00, 0x35a1c,
3752 		0x35e00, 0x35e20,
3753 		0x35e38, 0x35e3c,
3754 		0x35e80, 0x35e80,
3755 		0x35e88, 0x35ea8,
3756 		0x35eb0, 0x35eb4,
3757 		0x35ec8, 0x35ed4,
3758 		0x35fb8, 0x36004,
3759 		0x36200, 0x36200,
3760 		0x36208, 0x36240,
3761 		0x36248, 0x36280,
3762 		0x36288, 0x362c0,
3763 		0x362c8, 0x362fc,
3764 		0x36600, 0x36630,
3765 		0x36a00, 0x36abc,
3766 		0x36b00, 0x36b70,
3767 		0x37000, 0x37048,
3768 		0x37060, 0x3709c,
3769 		0x370f0, 0x37148,
3770 		0x37160, 0x3719c,
3771 		0x371f0, 0x372e4,
3772 		0x372f8, 0x373e4,
3773 		0x373f8, 0x37448,
3774 		0x37460, 0x3749c,
3775 		0x374f0, 0x37548,
3776 		0x37560, 0x3759c,
3777 		0x375f0, 0x376e4,
3778 		0x376f8, 0x377e4,
3779 		0x377f8, 0x377fc,
3780 		0x37814, 0x37814,
3781 		0x3782c, 0x3782c,
3782 		0x37880, 0x3788c,
3783 		0x378e8, 0x378ec,
3784 		0x37900, 0x37948,
3785 		0x37960, 0x3799c,
3786 		0x379f0, 0x37ae4,
3787 		0x37af8, 0x37b10,
3788 		0x37b28, 0x37b28,
3789 		0x37b3c, 0x37b50,
3790 		0x37bf0, 0x37c10,
3791 		0x37c28, 0x37c28,
3792 		0x37c3c, 0x37c50,
3793 		0x37cf0, 0x37cfc,
3794 		0x38000, 0x38030,
3795 		0x38100, 0x38144,
3796 		0x38190, 0x381d0,
3797 		0x38200, 0x38318,
3798 		0x38400, 0x3852c,
3799 		0x38540, 0x3861c,
3800 		0x38800, 0x38834,
3801 		0x388c0, 0x38908,
3802 		0x38910, 0x389ac,
3803 		0x38a00, 0x38a2c,
3804 		0x38a44, 0x38a50,
3805 		0x38a74, 0x38c24,
3806 		0x38d00, 0x38d00,
3807 		0x38d08, 0x38d14,
3808 		0x38d1c, 0x38d20,
3809 		0x38d3c, 0x38d50,
3810 		0x39200, 0x3920c,
3811 		0x39220, 0x39220,
3812 		0x39240, 0x39240,
3813 		0x39600, 0x3960c,
3814 		0x39a00, 0x39a1c,
3815 		0x39e00, 0x39e20,
3816 		0x39e38, 0x39e3c,
3817 		0x39e80, 0x39e80,
3818 		0x39e88, 0x39ea8,
3819 		0x39eb0, 0x39eb4,
3820 		0x39ec8, 0x39ed4,
3821 		0x39fb8, 0x3a004,
3822 		0x3a200, 0x3a200,
3823 		0x3a208, 0x3a240,
3824 		0x3a248, 0x3a280,
3825 		0x3a288, 0x3a2c0,
3826 		0x3a2c8, 0x3a2fc,
3827 		0x3a600, 0x3a630,
3828 		0x3aa00, 0x3aabc,
3829 		0x3ab00, 0x3ab70,
3830 		0x3b000, 0x3b048,
3831 		0x3b060, 0x3b09c,
3832 		0x3b0f0, 0x3b148,
3833 		0x3b160, 0x3b19c,
3834 		0x3b1f0, 0x3b2e4,
3835 		0x3b2f8, 0x3b3e4,
3836 		0x3b3f8, 0x3b448,
3837 		0x3b460, 0x3b49c,
3838 		0x3b4f0, 0x3b548,
3839 		0x3b560, 0x3b59c,
3840 		0x3b5f0, 0x3b6e4,
3841 		0x3b6f8, 0x3b7e4,
3842 		0x3b7f8, 0x3b7fc,
3843 		0x3b814, 0x3b814,
3844 		0x3b82c, 0x3b82c,
3845 		0x3b880, 0x3b88c,
3846 		0x3b8e8, 0x3b8ec,
3847 		0x3b900, 0x3b948,
3848 		0x3b960, 0x3b99c,
3849 		0x3b9f0, 0x3bae4,
3850 		0x3baf8, 0x3bb10,
3851 		0x3bb28, 0x3bb28,
3852 		0x3bb3c, 0x3bb50,
3853 		0x3bbf0, 0x3bc10,
3854 		0x3bc28, 0x3bc28,
3855 		0x3bc3c, 0x3bc50,
3856 		0x3bcf0, 0x3bcfc,
3857 		0x3c000, 0x3c030,
3858 		0x3c100, 0x3c144,
3859 		0x3c190, 0x3c1d0,
3860 		0x3c200, 0x3c318,
3861 		0x3c400, 0x3c52c,
3862 		0x3c540, 0x3c61c,
3863 		0x3c800, 0x3c834,
3864 		0x3c8c0, 0x3c908,
3865 		0x3c910, 0x3c9ac,
3866 		0x3ca00, 0x3ca2c,
3867 		0x3ca44, 0x3ca50,
3868 		0x3ca74, 0x3cc24,
3869 		0x3cd00, 0x3cd00,
3870 		0x3cd08, 0x3cd14,
3871 		0x3cd1c, 0x3cd20,
3872 		0x3cd3c, 0x3cd50,
3873 		0x3d200, 0x3d20c,
3874 		0x3d220, 0x3d220,
3875 		0x3d240, 0x3d240,
3876 		0x3d600, 0x3d60c,
3877 		0x3da00, 0x3da1c,
3878 		0x3de00, 0x3de20,
3879 		0x3de38, 0x3de3c,
3880 		0x3de80, 0x3de80,
3881 		0x3de88, 0x3dea8,
3882 		0x3deb0, 0x3deb4,
3883 		0x3dec8, 0x3ded4,
3884 		0x3dfb8, 0x3e004,
3885 		0x3e200, 0x3e200,
3886 		0x3e208, 0x3e240,
3887 		0x3e248, 0x3e280,
3888 		0x3e288, 0x3e2c0,
3889 		0x3e2c8, 0x3e2fc,
3890 		0x3e600, 0x3e630,
3891 		0x3ea00, 0x3eabc,
3892 		0x3eb00, 0x3eb70,
3893 		0x3f000, 0x3f048,
3894 		0x3f060, 0x3f09c,
3895 		0x3f0f0, 0x3f148,
3896 		0x3f160, 0x3f19c,
3897 		0x3f1f0, 0x3f2e4,
3898 		0x3f2f8, 0x3f3e4,
3899 		0x3f3f8, 0x3f448,
3900 		0x3f460, 0x3f49c,
3901 		0x3f4f0, 0x3f548,
3902 		0x3f560, 0x3f59c,
3903 		0x3f5f0, 0x3f6e4,
3904 		0x3f6f8, 0x3f7e4,
3905 		0x3f7f8, 0x3f7fc,
3906 		0x3f814, 0x3f814,
3907 		0x3f82c, 0x3f82c,
3908 		0x3f880, 0x3f88c,
3909 		0x3f8e8, 0x3f8ec,
3910 		0x3f900, 0x3f948,
3911 		0x3f960, 0x3f99c,
3912 		0x3f9f0, 0x3fae4,
3913 		0x3faf8, 0x3fb10,
3914 		0x3fb28, 0x3fb28,
3915 		0x3fb3c, 0x3fb50,
3916 		0x3fbf0, 0x3fc10,
3917 		0x3fc28, 0x3fc28,
3918 		0x3fc3c, 0x3fc50,
3919 		0x3fcf0, 0x3fcfc,
3920 		0x40000, 0x4000c,
3921 		0x40040, 0x40068,
3922 		0x4007c, 0x40144,
3923 		0x40180, 0x4018c,
3924 		0x40200, 0x40298,
3925 		0x402ac, 0x4033c,
3926 		0x403f8, 0x403fc,
3927 		0x41304, 0x413c4,
3928 		0x41400, 0x4141c,
3929 		0x41480, 0x414d0,
3930 		0x44000, 0x44078,
3931 		0x440c0, 0x44278,
3932 		0x442c0, 0x44478,
3933 		0x444c0, 0x44678,
3934 		0x446c0, 0x44878,
3935 		0x448c0, 0x449fc,
3936 		0x45000, 0x45068,
3937 		0x45080, 0x45084,
3938 		0x450a0, 0x450b0,
3939 		0x45200, 0x45268,
3940 		0x45280, 0x45284,
3941 		0x452a0, 0x452b0,
3942 		0x460c0, 0x460e4,
3943 		0x47000, 0x4708c,
3944 		0x47200, 0x47250,
3945 		0x47400, 0x47420,
3946 		0x47600, 0x47618,
3947 		0x47800, 0x47814,
3948 		0x48000, 0x4800c,
3949 		0x48040, 0x48068,
3950 		0x4807c, 0x48144,
3951 		0x48180, 0x4818c,
3952 		0x48200, 0x48298,
3953 		0x482ac, 0x4833c,
3954 		0x483f8, 0x483fc,
3955 		0x49304, 0x493c4,
3956 		0x49400, 0x4941c,
3957 		0x49480, 0x494d0,
3958 		0x4c000, 0x4c078,
3959 		0x4c0c0, 0x4c278,
3960 		0x4c2c0, 0x4c478,
3961 		0x4c4c0, 0x4c678,
3962 		0x4c6c0, 0x4c878,
3963 		0x4c8c0, 0x4c9fc,
3964 		0x4d000, 0x4d068,
3965 		0x4d080, 0x4d084,
3966 		0x4d0a0, 0x4d0b0,
3967 		0x4d200, 0x4d268,
3968 		0x4d280, 0x4d284,
3969 		0x4d2a0, 0x4d2b0,
3970 		0x4e0c0, 0x4e0e4,
3971 		0x4f000, 0x4f08c,
3972 		0x4f200, 0x4f250,
3973 		0x4f400, 0x4f420,
3974 		0x4f600, 0x4f618,
3975 		0x4f800, 0x4f814,
3976 		0x50000, 0x500cc,
3977 		0x50400, 0x50400,
3978 		0x50800, 0x508cc,
3979 		0x50c00, 0x50c00,
3980 		0x51000, 0x5101c,
3981 		0x51300, 0x51308,
3982 	};
3983 
3984 	if (is_t4(sc)) {
3985 		reg_ranges = &t4_reg_ranges[0];
3986 		n = nitems(t4_reg_ranges);
3987 	} else {
3988 		reg_ranges = &t5_reg_ranges[0];
3989 		n = nitems(t5_reg_ranges);
3990 	}
3991 
3992 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
3993 	for (i = 0; i < n; i += 2)
3994 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3995 }
3996 
3997 static void
3998 cxgbe_tick(void *arg)
3999 {
4000 	struct port_info *pi = arg;
4001 	struct ifnet *ifp = pi->ifp;
4002 	struct sge_txq *txq;
4003 	int i, drops;
4004 	struct port_stats *s = &pi->stats;
4005 
4006 	PORT_LOCK(pi);
4007 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4008 		PORT_UNLOCK(pi);
4009 		return;	/* without scheduling another callout */
4010 	}
4011 
4012 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4013 
4014 	ifp->if_opackets = s->tx_frames - s->tx_pause;
4015 	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4016 	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4017 	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4018 	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4019 	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4020 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4021 	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4022 	    s->rx_trunc3;
4023 
4024 	drops = s->tx_drop;
4025 	for_each_txq(pi, i, txq)
4026 		drops += txq->br->br_drops;
4027 	ifp->if_snd.ifq_drops = drops;
4028 
4029 	ifp->if_oerrors = s->tx_error_frames;
4030 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4031 	    s->rx_fcs_err + s->rx_len_err;
4032 
4033 	callout_schedule(&pi->tick, hz);
4034 	PORT_UNLOCK(pi);
4035 }
4036 
4037 static void
4038 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4039 {
4040 	struct ifnet *vlan;
4041 
4042 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4043 		return;
4044 
4045 	vlan = VLAN_DEVAT(ifp, vid);
4046 	VLAN_SETCOOKIE(vlan, ifp);
4047 }
4048 
4049 static int
4050 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4051 {
4052 
4053 #ifdef INVARIANTS
4054 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4055 	    __func__, rss->opcode, iq, m);
4056 #else
4057 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4058 	    __func__, rss->opcode, iq, m);
4059 	m_freem(m);
4060 #endif
4061 	return (EDOOFUS);
4062 }
4063 
4064 int
4065 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4066 {
4067 	uintptr_t *loc, new;
4068 
4069 	if (opcode >= nitems(sc->cpl_handler))
4070 		return (EINVAL);
4071 
4072 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4073 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4074 	atomic_store_rel_ptr(loc, new);
4075 
4076 	return (0);
4077 }
4078 
4079 static int
4080 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4081 {
4082 
4083 #ifdef INVARIANTS
4084 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4085 #else
4086 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4087 	    __func__, iq, ctrl);
4088 #endif
4089 	return (EDOOFUS);
4090 }
4091 
4092 int
4093 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4094 {
4095 	uintptr_t *loc, new;
4096 
4097 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4098 	loc = (uintptr_t *) &sc->an_handler;
4099 	atomic_store_rel_ptr(loc, new);
4100 
4101 	return (0);
4102 }
4103 
4104 static int
4105 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4106 {
4107 	const struct cpl_fw6_msg *cpl =
4108 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4109 
4110 #ifdef INVARIANTS
4111 	panic("%s: fw_msg type %d", __func__, cpl->type);
4112 #else
4113 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4114 #endif
4115 	return (EDOOFUS);
4116 }
4117 
4118 int
4119 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4120 {
4121 	uintptr_t *loc, new;
4122 
4123 	if (type >= nitems(sc->fw_msg_handler))
4124 		return (EINVAL);
4125 
4126 	/*
4127 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4128 	 * handler dispatch table.  Reject any attempt to install a handler for
4129 	 * this subtype.
4130 	 */
4131 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4132 		return (EINVAL);
4133 
4134 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4135 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4136 	atomic_store_rel_ptr(loc, new);
4137 
4138 	return (0);
4139 }
4140 
4141 static int
4142 t4_sysctls(struct adapter *sc)
4143 {
4144 	struct sysctl_ctx_list *ctx;
4145 	struct sysctl_oid *oid;
4146 	struct sysctl_oid_list *children, *c0;
4147 	static char *caps[] = {
4148 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4149 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4150 		"\20\1TOE",				/* caps[2] toecaps */
4151 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4152 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4153 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4154 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4155 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4156 	};
4157 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4158 
4159 	ctx = device_get_sysctl_ctx(sc->dev);
4160 
4161 	/*
4162 	 * dev.t4nex.X.
4163 	 */
4164 	oid = device_get_sysctl_tree(sc->dev);
4165 	c0 = children = SYSCTL_CHILDREN(oid);
4166 
4167 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4168 	    sc->params.nports, "# of ports");
4169 
4170 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4171 	    NULL, chip_rev(sc), "chip hardware revision");
4172 
4173 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4174 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4175 
4176 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4177 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4178 
4179 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4180 	    sc->cfcsum, "config file checksum");
4181 
4182 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4183 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4184 	    sysctl_bitfield, "A", "available doorbells");
4185 
4186 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4187 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4188 	    sysctl_bitfield, "A", "available link capabilities");
4189 
4190 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4191 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4192 	    sysctl_bitfield, "A", "available NIC capabilities");
4193 
4194 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4195 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4196 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4197 
4198 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4199 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4200 	    sysctl_bitfield, "A", "available RDMA capabilities");
4201 
4202 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4203 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4204 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4205 
4206 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4207 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4208 	    sysctl_bitfield, "A", "available FCoE capabilities");
4209 
4210 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4211 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4212 
4213 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4214 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4215 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4216 	    "interrupt holdoff timer values (us)");
4217 
4218 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4219 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4220 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4221 	    "interrupt holdoff packet counter values");
4222 
4223 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4224 	    NULL, sc->tids.nftids, "number of filters");
4225 
4226 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4227 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4228 	    "chip temperature (in Celsius)");
4229 
4230 	t4_sge_sysctls(sc, ctx, children);
4231 
4232 	sc->lro_timeout = 100;
4233 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4234 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4235 
4236 #ifdef SBUF_DRAIN
4237 	/*
4238 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4239 	 */
4240 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4241 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4242 	    "logs and miscellaneous information");
4243 	children = SYSCTL_CHILDREN(oid);
4244 
4245 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4246 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4247 	    sysctl_cctrl, "A", "congestion control");
4248 
4249 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4250 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4251 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4252 
4253 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4254 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4255 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4256 
4257 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4258 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4259 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4260 
4261 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4262 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4263 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4264 
4265 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4266 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4267 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4268 
4269 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4270 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4271 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4272 
4273 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4274 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4275 	    sysctl_cim_la, "A", "CIM logic analyzer");
4276 
4277 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4278 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4279 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4280 
4281 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4282 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4283 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4284 
4285 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4286 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4287 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4288 
4289 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4290 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4291 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4292 
4293 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4294 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4295 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4296 
4297 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4298 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4299 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4300 
4301 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4302 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4303 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4304 
4305 	if (is_t5(sc)) {
4306 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4307 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4308 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4309 
4310 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4311 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4312 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4313 	}
4314 
4315 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4316 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4317 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4318 
4319 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4320 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4321 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4322 
4323 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4324 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4325 	    sysctl_cpl_stats, "A", "CPL statistics");
4326 
4327 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4328 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4329 	    sysctl_ddp_stats, "A", "DDP statistics");
4330 
4331 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4332 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4333 	    sysctl_devlog, "A", "firmware's device log");
4334 
4335 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4336 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4337 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4338 
4339 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4340 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4341 	    sysctl_hw_sched, "A", "hardware scheduler ");
4342 
4343 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4344 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4345 	    sysctl_l2t, "A", "hardware L2 table");
4346 
4347 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4348 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4349 	    sysctl_lb_stats, "A", "loopback statistics");
4350 
4351 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4352 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4353 	    sysctl_meminfo, "A", "memory regions");
4354 
4355 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4356 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4357 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4358 
4359 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4360 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4361 	    sysctl_path_mtus, "A", "path MTUs");
4362 
4363 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4364 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4365 	    sysctl_pm_stats, "A", "PM statistics");
4366 
4367 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4368 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4369 	    sysctl_rdma_stats, "A", "RDMA statistics");
4370 
4371 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4372 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4373 	    sysctl_tcp_stats, "A", "TCP statistics");
4374 
4375 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4376 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4377 	    sysctl_tids, "A", "TID information");
4378 
4379 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4380 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4381 	    sysctl_tp_err_stats, "A", "TP error statistics");
4382 
4383 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4384 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4385 	    sysctl_tp_la, "A", "TP logic analyzer");
4386 
4387 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4388 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4389 	    sysctl_tx_rate, "A", "Tx rate");
4390 
4391 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4392 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4393 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4394 
4395 	if (is_t5(sc)) {
4396 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4397 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4398 		    sysctl_wcwr_stats, "A", "write combined work requests");
4399 	}
4400 #endif
4401 
4402 #ifdef TCP_OFFLOAD
4403 	if (is_offload(sc)) {
4404 		/*
4405 		 * dev.t4nex.X.toe.
4406 		 */
4407 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4408 		    NULL, "TOE parameters");
4409 		children = SYSCTL_CHILDREN(oid);
4410 
4411 		sc->tt.sndbuf = 256 * 1024;
4412 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4413 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4414 
4415 		sc->tt.ddp = 0;
4416 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4417 		    &sc->tt.ddp, 0, "DDP allowed");
4418 
4419 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4420 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4421 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4422 
4423 		sc->tt.ddp_thres =
4424 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4425 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4426 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4427 
4428 		sc->tt.rx_coalesce = 1;
4429 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4430 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4431 	}
4432 #endif
4433 
4434 
4435 	return (0);
4436 }
4437 
4438 static int
4439 cxgbe_sysctls(struct port_info *pi)
4440 {
4441 	struct sysctl_ctx_list *ctx;
4442 	struct sysctl_oid *oid;
4443 	struct sysctl_oid_list *children;
4444 
4445 	ctx = device_get_sysctl_ctx(pi->dev);
4446 
4447 	/*
4448 	 * dev.cxgbe.X.
4449 	 */
4450 	oid = device_get_sysctl_tree(pi->dev);
4451 	children = SYSCTL_CHILDREN(oid);
4452 
4453 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4454 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4455 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4456 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4457 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4458 		    "PHY temperature (in Celsius)");
4459 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4460 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4461 		    "PHY firmware version");
4462 	}
4463 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4464 	    &pi->nrxq, 0, "# of rx queues");
4465 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4466 	    &pi->ntxq, 0, "# of tx queues");
4467 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4468 	    &pi->first_rxq, 0, "index of first rx queue");
4469 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4470 	    &pi->first_txq, 0, "index of first tx queue");
4471 
4472 #ifdef TCP_OFFLOAD
4473 	if (is_offload(pi->adapter)) {
4474 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4475 		    &pi->nofldrxq, 0,
4476 		    "# of rx queues for offloaded TCP connections");
4477 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4478 		    &pi->nofldtxq, 0,
4479 		    "# of tx queues for offloaded TCP connections");
4480 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4481 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4482 		    "index of first TOE rx queue");
4483 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4484 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4485 		    "index of first TOE tx queue");
4486 	}
4487 #endif
4488 
4489 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4490 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4491 	    "holdoff timer index");
4492 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4493 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4494 	    "holdoff packet counter index");
4495 
4496 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4497 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4498 	    "rx queue size");
4499 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4500 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4501 	    "tx queue size");
4502 
4503 	/*
4504 	 * dev.cxgbe.X.stats.
4505 	 */
4506 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4507 	    NULL, "port statistics");
4508 	children = SYSCTL_CHILDREN(oid);
4509 
4510 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4511 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4512 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4513 	    sysctl_handle_t4_reg64, "QU", desc)
4514 
4515 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4516 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4517 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4518 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4519 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4520 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4521 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4522 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4523 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4524 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4525 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4526 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4527 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4528 	    "# of tx frames in this range",
4529 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4530 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4531 	    "# of tx frames in this range",
4532 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4533 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4534 	    "# of tx frames in this range",
4535 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4536 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4537 	    "# of tx frames in this range",
4538 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4539 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4540 	    "# of tx frames in this range",
4541 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4542 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4543 	    "# of tx frames in this range",
4544 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4545 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4546 	    "# of tx frames in this range",
4547 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4548 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4549 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4550 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4551 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4552 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4553 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4554 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4555 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4556 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4557 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4558 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4559 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4560 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4561 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4562 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4563 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4564 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4565 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4566 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4567 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4568 
4569 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4570 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4571 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4572 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4573 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4574 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4575 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4576 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4577 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4578 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4579 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4580 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4581 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4582 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4583 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4584 	    "# of frames received with bad FCS",
4585 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4586 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4587 	    "# of frames received with length error",
4588 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4589 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4590 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4591 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4592 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4593 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4594 	    "# of rx frames in this range",
4595 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4596 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4597 	    "# of rx frames in this range",
4598 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4599 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4600 	    "# of rx frames in this range",
4601 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4602 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4603 	    "# of rx frames in this range",
4604 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4605 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4606 	    "# of rx frames in this range",
4607 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4608 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4609 	    "# of rx frames in this range",
4610 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4611 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4612 	    "# of rx frames in this range",
4613 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4614 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4615 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4616 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4617 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4618 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4619 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4620 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4621 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4622 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4623 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4624 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4625 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4626 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4627 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4628 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4629 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4630 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4631 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4632 
4633 #undef SYSCTL_ADD_T4_REG64
4634 
4635 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4636 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4637 	    &pi->stats.name, desc)
4638 
4639 	/* We get these from port_stats and they may be stale by upto 1s */
4640 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4641 	    "# drops due to buffer-group 0 overflows");
4642 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4643 	    "# drops due to buffer-group 1 overflows");
4644 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4645 	    "# drops due to buffer-group 2 overflows");
4646 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4647 	    "# drops due to buffer-group 3 overflows");
4648 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4649 	    "# of buffer-group 0 truncated packets");
4650 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4651 	    "# of buffer-group 1 truncated packets");
4652 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4653 	    "# of buffer-group 2 truncated packets");
4654 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4655 	    "# of buffer-group 3 truncated packets");
4656 
4657 #undef SYSCTL_ADD_T4_PORTSTAT
4658 
4659 	return (0);
4660 }
4661 
4662 static int
4663 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4664 {
4665 	int rc, *i;
4666 	struct sbuf sb;
4667 
4668 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4669 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4670 		sbuf_printf(&sb, "%d ", *i);
4671 	sbuf_trim(&sb);
4672 	sbuf_finish(&sb);
4673 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4674 	sbuf_delete(&sb);
4675 	return (rc);
4676 }
4677 
4678 static int
4679 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4680 {
4681 	int rc;
4682 	struct sbuf *sb;
4683 
4684 	rc = sysctl_wire_old_buffer(req, 0);
4685 	if (rc != 0)
4686 		return(rc);
4687 
4688 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4689 	if (sb == NULL)
4690 		return (ENOMEM);
4691 
4692 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4693 	rc = sbuf_finish(sb);
4694 	sbuf_delete(sb);
4695 
4696 	return (rc);
4697 }
4698 
4699 static int
4700 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4701 {
4702 	struct port_info *pi = arg1;
4703 	int op = arg2;
4704 	struct adapter *sc = pi->adapter;
4705 	u_int v;
4706 	int rc;
4707 
4708 	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4709 	if (rc)
4710 		return (rc);
4711 	/* XXX: magic numbers */
4712 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4713 	    &v);
4714 	end_synchronized_op(sc, 0);
4715 	if (rc)
4716 		return (rc);
4717 	if (op == 0)
4718 		v /= 256;
4719 
4720 	rc = sysctl_handle_int(oidp, &v, 0, req);
4721 	return (rc);
4722 }
4723 
4724 static int
4725 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4726 {
4727 	struct port_info *pi = arg1;
4728 	struct adapter *sc = pi->adapter;
4729 	int idx, rc, i;
4730 	struct sge_rxq *rxq;
4731 #ifdef TCP_OFFLOAD
4732 	struct sge_ofld_rxq *ofld_rxq;
4733 #endif
4734 	uint8_t v;
4735 
4736 	idx = pi->tmr_idx;
4737 
4738 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4739 	if (rc != 0 || req->newptr == NULL)
4740 		return (rc);
4741 
4742 	if (idx < 0 || idx >= SGE_NTIMERS)
4743 		return (EINVAL);
4744 
4745 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4746 	    "t4tmr");
4747 	if (rc)
4748 		return (rc);
4749 
4750 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4751 	for_each_rxq(pi, i, rxq) {
4752 #ifdef atomic_store_rel_8
4753 		atomic_store_rel_8(&rxq->iq.intr_params, v);
4754 #else
4755 		rxq->iq.intr_params = v;
4756 #endif
4757 	}
4758 #ifdef TCP_OFFLOAD
4759 	for_each_ofld_rxq(pi, i, ofld_rxq) {
4760 #ifdef atomic_store_rel_8
4761 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4762 #else
4763 		ofld_rxq->iq.intr_params = v;
4764 #endif
4765 	}
4766 #endif
4767 	pi->tmr_idx = idx;
4768 
4769 	end_synchronized_op(sc, LOCK_HELD);
4770 	return (0);
4771 }
4772 
4773 static int
4774 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4775 {
4776 	struct port_info *pi = arg1;
4777 	struct adapter *sc = pi->adapter;
4778 	int idx, rc;
4779 
4780 	idx = pi->pktc_idx;
4781 
4782 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4783 	if (rc != 0 || req->newptr == NULL)
4784 		return (rc);
4785 
4786 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4787 		return (EINVAL);
4788 
4789 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4790 	    "t4pktc");
4791 	if (rc)
4792 		return (rc);
4793 
4794 	if (pi->flags & PORT_INIT_DONE)
4795 		rc = EBUSY; /* cannot be changed once the queues are created */
4796 	else
4797 		pi->pktc_idx = idx;
4798 
4799 	end_synchronized_op(sc, LOCK_HELD);
4800 	return (rc);
4801 }
4802 
4803 static int
4804 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4805 {
4806 	struct port_info *pi = arg1;
4807 	struct adapter *sc = pi->adapter;
4808 	int qsize, rc;
4809 
4810 	qsize = pi->qsize_rxq;
4811 
4812 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4813 	if (rc != 0 || req->newptr == NULL)
4814 		return (rc);
4815 
4816 	if (qsize < 128 || (qsize & 7))
4817 		return (EINVAL);
4818 
4819 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4820 	    "t4rxqs");
4821 	if (rc)
4822 		return (rc);
4823 
4824 	if (pi->flags & PORT_INIT_DONE)
4825 		rc = EBUSY; /* cannot be changed once the queues are created */
4826 	else
4827 		pi->qsize_rxq = qsize;
4828 
4829 	end_synchronized_op(sc, LOCK_HELD);
4830 	return (rc);
4831 }
4832 
4833 static int
4834 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4835 {
4836 	struct port_info *pi = arg1;
4837 	struct adapter *sc = pi->adapter;
4838 	int qsize, rc;
4839 
4840 	qsize = pi->qsize_txq;
4841 
4842 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4843 	if (rc != 0 || req->newptr == NULL)
4844 		return (rc);
4845 
4846 	/* bufring size must be powerof2 */
4847 	if (qsize < 128 || !powerof2(qsize))
4848 		return (EINVAL);
4849 
4850 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4851 	    "t4txqs");
4852 	if (rc)
4853 		return (rc);
4854 
4855 	if (pi->flags & PORT_INIT_DONE)
4856 		rc = EBUSY; /* cannot be changed once the queues are created */
4857 	else
4858 		pi->qsize_txq = qsize;
4859 
4860 	end_synchronized_op(sc, LOCK_HELD);
4861 	return (rc);
4862 }
4863 
4864 static int
4865 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4866 {
4867 	struct adapter *sc = arg1;
4868 	int reg = arg2;
4869 	uint64_t val;
4870 
4871 	val = t4_read_reg64(sc, reg);
4872 
4873 	return (sysctl_handle_64(oidp, &val, 0, req));
4874 }
4875 
4876 static int
4877 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4878 {
4879 	struct adapter *sc = arg1;
4880 	int rc, t;
4881 	uint32_t param, val;
4882 
4883 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4884 	if (rc)
4885 		return (rc);
4886 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4887 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4888 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4889 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4890 	end_synchronized_op(sc, 0);
4891 	if (rc)
4892 		return (rc);
4893 
4894 	/* unknown is returned as 0 but we display -1 in that case */
4895 	t = val == 0 ? -1 : val;
4896 
4897 	rc = sysctl_handle_int(oidp, &t, 0, req);
4898 	return (rc);
4899 }
4900 
4901 #ifdef SBUF_DRAIN
4902 static int
4903 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4904 {
4905 	struct adapter *sc = arg1;
4906 	struct sbuf *sb;
4907 	int rc, i;
4908 	uint16_t incr[NMTUS][NCCTRL_WIN];
4909 	static const char *dec_fac[] = {
4910 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4911 		"0.9375"
4912 	};
4913 
4914 	rc = sysctl_wire_old_buffer(req, 0);
4915 	if (rc != 0)
4916 		return (rc);
4917 
4918 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4919 	if (sb == NULL)
4920 		return (ENOMEM);
4921 
4922 	t4_read_cong_tbl(sc, incr);
4923 
4924 	for (i = 0; i < NCCTRL_WIN; ++i) {
4925 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4926 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4927 		    incr[5][i], incr[6][i], incr[7][i]);
4928 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4929 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4930 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4931 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4932 	}
4933 
4934 	rc = sbuf_finish(sb);
4935 	sbuf_delete(sb);
4936 
4937 	return (rc);
4938 }
4939 
4940 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4941 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4942 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4943 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4944 };
4945 
4946 static int
4947 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4948 {
4949 	struct adapter *sc = arg1;
4950 	struct sbuf *sb;
4951 	int rc, i, n, qid = arg2;
4952 	uint32_t *buf, *p;
4953 	char *qtype;
4954 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4955 
4956 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4957 	    ("%s: bad qid %d\n", __func__, qid));
4958 
4959 	if (qid < CIM_NUM_IBQ) {
4960 		/* inbound queue */
4961 		qtype = "IBQ";
4962 		n = 4 * CIM_IBQ_SIZE;
4963 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4964 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4965 	} else {
4966 		/* outbound queue */
4967 		qtype = "OBQ";
4968 		qid -= CIM_NUM_IBQ;
4969 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4970 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4971 		rc = t4_read_cim_obq(sc, qid, buf, n);
4972 	}
4973 
4974 	if (rc < 0) {
4975 		rc = -rc;
4976 		goto done;
4977 	}
4978 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
4979 
4980 	rc = sysctl_wire_old_buffer(req, 0);
4981 	if (rc != 0)
4982 		goto done;
4983 
4984 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4985 	if (sb == NULL) {
4986 		rc = ENOMEM;
4987 		goto done;
4988 	}
4989 
4990 	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
4991 	for (i = 0, p = buf; i < n; i += 16, p += 4)
4992 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4993 		    p[2], p[3]);
4994 
4995 	rc = sbuf_finish(sb);
4996 	sbuf_delete(sb);
4997 done:
4998 	free(buf, M_CXGBE);
4999 	return (rc);
5000 }
5001 
5002 static int
5003 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5004 {
5005 	struct adapter *sc = arg1;
5006 	u_int cfg;
5007 	struct sbuf *sb;
5008 	uint32_t *buf, *p;
5009 	int rc;
5010 
5011 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5012 	if (rc != 0)
5013 		return (rc);
5014 
5015 	rc = sysctl_wire_old_buffer(req, 0);
5016 	if (rc != 0)
5017 		return (rc);
5018 
5019 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5020 	if (sb == NULL)
5021 		return (ENOMEM);
5022 
5023 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5024 	    M_ZERO | M_WAITOK);
5025 
5026 	rc = -t4_cim_read_la(sc, buf, NULL);
5027 	if (rc != 0)
5028 		goto done;
5029 
5030 	sbuf_printf(sb, "Status   Data      PC%s",
5031 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5032 	    "     LS0Stat  LS0Addr             LS0Data");
5033 
5034 	KASSERT((sc->params.cim_la_size & 7) == 0,
5035 	    ("%s: p will walk off the end of buf", __func__));
5036 
5037 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5038 		if (cfg & F_UPDBGLACAPTPCONLY) {
5039 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5040 			    p[6], p[7]);
5041 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5042 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5043 			    p[4] & 0xff, p[5] >> 8);
5044 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5045 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5046 			    p[1] & 0xf, p[2] >> 4);
5047 		} else {
5048 			sbuf_printf(sb,
5049 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5050 			    "%08x%08x%08x%08x",
5051 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5052 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5053 			    p[6], p[7]);
5054 		}
5055 	}
5056 
5057 	rc = sbuf_finish(sb);
5058 	sbuf_delete(sb);
5059 done:
5060 	free(buf, M_CXGBE);
5061 	return (rc);
5062 }
5063 
5064 static int
5065 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5066 {
5067 	struct adapter *sc = arg1;
5068 	u_int i;
5069 	struct sbuf *sb;
5070 	uint32_t *buf, *p;
5071 	int rc;
5072 
5073 	rc = sysctl_wire_old_buffer(req, 0);
5074 	if (rc != 0)
5075 		return (rc);
5076 
5077 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5078 	if (sb == NULL)
5079 		return (ENOMEM);
5080 
5081 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5082 	    M_ZERO | M_WAITOK);
5083 
5084 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5085 	p = buf;
5086 
5087 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5088 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5089 		    p[1], p[0]);
5090 	}
5091 
5092 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5093 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5094 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5095 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5096 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5097 		    (p[1] >> 2) | ((p[2] & 3) << 30),
5098 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5099 		    p[0] & 1);
5100 	}
5101 
5102 	rc = sbuf_finish(sb);
5103 	sbuf_delete(sb);
5104 	free(buf, M_CXGBE);
5105 	return (rc);
5106 }
5107 
5108 static int
5109 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5110 {
5111 	struct adapter *sc = arg1;
5112 	u_int i;
5113 	struct sbuf *sb;
5114 	uint32_t *buf, *p;
5115 	int rc;
5116 
5117 	rc = sysctl_wire_old_buffer(req, 0);
5118 	if (rc != 0)
5119 		return (rc);
5120 
5121 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5122 	if (sb == NULL)
5123 		return (ENOMEM);
5124 
5125 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5126 	    M_ZERO | M_WAITOK);
5127 
5128 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5129 	p = buf;
5130 
5131 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5132 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5133 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5134 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5135 		    p[4], p[3], p[2], p[1], p[0]);
5136 	}
5137 
5138 	sbuf_printf(sb, "\n\nCntl ID               Data");
5139 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5140 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5141 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5142 	}
5143 
5144 	rc = sbuf_finish(sb);
5145 	sbuf_delete(sb);
5146 	free(buf, M_CXGBE);
5147 	return (rc);
5148 }
5149 
5150 static int
5151 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5152 {
5153 	struct adapter *sc = arg1;
5154 	struct sbuf *sb;
5155 	int rc, i;
5156 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5157 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5158 	uint16_t thres[CIM_NUM_IBQ];
5159 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5160 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5161 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5162 
5163 	if (is_t4(sc)) {
5164 		cim_num_obq = CIM_NUM_OBQ;
5165 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5166 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5167 	} else {
5168 		cim_num_obq = CIM_NUM_OBQ_T5;
5169 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5170 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5171 	}
5172 	nq = CIM_NUM_IBQ + cim_num_obq;
5173 
5174 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5175 	if (rc == 0)
5176 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5177 	if (rc != 0)
5178 		return (rc);
5179 
5180 	t4_read_cimq_cfg(sc, base, size, thres);
5181 
5182 	rc = sysctl_wire_old_buffer(req, 0);
5183 	if (rc != 0)
5184 		return (rc);
5185 
5186 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5187 	if (sb == NULL)
5188 		return (ENOMEM);
5189 
5190 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5191 
5192 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5193 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5194 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5195 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5196 		    G_QUEREMFLITS(p[2]) * 16);
5197 	for ( ; i < nq; i++, p += 4, wr += 2)
5198 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5199 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5200 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5201 		    G_QUEREMFLITS(p[2]) * 16);
5202 
5203 	rc = sbuf_finish(sb);
5204 	sbuf_delete(sb);
5205 
5206 	return (rc);
5207 }
5208 
5209 static int
5210 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5211 {
5212 	struct adapter *sc = arg1;
5213 	struct sbuf *sb;
5214 	int rc;
5215 	struct tp_cpl_stats stats;
5216 
5217 	rc = sysctl_wire_old_buffer(req, 0);
5218 	if (rc != 0)
5219 		return (rc);
5220 
5221 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5222 	if (sb == NULL)
5223 		return (ENOMEM);
5224 
5225 	t4_tp_get_cpl_stats(sc, &stats);
5226 
5227 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5228 	    "channel 3\n");
5229 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5230 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5231 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5232 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5233 
5234 	rc = sbuf_finish(sb);
5235 	sbuf_delete(sb);
5236 
5237 	return (rc);
5238 }
5239 
5240 static int
5241 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5242 {
5243 	struct adapter *sc = arg1;
5244 	struct sbuf *sb;
5245 	int rc;
5246 	struct tp_usm_stats stats;
5247 
5248 	rc = sysctl_wire_old_buffer(req, 0);
5249 	if (rc != 0)
5250 		return(rc);
5251 
5252 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5253 	if (sb == NULL)
5254 		return (ENOMEM);
5255 
5256 	t4_get_usm_stats(sc, &stats);
5257 
5258 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5259 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5260 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5261 
5262 	rc = sbuf_finish(sb);
5263 	sbuf_delete(sb);
5264 
5265 	return (rc);
5266 }
5267 
5268 const char *devlog_level_strings[] = {
5269 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5270 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5271 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5272 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5273 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5274 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5275 };
5276 
5277 const char *devlog_facility_strings[] = {
5278 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5279 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5280 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5281 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5282 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5283 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5284 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5285 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5286 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5287 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5288 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5289 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5290 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5291 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5292 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5293 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5294 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5295 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5296 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5297 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5298 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5299 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5300 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5301 };
5302 
5303 static int
5304 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5305 {
5306 	struct adapter *sc = arg1;
5307 	struct devlog_params *dparams = &sc->params.devlog;
5308 	struct fw_devlog_e *buf, *e;
5309 	int i, j, rc, nentries, first = 0;
5310 	struct sbuf *sb;
5311 	uint64_t ftstamp = UINT64_MAX;
5312 
5313 	if (dparams->start == 0) {
5314 		dparams->memtype = 0;
5315 		dparams->start = 0x84000;
5316 		dparams->size = 32768;
5317 	}
5318 
5319 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5320 
5321 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5322 	if (buf == NULL)
5323 		return (ENOMEM);
5324 
5325 	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
5326 	    (void *)buf);
5327 	if (rc != 0)
5328 		goto done;
5329 
5330 	for (i = 0; i < nentries; i++) {
5331 		e = &buf[i];
5332 
5333 		if (e->timestamp == 0)
5334 			break;	/* end */
5335 
5336 		e->timestamp = be64toh(e->timestamp);
5337 		e->seqno = be32toh(e->seqno);
5338 		for (j = 0; j < 8; j++)
5339 			e->params[j] = be32toh(e->params[j]);
5340 
5341 		if (e->timestamp < ftstamp) {
5342 			ftstamp = e->timestamp;
5343 			first = i;
5344 		}
5345 	}
5346 
5347 	if (buf[first].timestamp == 0)
5348 		goto done;	/* nothing in the log */
5349 
5350 	rc = sysctl_wire_old_buffer(req, 0);
5351 	if (rc != 0)
5352 		goto done;
5353 
5354 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5355 	if (sb == NULL) {
5356 		rc = ENOMEM;
5357 		goto done;
5358 	}
5359 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5360 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5361 
5362 	i = first;
5363 	do {
5364 		e = &buf[i];
5365 		if (e->timestamp == 0)
5366 			break;	/* end */
5367 
5368 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5369 		    e->seqno, e->timestamp,
5370 		    (e->level < nitems(devlog_level_strings) ?
5371 			devlog_level_strings[e->level] : "UNKNOWN"),
5372 		    (e->facility < nitems(devlog_facility_strings) ?
5373 			devlog_facility_strings[e->facility] : "UNKNOWN"));
5374 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5375 		    e->params[2], e->params[3], e->params[4],
5376 		    e->params[5], e->params[6], e->params[7]);
5377 
5378 		if (++i == nentries)
5379 			i = 0;
5380 	} while (i != first);
5381 
5382 	rc = sbuf_finish(sb);
5383 	sbuf_delete(sb);
5384 done:
5385 	free(buf, M_CXGBE);
5386 	return (rc);
5387 }
5388 
5389 static int
5390 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5391 {
5392 	struct adapter *sc = arg1;
5393 	struct sbuf *sb;
5394 	int rc;
5395 	struct tp_fcoe_stats stats[4];
5396 
5397 	rc = sysctl_wire_old_buffer(req, 0);
5398 	if (rc != 0)
5399 		return (rc);
5400 
5401 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5402 	if (sb == NULL)
5403 		return (ENOMEM);
5404 
5405 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5406 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5407 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5408 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5409 
5410 	sbuf_printf(sb, "                   channel 0        channel 1        "
5411 	    "channel 2        channel 3\n");
5412 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5413 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5414 	    stats[3].octetsDDP);
5415 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5416 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5417 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5418 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5419 	    stats[3].framesDrop);
5420 
5421 	rc = sbuf_finish(sb);
5422 	sbuf_delete(sb);
5423 
5424 	return (rc);
5425 }
5426 
5427 static int
5428 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5429 {
5430 	struct adapter *sc = arg1;
5431 	struct sbuf *sb;
5432 	int rc, i;
5433 	unsigned int map, kbps, ipg, mode;
5434 	unsigned int pace_tab[NTX_SCHED];
5435 
5436 	rc = sysctl_wire_old_buffer(req, 0);
5437 	if (rc != 0)
5438 		return (rc);
5439 
5440 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5441 	if (sb == NULL)
5442 		return (ENOMEM);
5443 
5444 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5445 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5446 	t4_read_pace_tbl(sc, pace_tab);
5447 
5448 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5449 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5450 
5451 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5452 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5453 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5454 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5455 		if (kbps)
5456 			sbuf_printf(sb, "%9u     ", kbps);
5457 		else
5458 			sbuf_printf(sb, " disabled     ");
5459 
5460 		if (ipg)
5461 			sbuf_printf(sb, "%13u        ", ipg);
5462 		else
5463 			sbuf_printf(sb, "     disabled        ");
5464 
5465 		if (pace_tab[i])
5466 			sbuf_printf(sb, "%10u", pace_tab[i]);
5467 		else
5468 			sbuf_printf(sb, "  disabled");
5469 	}
5470 
5471 	rc = sbuf_finish(sb);
5472 	sbuf_delete(sb);
5473 
5474 	return (rc);
5475 }
5476 
5477 static int
5478 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5479 {
5480 	struct adapter *sc = arg1;
5481 	struct sbuf *sb;
5482 	int rc, i, j;
5483 	uint64_t *p0, *p1;
5484 	struct lb_port_stats s[2];
5485 	static const char *stat_name[] = {
5486 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5487 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5488 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5489 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5490 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5491 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5492 		"BG2FramesTrunc:", "BG3FramesTrunc:"
5493 	};
5494 
5495 	rc = sysctl_wire_old_buffer(req, 0);
5496 	if (rc != 0)
5497 		return (rc);
5498 
5499 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5500 	if (sb == NULL)
5501 		return (ENOMEM);
5502 
5503 	memset(s, 0, sizeof(s));
5504 
5505 	for (i = 0; i < 4; i += 2) {
5506 		t4_get_lb_stats(sc, i, &s[0]);
5507 		t4_get_lb_stats(sc, i + 1, &s[1]);
5508 
5509 		p0 = &s[0].octets;
5510 		p1 = &s[1].octets;
5511 		sbuf_printf(sb, "%s                       Loopback %u"
5512 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5513 
5514 		for (j = 0; j < nitems(stat_name); j++)
5515 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5516 				   *p0++, *p1++);
5517 	}
5518 
5519 	rc = sbuf_finish(sb);
5520 	sbuf_delete(sb);
5521 
5522 	return (rc);
5523 }
5524 
5525 static int
5526 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5527 {
5528 	int rc = 0;
5529 	struct port_info *pi = arg1;
5530 	struct sbuf *sb;
5531 	static const char *linkdnreasons[] = {
5532 		"non-specific", "remote fault", "autoneg failed", "reserved3",
5533 		"PHY overheated", "unknown", "rx los", "reserved7"
5534 	};
5535 
5536 	rc = sysctl_wire_old_buffer(req, 0);
5537 	if (rc != 0)
5538 		return(rc);
5539 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5540 	if (sb == NULL)
5541 		return (ENOMEM);
5542 
5543 	if (pi->linkdnrc < 0)
5544 		sbuf_printf(sb, "n/a");
5545 	else if (pi->linkdnrc < nitems(linkdnreasons))
5546 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5547 	else
5548 		sbuf_printf(sb, "%d", pi->linkdnrc);
5549 
5550 	rc = sbuf_finish(sb);
5551 	sbuf_delete(sb);
5552 
5553 	return (rc);
5554 }
5555 
5556 struct mem_desc {
5557 	unsigned int base;
5558 	unsigned int limit;
5559 	unsigned int idx;
5560 };
5561 
5562 static int
5563 mem_desc_cmp(const void *a, const void *b)
5564 {
5565 	return ((const struct mem_desc *)a)->base -
5566 	       ((const struct mem_desc *)b)->base;
5567 }
5568 
5569 static void
5570 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5571     unsigned int to)
5572 {
5573 	unsigned int size;
5574 
5575 	size = to - from + 1;
5576 	if (size == 0)
5577 		return;
5578 
5579 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5580 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5581 }
5582 
5583 static int
5584 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5585 {
5586 	struct adapter *sc = arg1;
5587 	struct sbuf *sb;
5588 	int rc, i, n;
5589 	uint32_t lo, hi, used, alloc;
5590 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5591 	static const char *region[] = {
5592 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5593 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5594 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5595 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5596 		"RQUDP region:", "PBL region:", "TXPBL region:",
5597 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5598 		"On-chip queues:"
5599 	};
5600 	struct mem_desc avail[4];
5601 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5602 	struct mem_desc *md = mem;
5603 
5604 	rc = sysctl_wire_old_buffer(req, 0);
5605 	if (rc != 0)
5606 		return (rc);
5607 
5608 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5609 	if (sb == NULL)
5610 		return (ENOMEM);
5611 
5612 	for (i = 0; i < nitems(mem); i++) {
5613 		mem[i].limit = 0;
5614 		mem[i].idx = i;
5615 	}
5616 
5617 	/* Find and sort the populated memory ranges */
5618 	i = 0;
5619 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5620 	if (lo & F_EDRAM0_ENABLE) {
5621 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5622 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5623 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5624 		avail[i].idx = 0;
5625 		i++;
5626 	}
5627 	if (lo & F_EDRAM1_ENABLE) {
5628 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5629 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5630 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5631 		avail[i].idx = 1;
5632 		i++;
5633 	}
5634 	if (lo & F_EXT_MEM_ENABLE) {
5635 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5636 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5637 		avail[i].limit = avail[i].base +
5638 		    (G_EXT_MEM_SIZE(hi) << 20);
5639 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5640 		i++;
5641 	}
5642 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5643 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5644 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5645 		avail[i].limit = avail[i].base +
5646 		    (G_EXT_MEM1_SIZE(hi) << 20);
5647 		avail[i].idx = 4;
5648 		i++;
5649 	}
5650 	if (!i)                                    /* no memory available */
5651 		return 0;
5652 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5653 
5654 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5655 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5656 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5657 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5658 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5659 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5660 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5661 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5662 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5663 
5664 	/* the next few have explicit upper bounds */
5665 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5666 	md->limit = md->base - 1 +
5667 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5668 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5669 	md++;
5670 
5671 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5672 	md->limit = md->base - 1 +
5673 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5674 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5675 	md++;
5676 
5677 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5678 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5679 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5680 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5681 	} else {
5682 		md->base = 0;
5683 		md->idx = nitems(region);  /* hide it */
5684 	}
5685 	md++;
5686 
5687 #define ulp_region(reg) \
5688 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5689 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5690 
5691 	ulp_region(RX_ISCSI);
5692 	ulp_region(RX_TDDP);
5693 	ulp_region(TX_TPT);
5694 	ulp_region(RX_STAG);
5695 	ulp_region(RX_RQ);
5696 	ulp_region(RX_RQUDP);
5697 	ulp_region(RX_PBL);
5698 	ulp_region(TX_PBL);
5699 #undef ulp_region
5700 
5701 	md->base = 0;
5702 	md->idx = nitems(region);
5703 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5704 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5705 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5706 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5707 	}
5708 	md++;
5709 
5710 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5711 	md->limit = md->base + sc->tids.ntids - 1;
5712 	md++;
5713 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5714 	md->limit = md->base + sc->tids.ntids - 1;
5715 	md++;
5716 
5717 	md->base = sc->vres.ocq.start;
5718 	if (sc->vres.ocq.size)
5719 		md->limit = md->base + sc->vres.ocq.size - 1;
5720 	else
5721 		md->idx = nitems(region);  /* hide it */
5722 	md++;
5723 
5724 	/* add any address-space holes, there can be up to 3 */
5725 	for (n = 0; n < i - 1; n++)
5726 		if (avail[n].limit < avail[n + 1].base)
5727 			(md++)->base = avail[n].limit;
5728 	if (avail[n].limit)
5729 		(md++)->base = avail[n].limit;
5730 
5731 	n = md - mem;
5732 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5733 
5734 	for (lo = 0; lo < i; lo++)
5735 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5736 				avail[lo].limit - 1);
5737 
5738 	sbuf_printf(sb, "\n");
5739 	for (i = 0; i < n; i++) {
5740 		if (mem[i].idx >= nitems(region))
5741 			continue;                        /* skip holes */
5742 		if (!mem[i].limit)
5743 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5744 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5745 				mem[i].limit);
5746 	}
5747 
5748 	sbuf_printf(sb, "\n");
5749 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5750 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5751 	mem_region_show(sb, "uP RAM:", lo, hi);
5752 
5753 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5754 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5755 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5756 
5757 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5758 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5759 		   G_PMRXMAXPAGE(lo),
5760 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5761 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5762 
5763 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5764 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5765 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5766 		   G_PMTXMAXPAGE(lo),
5767 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5768 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5769 	sbuf_printf(sb, "%u p-structs\n",
5770 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5771 
5772 	for (i = 0; i < 4; i++) {
5773 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5774 		if (is_t4(sc)) {
5775 			used = G_USED(lo);
5776 			alloc = G_ALLOC(lo);
5777 		} else {
5778 			used = G_T5_USED(lo);
5779 			alloc = G_T5_ALLOC(lo);
5780 		}
5781 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5782 			   i, used, alloc);
5783 	}
5784 	for (i = 0; i < 4; i++) {
5785 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5786 		if (is_t4(sc)) {
5787 			used = G_USED(lo);
5788 			alloc = G_ALLOC(lo);
5789 		} else {
5790 			used = G_T5_USED(lo);
5791 			alloc = G_T5_ALLOC(lo);
5792 		}
5793 		sbuf_printf(sb,
5794 			   "\nLoopback %d using %u pages out of %u allocated",
5795 			   i, used, alloc);
5796 	}
5797 
5798 	rc = sbuf_finish(sb);
5799 	sbuf_delete(sb);
5800 
5801 	return (rc);
5802 }
5803 
5804 static inline void
5805 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5806 {
5807 	*mask = x | y;
5808 	y = htobe64(y);
5809 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5810 }
5811 
5812 static int
5813 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5814 {
5815 	struct adapter *sc = arg1;
5816 	struct sbuf *sb;
5817 	int rc, i, n;
5818 
5819 	rc = sysctl_wire_old_buffer(req, 0);
5820 	if (rc != 0)
5821 		return (rc);
5822 
5823 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5824 	if (sb == NULL)
5825 		return (ENOMEM);
5826 
5827 	sbuf_printf(sb,
5828 	    "Idx  Ethernet address     Mask     Vld Ports PF"
5829 	    "  VF              Replication             P0 P1 P2 P3  ML");
5830 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5831 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5832 	for (i = 0; i < n; i++) {
5833 		uint64_t tcamx, tcamy, mask;
5834 		uint32_t cls_lo, cls_hi;
5835 		uint8_t addr[ETHER_ADDR_LEN];
5836 
5837 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5838 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5839 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5840 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5841 
5842 		if (tcamx & tcamy)
5843 			continue;
5844 
5845 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5846 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5847 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5848 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5849 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5850 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5851 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5852 
5853 		if (cls_lo & F_REPLICATE) {
5854 			struct fw_ldst_cmd ldst_cmd;
5855 
5856 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5857 			ldst_cmd.op_to_addrspace =
5858 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5859 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5860 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5861 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5862 			ldst_cmd.u.mps.fid_ctl =
5863 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5864 				V_FW_LDST_CMD_CTL(i));
5865 
5866 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5867 			    "t4mps");
5868 			if (rc)
5869 				break;
5870 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5871 			    sizeof(ldst_cmd), &ldst_cmd);
5872 			end_synchronized_op(sc, 0);
5873 
5874 			if (rc != 0) {
5875 				sbuf_printf(sb,
5876 				    " ------------ error %3u ------------", rc);
5877 				rc = 0;
5878 			} else {
5879 				sbuf_printf(sb, " %08x %08x %08x %08x",
5880 				    be32toh(ldst_cmd.u.mps.rplc127_96),
5881 				    be32toh(ldst_cmd.u.mps.rplc95_64),
5882 				    be32toh(ldst_cmd.u.mps.rplc63_32),
5883 				    be32toh(ldst_cmd.u.mps.rplc31_0));
5884 			}
5885 		} else
5886 			sbuf_printf(sb, "%36s", "");
5887 
5888 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5889 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5890 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5891 	}
5892 
5893 	if (rc)
5894 		(void) sbuf_finish(sb);
5895 	else
5896 		rc = sbuf_finish(sb);
5897 	sbuf_delete(sb);
5898 
5899 	return (rc);
5900 }
5901 
5902 static int
5903 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5904 {
5905 	struct adapter *sc = arg1;
5906 	struct sbuf *sb;
5907 	int rc;
5908 	uint16_t mtus[NMTUS];
5909 
5910 	rc = sysctl_wire_old_buffer(req, 0);
5911 	if (rc != 0)
5912 		return (rc);
5913 
5914 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5915 	if (sb == NULL)
5916 		return (ENOMEM);
5917 
5918 	t4_read_mtu_tbl(sc, mtus, NULL);
5919 
5920 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5921 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5922 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5923 	    mtus[14], mtus[15]);
5924 
5925 	rc = sbuf_finish(sb);
5926 	sbuf_delete(sb);
5927 
5928 	return (rc);
5929 }
5930 
5931 static int
5932 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5933 {
5934 	struct adapter *sc = arg1;
5935 	struct sbuf *sb;
5936 	int rc, i;
5937 	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5938 	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5939 	static const char *pm_stats[] = {
5940 		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5941 	};
5942 
5943 	rc = sysctl_wire_old_buffer(req, 0);
5944 	if (rc != 0)
5945 		return (rc);
5946 
5947 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5948 	if (sb == NULL)
5949 		return (ENOMEM);
5950 
5951 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5952 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5953 
5954 	sbuf_printf(sb, "                Tx count            Tx cycles    "
5955 	    "Rx count            Rx cycles");
5956 	for (i = 0; i < PM_NSTATS; i++)
5957 		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5958 		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5959 
5960 	rc = sbuf_finish(sb);
5961 	sbuf_delete(sb);
5962 
5963 	return (rc);
5964 }
5965 
5966 static int
5967 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5968 {
5969 	struct adapter *sc = arg1;
5970 	struct sbuf *sb;
5971 	int rc;
5972 	struct tp_rdma_stats stats;
5973 
5974 	rc = sysctl_wire_old_buffer(req, 0);
5975 	if (rc != 0)
5976 		return (rc);
5977 
5978 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5979 	if (sb == NULL)
5980 		return (ENOMEM);
5981 
5982 	t4_tp_get_rdma_stats(sc, &stats);
5983 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5984 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5985 
5986 	rc = sbuf_finish(sb);
5987 	sbuf_delete(sb);
5988 
5989 	return (rc);
5990 }
5991 
5992 static int
5993 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5994 {
5995 	struct adapter *sc = arg1;
5996 	struct sbuf *sb;
5997 	int rc;
5998 	struct tp_tcp_stats v4, v6;
5999 
6000 	rc = sysctl_wire_old_buffer(req, 0);
6001 	if (rc != 0)
6002 		return (rc);
6003 
6004 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6005 	if (sb == NULL)
6006 		return (ENOMEM);
6007 
6008 	t4_tp_get_tcp_stats(sc, &v4, &v6);
6009 	sbuf_printf(sb,
6010 	    "                                IP                 IPv6\n");
6011 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6012 	    v4.tcpOutRsts, v6.tcpOutRsts);
6013 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6014 	    v4.tcpInSegs, v6.tcpInSegs);
6015 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6016 	    v4.tcpOutSegs, v6.tcpOutSegs);
6017 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6018 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6019 
6020 	rc = sbuf_finish(sb);
6021 	sbuf_delete(sb);
6022 
6023 	return (rc);
6024 }
6025 
6026 static int
6027 sysctl_tids(SYSCTL_HANDLER_ARGS)
6028 {
6029 	struct adapter *sc = arg1;
6030 	struct sbuf *sb;
6031 	int rc;
6032 	struct tid_info *t = &sc->tids;
6033 
6034 	rc = sysctl_wire_old_buffer(req, 0);
6035 	if (rc != 0)
6036 		return (rc);
6037 
6038 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6039 	if (sb == NULL)
6040 		return (ENOMEM);
6041 
6042 	if (t->natids) {
6043 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6044 		    t->atids_in_use);
6045 	}
6046 
6047 	if (t->ntids) {
6048 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6049 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6050 
6051 			if (b) {
6052 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6053 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6054 				    t->ntids - 1);
6055 			} else {
6056 				sbuf_printf(sb, "TID range: %u-%u",
6057 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6058 				    t->ntids - 1);
6059 			}
6060 		} else
6061 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6062 		sbuf_printf(sb, ", in use: %u\n",
6063 		    atomic_load_acq_int(&t->tids_in_use));
6064 	}
6065 
6066 	if (t->nstids) {
6067 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6068 		    t->stid_base + t->nstids - 1, t->stids_in_use);
6069 	}
6070 
6071 	if (t->nftids) {
6072 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6073 		    t->ftid_base + t->nftids - 1);
6074 	}
6075 
6076 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6077 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6078 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6079 
6080 	rc = sbuf_finish(sb);
6081 	sbuf_delete(sb);
6082 
6083 	return (rc);
6084 }
6085 
6086 static int
6087 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6088 {
6089 	struct adapter *sc = arg1;
6090 	struct sbuf *sb;
6091 	int rc;
6092 	struct tp_err_stats stats;
6093 
6094 	rc = sysctl_wire_old_buffer(req, 0);
6095 	if (rc != 0)
6096 		return (rc);
6097 
6098 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6099 	if (sb == NULL)
6100 		return (ENOMEM);
6101 
6102 	t4_tp_get_err_stats(sc, &stats);
6103 
6104 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6105 		      "channel 3\n");
6106 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6107 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6108 	    stats.macInErrs[3]);
6109 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6110 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6111 	    stats.hdrInErrs[3]);
6112 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6113 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6114 	    stats.tcpInErrs[3]);
6115 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6116 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6117 	    stats.tcp6InErrs[3]);
6118 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6119 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6120 	    stats.tnlCongDrops[3]);
6121 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6122 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6123 	    stats.tnlTxDrops[3]);
6124 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6125 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6126 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6127 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6128 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6129 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6130 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6131 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6132 
6133 	rc = sbuf_finish(sb);
6134 	sbuf_delete(sb);
6135 
6136 	return (rc);
6137 }
6138 
6139 struct field_desc {
6140 	const char *name;
6141 	u_int start;
6142 	u_int width;
6143 };
6144 
6145 static void
6146 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6147 {
6148 	char buf[32];
6149 	int line_size = 0;
6150 
6151 	while (f->name) {
6152 		uint64_t mask = (1ULL << f->width) - 1;
6153 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6154 		    ((uintmax_t)v >> f->start) & mask);
6155 
6156 		if (line_size + len >= 79) {
6157 			line_size = 8;
6158 			sbuf_printf(sb, "\n        ");
6159 		}
6160 		sbuf_printf(sb, "%s ", buf);
6161 		line_size += len + 1;
6162 		f++;
6163 	}
6164 	sbuf_printf(sb, "\n");
6165 }
6166 
6167 static struct field_desc tp_la0[] = {
6168 	{ "RcfOpCodeOut", 60, 4 },
6169 	{ "State", 56, 4 },
6170 	{ "WcfState", 52, 4 },
6171 	{ "RcfOpcSrcOut", 50, 2 },
6172 	{ "CRxError", 49, 1 },
6173 	{ "ERxError", 48, 1 },
6174 	{ "SanityFailed", 47, 1 },
6175 	{ "SpuriousMsg", 46, 1 },
6176 	{ "FlushInputMsg", 45, 1 },
6177 	{ "FlushInputCpl", 44, 1 },
6178 	{ "RssUpBit", 43, 1 },
6179 	{ "RssFilterHit", 42, 1 },
6180 	{ "Tid", 32, 10 },
6181 	{ "InitTcb", 31, 1 },
6182 	{ "LineNumber", 24, 7 },
6183 	{ "Emsg", 23, 1 },
6184 	{ "EdataOut", 22, 1 },
6185 	{ "Cmsg", 21, 1 },
6186 	{ "CdataOut", 20, 1 },
6187 	{ "EreadPdu", 19, 1 },
6188 	{ "CreadPdu", 18, 1 },
6189 	{ "TunnelPkt", 17, 1 },
6190 	{ "RcfPeerFin", 16, 1 },
6191 	{ "RcfReasonOut", 12, 4 },
6192 	{ "TxCchannel", 10, 2 },
6193 	{ "RcfTxChannel", 8, 2 },
6194 	{ "RxEchannel", 6, 2 },
6195 	{ "RcfRxChannel", 5, 1 },
6196 	{ "RcfDataOutSrdy", 4, 1 },
6197 	{ "RxDvld", 3, 1 },
6198 	{ "RxOoDvld", 2, 1 },
6199 	{ "RxCongestion", 1, 1 },
6200 	{ "TxCongestion", 0, 1 },
6201 	{ NULL }
6202 };
6203 
6204 static struct field_desc tp_la1[] = {
6205 	{ "CplCmdIn", 56, 8 },
6206 	{ "CplCmdOut", 48, 8 },
6207 	{ "ESynOut", 47, 1 },
6208 	{ "EAckOut", 46, 1 },
6209 	{ "EFinOut", 45, 1 },
6210 	{ "ERstOut", 44, 1 },
6211 	{ "SynIn", 43, 1 },
6212 	{ "AckIn", 42, 1 },
6213 	{ "FinIn", 41, 1 },
6214 	{ "RstIn", 40, 1 },
6215 	{ "DataIn", 39, 1 },
6216 	{ "DataInVld", 38, 1 },
6217 	{ "PadIn", 37, 1 },
6218 	{ "RxBufEmpty", 36, 1 },
6219 	{ "RxDdp", 35, 1 },
6220 	{ "RxFbCongestion", 34, 1 },
6221 	{ "TxFbCongestion", 33, 1 },
6222 	{ "TxPktSumSrdy", 32, 1 },
6223 	{ "RcfUlpType", 28, 4 },
6224 	{ "Eread", 27, 1 },
6225 	{ "Ebypass", 26, 1 },
6226 	{ "Esave", 25, 1 },
6227 	{ "Static0", 24, 1 },
6228 	{ "Cread", 23, 1 },
6229 	{ "Cbypass", 22, 1 },
6230 	{ "Csave", 21, 1 },
6231 	{ "CPktOut", 20, 1 },
6232 	{ "RxPagePoolFull", 18, 2 },
6233 	{ "RxLpbkPkt", 17, 1 },
6234 	{ "TxLpbkPkt", 16, 1 },
6235 	{ "RxVfValid", 15, 1 },
6236 	{ "SynLearned", 14, 1 },
6237 	{ "SetDelEntry", 13, 1 },
6238 	{ "SetInvEntry", 12, 1 },
6239 	{ "CpcmdDvld", 11, 1 },
6240 	{ "CpcmdSave", 10, 1 },
6241 	{ "RxPstructsFull", 8, 2 },
6242 	{ "EpcmdDvld", 7, 1 },
6243 	{ "EpcmdFlush", 6, 1 },
6244 	{ "EpcmdTrimPrefix", 5, 1 },
6245 	{ "EpcmdTrimPostfix", 4, 1 },
6246 	{ "ERssIp4Pkt", 3, 1 },
6247 	{ "ERssIp6Pkt", 2, 1 },
6248 	{ "ERssTcpUdpPkt", 1, 1 },
6249 	{ "ERssFceFipPkt", 0, 1 },
6250 	{ NULL }
6251 };
6252 
6253 static struct field_desc tp_la2[] = {
6254 	{ "CplCmdIn", 56, 8 },
6255 	{ "MpsVfVld", 55, 1 },
6256 	{ "MpsPf", 52, 3 },
6257 	{ "MpsVf", 44, 8 },
6258 	{ "SynIn", 43, 1 },
6259 	{ "AckIn", 42, 1 },
6260 	{ "FinIn", 41, 1 },
6261 	{ "RstIn", 40, 1 },
6262 	{ "DataIn", 39, 1 },
6263 	{ "DataInVld", 38, 1 },
6264 	{ "PadIn", 37, 1 },
6265 	{ "RxBufEmpty", 36, 1 },
6266 	{ "RxDdp", 35, 1 },
6267 	{ "RxFbCongestion", 34, 1 },
6268 	{ "TxFbCongestion", 33, 1 },
6269 	{ "TxPktSumSrdy", 32, 1 },
6270 	{ "RcfUlpType", 28, 4 },
6271 	{ "Eread", 27, 1 },
6272 	{ "Ebypass", 26, 1 },
6273 	{ "Esave", 25, 1 },
6274 	{ "Static0", 24, 1 },
6275 	{ "Cread", 23, 1 },
6276 	{ "Cbypass", 22, 1 },
6277 	{ "Csave", 21, 1 },
6278 	{ "CPktOut", 20, 1 },
6279 	{ "RxPagePoolFull", 18, 2 },
6280 	{ "RxLpbkPkt", 17, 1 },
6281 	{ "TxLpbkPkt", 16, 1 },
6282 	{ "RxVfValid", 15, 1 },
6283 	{ "SynLearned", 14, 1 },
6284 	{ "SetDelEntry", 13, 1 },
6285 	{ "SetInvEntry", 12, 1 },
6286 	{ "CpcmdDvld", 11, 1 },
6287 	{ "CpcmdSave", 10, 1 },
6288 	{ "RxPstructsFull", 8, 2 },
6289 	{ "EpcmdDvld", 7, 1 },
6290 	{ "EpcmdFlush", 6, 1 },
6291 	{ "EpcmdTrimPrefix", 5, 1 },
6292 	{ "EpcmdTrimPostfix", 4, 1 },
6293 	{ "ERssIp4Pkt", 3, 1 },
6294 	{ "ERssIp6Pkt", 2, 1 },
6295 	{ "ERssTcpUdpPkt", 1, 1 },
6296 	{ "ERssFceFipPkt", 0, 1 },
6297 	{ NULL }
6298 };
6299 
6300 static void
6301 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6302 {
6303 
6304 	field_desc_show(sb, *p, tp_la0);
6305 }
6306 
6307 static void
6308 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6309 {
6310 
6311 	if (idx)
6312 		sbuf_printf(sb, "\n");
6313 	field_desc_show(sb, p[0], tp_la0);
6314 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6315 		field_desc_show(sb, p[1], tp_la0);
6316 }
6317 
6318 static void
6319 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6320 {
6321 
6322 	if (idx)
6323 		sbuf_printf(sb, "\n");
6324 	field_desc_show(sb, p[0], tp_la0);
6325 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6326 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6327 }
6328 
6329 static int
6330 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6331 {
6332 	struct adapter *sc = arg1;
6333 	struct sbuf *sb;
6334 	uint64_t *buf, *p;
6335 	int rc;
6336 	u_int i, inc;
6337 	void (*show_func)(struct sbuf *, uint64_t *, int);
6338 
6339 	rc = sysctl_wire_old_buffer(req, 0);
6340 	if (rc != 0)
6341 		return (rc);
6342 
6343 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6344 	if (sb == NULL)
6345 		return (ENOMEM);
6346 
6347 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6348 
6349 	t4_tp_read_la(sc, buf, NULL);
6350 	p = buf;
6351 
6352 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6353 	case 2:
6354 		inc = 2;
6355 		show_func = tp_la_show2;
6356 		break;
6357 	case 3:
6358 		inc = 2;
6359 		show_func = tp_la_show3;
6360 		break;
6361 	default:
6362 		inc = 1;
6363 		show_func = tp_la_show;
6364 	}
6365 
6366 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6367 		(*show_func)(sb, p, i);
6368 
6369 	rc = sbuf_finish(sb);
6370 	sbuf_delete(sb);
6371 	free(buf, M_CXGBE);
6372 	return (rc);
6373 }
6374 
6375 static int
6376 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6377 {
6378 	struct adapter *sc = arg1;
6379 	struct sbuf *sb;
6380 	int rc;
6381 	u64 nrate[NCHAN], orate[NCHAN];
6382 
6383 	rc = sysctl_wire_old_buffer(req, 0);
6384 	if (rc != 0)
6385 		return (rc);
6386 
6387 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6388 	if (sb == NULL)
6389 		return (ENOMEM);
6390 
6391 	t4_get_chan_txrate(sc, nrate, orate);
6392 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6393 		 "channel 3\n");
6394 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6395 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6396 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6397 	    orate[0], orate[1], orate[2], orate[3]);
6398 
6399 	rc = sbuf_finish(sb);
6400 	sbuf_delete(sb);
6401 
6402 	return (rc);
6403 }
6404 
6405 static int
6406 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6407 {
6408 	struct adapter *sc = arg1;
6409 	struct sbuf *sb;
6410 	uint32_t *buf, *p;
6411 	int rc, i;
6412 
6413 	rc = sysctl_wire_old_buffer(req, 0);
6414 	if (rc != 0)
6415 		return (rc);
6416 
6417 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6418 	if (sb == NULL)
6419 		return (ENOMEM);
6420 
6421 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6422 	    M_ZERO | M_WAITOK);
6423 
6424 	t4_ulprx_read_la(sc, buf);
6425 	p = buf;
6426 
6427 	sbuf_printf(sb, "      Pcmd        Type   Message"
6428 	    "                Data");
6429 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6430 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6431 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6432 	}
6433 
6434 	rc = sbuf_finish(sb);
6435 	sbuf_delete(sb);
6436 	free(buf, M_CXGBE);
6437 	return (rc);
6438 }
6439 
6440 static int
6441 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6442 {
6443 	struct adapter *sc = arg1;
6444 	struct sbuf *sb;
6445 	int rc, v;
6446 
6447 	rc = sysctl_wire_old_buffer(req, 0);
6448 	if (rc != 0)
6449 		return (rc);
6450 
6451 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6452 	if (sb == NULL)
6453 		return (ENOMEM);
6454 
6455 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6456 	if (G_STATSOURCE_T5(v) == 7) {
6457 		if (G_STATMODE(v) == 0) {
6458 			sbuf_printf(sb, "total %d, incomplete %d",
6459 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6460 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6461 		} else if (G_STATMODE(v) == 1) {
6462 			sbuf_printf(sb, "total %d, data overflow %d",
6463 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6464 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6465 		}
6466 	}
6467 	rc = sbuf_finish(sb);
6468 	sbuf_delete(sb);
6469 
6470 	return (rc);
6471 }
6472 #endif
6473 
6474 static inline void
6475 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6476 {
6477 	struct buf_ring *br;
6478 	struct mbuf *m;
6479 
6480 	TXQ_LOCK_ASSERT_OWNED(txq);
6481 
6482 	br = txq->br;
6483 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6484 	if (m)
6485 		t4_eth_tx(ifp, txq, m);
6486 }
6487 
6488 void
6489 t4_tx_callout(void *arg)
6490 {
6491 	struct sge_eq *eq = arg;
6492 	struct adapter *sc;
6493 
6494 	if (EQ_TRYLOCK(eq) == 0)
6495 		goto reschedule;
6496 
6497 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6498 		EQ_UNLOCK(eq);
6499 reschedule:
6500 		if (__predict_true(!(eq->flags && EQ_DOOMED)))
6501 			callout_schedule(&eq->tx_callout, 1);
6502 		return;
6503 	}
6504 
6505 	EQ_LOCK_ASSERT_OWNED(eq);
6506 
6507 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6508 
6509 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6510 			struct sge_txq *txq = arg;
6511 			struct port_info *pi = txq->ifp->if_softc;
6512 
6513 			sc = pi->adapter;
6514 		} else {
6515 			struct sge_wrq *wrq = arg;
6516 
6517 			sc = wrq->adapter;
6518 		}
6519 
6520 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6521 	}
6522 
6523 	EQ_UNLOCK(eq);
6524 }
6525 
6526 void
6527 t4_tx_task(void *arg, int count)
6528 {
6529 	struct sge_eq *eq = arg;
6530 
6531 	EQ_LOCK(eq);
6532 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6533 		struct sge_txq *txq = arg;
6534 		txq_start(txq->ifp, txq);
6535 	} else {
6536 		struct sge_wrq *wrq = arg;
6537 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6538 	}
6539 	EQ_UNLOCK(eq);
6540 }
6541 
6542 static uint32_t
6543 fconf_to_mode(uint32_t fconf)
6544 {
6545 	uint32_t mode;
6546 
6547 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6548 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6549 
6550 	if (fconf & F_FRAGMENTATION)
6551 		mode |= T4_FILTER_IP_FRAGMENT;
6552 
6553 	if (fconf & F_MPSHITTYPE)
6554 		mode |= T4_FILTER_MPS_HIT_TYPE;
6555 
6556 	if (fconf & F_MACMATCH)
6557 		mode |= T4_FILTER_MAC_IDX;
6558 
6559 	if (fconf & F_ETHERTYPE)
6560 		mode |= T4_FILTER_ETH_TYPE;
6561 
6562 	if (fconf & F_PROTOCOL)
6563 		mode |= T4_FILTER_IP_PROTO;
6564 
6565 	if (fconf & F_TOS)
6566 		mode |= T4_FILTER_IP_TOS;
6567 
6568 	if (fconf & F_VLAN)
6569 		mode |= T4_FILTER_VLAN;
6570 
6571 	if (fconf & F_VNIC_ID)
6572 		mode |= T4_FILTER_VNIC;
6573 
6574 	if (fconf & F_PORT)
6575 		mode |= T4_FILTER_PORT;
6576 
6577 	if (fconf & F_FCOE)
6578 		mode |= T4_FILTER_FCoE;
6579 
6580 	return (mode);
6581 }
6582 
6583 static uint32_t
6584 mode_to_fconf(uint32_t mode)
6585 {
6586 	uint32_t fconf = 0;
6587 
6588 	if (mode & T4_FILTER_IP_FRAGMENT)
6589 		fconf |= F_FRAGMENTATION;
6590 
6591 	if (mode & T4_FILTER_MPS_HIT_TYPE)
6592 		fconf |= F_MPSHITTYPE;
6593 
6594 	if (mode & T4_FILTER_MAC_IDX)
6595 		fconf |= F_MACMATCH;
6596 
6597 	if (mode & T4_FILTER_ETH_TYPE)
6598 		fconf |= F_ETHERTYPE;
6599 
6600 	if (mode & T4_FILTER_IP_PROTO)
6601 		fconf |= F_PROTOCOL;
6602 
6603 	if (mode & T4_FILTER_IP_TOS)
6604 		fconf |= F_TOS;
6605 
6606 	if (mode & T4_FILTER_VLAN)
6607 		fconf |= F_VLAN;
6608 
6609 	if (mode & T4_FILTER_VNIC)
6610 		fconf |= F_VNIC_ID;
6611 
6612 	if (mode & T4_FILTER_PORT)
6613 		fconf |= F_PORT;
6614 
6615 	if (mode & T4_FILTER_FCoE)
6616 		fconf |= F_FCOE;
6617 
6618 	return (fconf);
6619 }
6620 
6621 static uint32_t
6622 fspec_to_fconf(struct t4_filter_specification *fs)
6623 {
6624 	uint32_t fconf = 0;
6625 
6626 	if (fs->val.frag || fs->mask.frag)
6627 		fconf |= F_FRAGMENTATION;
6628 
6629 	if (fs->val.matchtype || fs->mask.matchtype)
6630 		fconf |= F_MPSHITTYPE;
6631 
6632 	if (fs->val.macidx || fs->mask.macidx)
6633 		fconf |= F_MACMATCH;
6634 
6635 	if (fs->val.ethtype || fs->mask.ethtype)
6636 		fconf |= F_ETHERTYPE;
6637 
6638 	if (fs->val.proto || fs->mask.proto)
6639 		fconf |= F_PROTOCOL;
6640 
6641 	if (fs->val.tos || fs->mask.tos)
6642 		fconf |= F_TOS;
6643 
6644 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6645 		fconf |= F_VLAN;
6646 
6647 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6648 		fconf |= F_VNIC_ID;
6649 
6650 	if (fs->val.iport || fs->mask.iport)
6651 		fconf |= F_PORT;
6652 
6653 	if (fs->val.fcoe || fs->mask.fcoe)
6654 		fconf |= F_FCOE;
6655 
6656 	return (fconf);
6657 }
6658 
6659 static int
6660 get_filter_mode(struct adapter *sc, uint32_t *mode)
6661 {
6662 	int rc;
6663 	uint32_t fconf;
6664 
6665 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6666 	    "t4getfm");
6667 	if (rc)
6668 		return (rc);
6669 
6670 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6671 	    A_TP_VLAN_PRI_MAP);
6672 
6673 	if (sc->params.tp.vlan_pri_map != fconf) {
6674 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6675 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6676 		    fconf);
6677 		sc->params.tp.vlan_pri_map = fconf;
6678 	}
6679 
6680 	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6681 
6682 	end_synchronized_op(sc, LOCK_HELD);
6683 	return (0);
6684 }
6685 
6686 static int
6687 set_filter_mode(struct adapter *sc, uint32_t mode)
6688 {
6689 	uint32_t fconf;
6690 	int rc;
6691 
6692 	fconf = mode_to_fconf(mode);
6693 
6694 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6695 	    "t4setfm");
6696 	if (rc)
6697 		return (rc);
6698 
6699 	if (sc->tids.ftids_in_use > 0) {
6700 		rc = EBUSY;
6701 		goto done;
6702 	}
6703 
6704 #ifdef TCP_OFFLOAD
6705 	if (sc->offload_map) {
6706 		rc = EBUSY;
6707 		goto done;
6708 	}
6709 #endif
6710 
6711 #ifdef notyet
6712 	rc = -t4_set_filter_mode(sc, fconf);
6713 	if (rc == 0)
6714 		sc->filter_mode = fconf;
6715 #else
6716 	rc = ENOTSUP;
6717 #endif
6718 
6719 done:
6720 	end_synchronized_op(sc, LOCK_HELD);
6721 	return (rc);
6722 }
6723 
6724 static inline uint64_t
6725 get_filter_hits(struct adapter *sc, uint32_t fid)
6726 {
6727 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6728 	uint64_t hits;
6729 
6730 	memwin_info(sc, 0, &mw_base, NULL);
6731 	off = position_memwin(sc, 0,
6732 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6733 	if (is_t4(sc)) {
6734 		hits = t4_read_reg64(sc, mw_base + off + 16);
6735 		hits = be64toh(hits);
6736 	} else {
6737 		hits = t4_read_reg(sc, mw_base + off + 24);
6738 		hits = be32toh(hits);
6739 	}
6740 
6741 	return (hits);
6742 }
6743 
6744 static int
6745 get_filter(struct adapter *sc, struct t4_filter *t)
6746 {
6747 	int i, rc, nfilters = sc->tids.nftids;
6748 	struct filter_entry *f;
6749 
6750 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6751 	    "t4getf");
6752 	if (rc)
6753 		return (rc);
6754 
6755 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6756 	    t->idx >= nfilters) {
6757 		t->idx = 0xffffffff;
6758 		goto done;
6759 	}
6760 
6761 	f = &sc->tids.ftid_tab[t->idx];
6762 	for (i = t->idx; i < nfilters; i++, f++) {
6763 		if (f->valid) {
6764 			t->idx = i;
6765 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6766 			t->smtidx = f->smtidx;
6767 			if (f->fs.hitcnts)
6768 				t->hits = get_filter_hits(sc, t->idx);
6769 			else
6770 				t->hits = UINT64_MAX;
6771 			t->fs = f->fs;
6772 
6773 			goto done;
6774 		}
6775 	}
6776 
6777 	t->idx = 0xffffffff;
6778 done:
6779 	end_synchronized_op(sc, LOCK_HELD);
6780 	return (0);
6781 }
6782 
6783 static int
6784 set_filter(struct adapter *sc, struct t4_filter *t)
6785 {
6786 	unsigned int nfilters, nports;
6787 	struct filter_entry *f;
6788 	int i, rc;
6789 
6790 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6791 	if (rc)
6792 		return (rc);
6793 
6794 	nfilters = sc->tids.nftids;
6795 	nports = sc->params.nports;
6796 
6797 	if (nfilters == 0) {
6798 		rc = ENOTSUP;
6799 		goto done;
6800 	}
6801 
6802 	if (!(sc->flags & FULL_INIT_DONE)) {
6803 		rc = EAGAIN;
6804 		goto done;
6805 	}
6806 
6807 	if (t->idx >= nfilters) {
6808 		rc = EINVAL;
6809 		goto done;
6810 	}
6811 
6812 	/* Validate against the global filter mode */
6813 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6814 	    sc->params.tp.vlan_pri_map) {
6815 		rc = E2BIG;
6816 		goto done;
6817 	}
6818 
6819 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6820 		rc = EINVAL;
6821 		goto done;
6822 	}
6823 
6824 	if (t->fs.val.iport >= nports) {
6825 		rc = EINVAL;
6826 		goto done;
6827 	}
6828 
6829 	/* Can't specify an iq if not steering to it */
6830 	if (!t->fs.dirsteer && t->fs.iq) {
6831 		rc = EINVAL;
6832 		goto done;
6833 	}
6834 
6835 	/* IPv6 filter idx must be 4 aligned */
6836 	if (t->fs.type == 1 &&
6837 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6838 		rc = EINVAL;
6839 		goto done;
6840 	}
6841 
6842 	if (sc->tids.ftid_tab == NULL) {
6843 		KASSERT(sc->tids.ftids_in_use == 0,
6844 		    ("%s: no memory allocated but filters_in_use > 0",
6845 		    __func__));
6846 
6847 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6848 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6849 		if (sc->tids.ftid_tab == NULL) {
6850 			rc = ENOMEM;
6851 			goto done;
6852 		}
6853 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6854 	}
6855 
6856 	for (i = 0; i < 4; i++) {
6857 		f = &sc->tids.ftid_tab[t->idx + i];
6858 
6859 		if (f->pending || f->valid) {
6860 			rc = EBUSY;
6861 			goto done;
6862 		}
6863 		if (f->locked) {
6864 			rc = EPERM;
6865 			goto done;
6866 		}
6867 
6868 		if (t->fs.type == 0)
6869 			break;
6870 	}
6871 
6872 	f = &sc->tids.ftid_tab[t->idx];
6873 	f->fs = t->fs;
6874 
6875 	rc = set_filter_wr(sc, t->idx);
6876 done:
6877 	end_synchronized_op(sc, 0);
6878 
6879 	if (rc == 0) {
6880 		mtx_lock(&sc->tids.ftid_lock);
6881 		for (;;) {
6882 			if (f->pending == 0) {
6883 				rc = f->valid ? 0 : EIO;
6884 				break;
6885 			}
6886 
6887 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6888 			    PCATCH, "t4setfw", 0)) {
6889 				rc = EINPROGRESS;
6890 				break;
6891 			}
6892 		}
6893 		mtx_unlock(&sc->tids.ftid_lock);
6894 	}
6895 	return (rc);
6896 }
6897 
6898 static int
6899 del_filter(struct adapter *sc, struct t4_filter *t)
6900 {
6901 	unsigned int nfilters;
6902 	struct filter_entry *f;
6903 	int rc;
6904 
6905 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6906 	if (rc)
6907 		return (rc);
6908 
6909 	nfilters = sc->tids.nftids;
6910 
6911 	if (nfilters == 0) {
6912 		rc = ENOTSUP;
6913 		goto done;
6914 	}
6915 
6916 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6917 	    t->idx >= nfilters) {
6918 		rc = EINVAL;
6919 		goto done;
6920 	}
6921 
6922 	if (!(sc->flags & FULL_INIT_DONE)) {
6923 		rc = EAGAIN;
6924 		goto done;
6925 	}
6926 
6927 	f = &sc->tids.ftid_tab[t->idx];
6928 
6929 	if (f->pending) {
6930 		rc = EBUSY;
6931 		goto done;
6932 	}
6933 	if (f->locked) {
6934 		rc = EPERM;
6935 		goto done;
6936 	}
6937 
6938 	if (f->valid) {
6939 		t->fs = f->fs;	/* extra info for the caller */
6940 		rc = del_filter_wr(sc, t->idx);
6941 	}
6942 
6943 done:
6944 	end_synchronized_op(sc, 0);
6945 
6946 	if (rc == 0) {
6947 		mtx_lock(&sc->tids.ftid_lock);
6948 		for (;;) {
6949 			if (f->pending == 0) {
6950 				rc = f->valid ? EIO : 0;
6951 				break;
6952 			}
6953 
6954 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6955 			    PCATCH, "t4delfw", 0)) {
6956 				rc = EINPROGRESS;
6957 				break;
6958 			}
6959 		}
6960 		mtx_unlock(&sc->tids.ftid_lock);
6961 	}
6962 
6963 	return (rc);
6964 }
6965 
6966 static void
6967 clear_filter(struct filter_entry *f)
6968 {
6969 	if (f->l2t)
6970 		t4_l2t_release(f->l2t);
6971 
6972 	bzero(f, sizeof (*f));
6973 }
6974 
6975 static int
6976 set_filter_wr(struct adapter *sc, int fidx)
6977 {
6978 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6979 	struct wrqe *wr;
6980 	struct fw_filter_wr *fwr;
6981 	unsigned int ftid;
6982 
6983 	ASSERT_SYNCHRONIZED_OP(sc);
6984 
6985 	if (f->fs.newdmac || f->fs.newvlan) {
6986 		/* This filter needs an L2T entry; allocate one. */
6987 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
6988 		if (f->l2t == NULL)
6989 			return (EAGAIN);
6990 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6991 		    f->fs.dmac)) {
6992 			t4_l2t_release(f->l2t);
6993 			f->l2t = NULL;
6994 			return (ENOMEM);
6995 		}
6996 	}
6997 
6998 	ftid = sc->tids.ftid_base + fidx;
6999 
7000 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7001 	if (wr == NULL)
7002 		return (ENOMEM);
7003 
7004 	fwr = wrtod(wr);
7005 	bzero(fwr, sizeof (*fwr));
7006 
7007 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7008 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7009 	fwr->tid_to_iq =
7010 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7011 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7012 		V_FW_FILTER_WR_NOREPLY(0) |
7013 		V_FW_FILTER_WR_IQ(f->fs.iq));
7014 	fwr->del_filter_to_l2tix =
7015 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7016 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7017 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7018 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7019 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7020 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7021 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7022 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7023 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7024 		    f->fs.newvlan == VLAN_REWRITE) |
7025 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7026 		    f->fs.newvlan == VLAN_REWRITE) |
7027 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7028 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7029 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7030 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7031 	fwr->ethtype = htobe16(f->fs.val.ethtype);
7032 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7033 	fwr->frag_to_ovlan_vldm =
7034 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7035 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7036 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7037 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7038 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7039 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7040 	fwr->smac_sel = 0;
7041 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7042 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7043 	fwr->maci_to_matchtypem =
7044 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7045 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7046 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7047 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7048 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7049 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7050 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7051 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7052 	fwr->ptcl = f->fs.val.proto;
7053 	fwr->ptclm = f->fs.mask.proto;
7054 	fwr->ttyp = f->fs.val.tos;
7055 	fwr->ttypm = f->fs.mask.tos;
7056 	fwr->ivlan = htobe16(f->fs.val.vlan);
7057 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7058 	fwr->ovlan = htobe16(f->fs.val.vnic);
7059 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7060 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7061 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7062 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7063 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7064 	fwr->lp = htobe16(f->fs.val.dport);
7065 	fwr->lpm = htobe16(f->fs.mask.dport);
7066 	fwr->fp = htobe16(f->fs.val.sport);
7067 	fwr->fpm = htobe16(f->fs.mask.sport);
7068 	if (f->fs.newsmac)
7069 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7070 
7071 	f->pending = 1;
7072 	sc->tids.ftids_in_use++;
7073 
7074 	t4_wrq_tx(sc, wr);
7075 	return (0);
7076 }
7077 
7078 static int
7079 del_filter_wr(struct adapter *sc, int fidx)
7080 {
7081 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7082 	struct wrqe *wr;
7083 	struct fw_filter_wr *fwr;
7084 	unsigned int ftid;
7085 
7086 	ftid = sc->tids.ftid_base + fidx;
7087 
7088 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7089 	if (wr == NULL)
7090 		return (ENOMEM);
7091 	fwr = wrtod(wr);
7092 	bzero(fwr, sizeof (*fwr));
7093 
7094 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7095 
7096 	f->pending = 1;
7097 	t4_wrq_tx(sc, wr);
7098 	return (0);
7099 }
7100 
7101 int
7102 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7103 {
7104 	struct adapter *sc = iq->adapter;
7105 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7106 	unsigned int idx = GET_TID(rpl);
7107 
7108 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7109 	    rss->opcode));
7110 
7111 	if (idx >= sc->tids.ftid_base &&
7112 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7113 		unsigned int rc = G_COOKIE(rpl->cookie);
7114 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7115 
7116 		mtx_lock(&sc->tids.ftid_lock);
7117 		if (rc == FW_FILTER_WR_FLT_ADDED) {
7118 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7119 			    __func__, idx));
7120 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7121 			f->pending = 0;  /* asynchronous setup completed */
7122 			f->valid = 1;
7123 		} else {
7124 			if (rc != FW_FILTER_WR_FLT_DELETED) {
7125 				/* Add or delete failed, display an error */
7126 				log(LOG_ERR,
7127 				    "filter %u setup failed with error %u\n",
7128 				    idx, rc);
7129 			}
7130 
7131 			clear_filter(f);
7132 			sc->tids.ftids_in_use--;
7133 		}
7134 		wakeup(&sc->tids.ftid_tab);
7135 		mtx_unlock(&sc->tids.ftid_lock);
7136 	}
7137 
7138 	return (0);
7139 }
7140 
7141 static int
7142 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7143 {
7144 	int rc;
7145 
7146 	if (cntxt->cid > M_CTXTQID)
7147 		return (EINVAL);
7148 
7149 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7150 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7151 		return (EINVAL);
7152 
7153 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7154 	if (rc)
7155 		return (rc);
7156 
7157 	if (sc->flags & FW_OK) {
7158 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7159 		    &cntxt->data[0]);
7160 		if (rc == 0)
7161 			goto done;
7162 	}
7163 
7164 	/*
7165 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7166 	 * the backdoor.
7167 	 */
7168 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7169 done:
7170 	end_synchronized_op(sc, 0);
7171 	return (rc);
7172 }
7173 
7174 static int
7175 load_fw(struct adapter *sc, struct t4_data *fw)
7176 {
7177 	int rc;
7178 	uint8_t *fw_data;
7179 
7180 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7181 	if (rc)
7182 		return (rc);
7183 
7184 	if (sc->flags & FULL_INIT_DONE) {
7185 		rc = EBUSY;
7186 		goto done;
7187 	}
7188 
7189 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7190 	if (fw_data == NULL) {
7191 		rc = ENOMEM;
7192 		goto done;
7193 	}
7194 
7195 	rc = copyin(fw->data, fw_data, fw->len);
7196 	if (rc == 0)
7197 		rc = -t4_load_fw(sc, fw_data, fw->len);
7198 
7199 	free(fw_data, M_CXGBE);
7200 done:
7201 	end_synchronized_op(sc, 0);
7202 	return (rc);
7203 }
7204 
7205 static int
7206 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7207 {
7208 	uint32_t addr, off, remaining, i, n;
7209 	uint32_t *buf, *b;
7210 	uint32_t mw_base, mw_aperture;
7211 	int rc;
7212 	uint8_t *dst;
7213 
7214 	rc = validate_mem_range(sc, mr->addr, mr->len);
7215 	if (rc != 0)
7216 		return (rc);
7217 
7218 	memwin_info(sc, win, &mw_base, &mw_aperture);
7219 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7220 	addr = mr->addr;
7221 	remaining = mr->len;
7222 	dst = (void *)mr->data;
7223 
7224 	while (remaining) {
7225 		off = position_memwin(sc, win, addr);
7226 
7227 		/* number of bytes that we'll copy in the inner loop */
7228 		n = min(remaining, mw_aperture - off);
7229 		for (i = 0; i < n; i += 4)
7230 			*b++ = t4_read_reg(sc, mw_base + off + i);
7231 
7232 		rc = copyout(buf, dst, n);
7233 		if (rc != 0)
7234 			break;
7235 
7236 		b = buf;
7237 		dst += n;
7238 		remaining -= n;
7239 		addr += n;
7240 	}
7241 
7242 	free(buf, M_CXGBE);
7243 	return (rc);
7244 }
7245 
7246 static int
7247 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7248 {
7249 	int rc;
7250 
7251 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7252 		return (EINVAL);
7253 
7254 	if (i2cd->len > 1) {
7255 		/* XXX: need fw support for longer reads in one go */
7256 		return (ENOTSUP);
7257 	}
7258 
7259 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7260 	if (rc)
7261 		return (rc);
7262 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7263 	    i2cd->offset, &i2cd->data[0]);
7264 	end_synchronized_op(sc, 0);
7265 
7266 	return (rc);
7267 }
7268 
7269 int
7270 t4_os_find_pci_capability(struct adapter *sc, int cap)
7271 {
7272 	int i;
7273 
7274 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7275 }
7276 
7277 int
7278 t4_os_pci_save_state(struct adapter *sc)
7279 {
7280 	device_t dev;
7281 	struct pci_devinfo *dinfo;
7282 
7283 	dev = sc->dev;
7284 	dinfo = device_get_ivars(dev);
7285 
7286 	pci_cfg_save(dev, dinfo, 0);
7287 	return (0);
7288 }
7289 
7290 int
7291 t4_os_pci_restore_state(struct adapter *sc)
7292 {
7293 	device_t dev;
7294 	struct pci_devinfo *dinfo;
7295 
7296 	dev = sc->dev;
7297 	dinfo = device_get_ivars(dev);
7298 
7299 	pci_cfg_restore(dev, dinfo);
7300 	return (0);
7301 }
7302 
7303 void
7304 t4_os_portmod_changed(const struct adapter *sc, int idx)
7305 {
7306 	struct port_info *pi = sc->port[idx];
7307 	static const char *mod_str[] = {
7308 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7309 	};
7310 
7311 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7312 		if_printf(pi->ifp, "transceiver unplugged.\n");
7313 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7314 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7315 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7316 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7317 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7318 		if_printf(pi->ifp, "%s transceiver inserted.\n",
7319 		    mod_str[pi->mod_type]);
7320 	} else {
7321 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7322 		    pi->mod_type);
7323 	}
7324 }
7325 
7326 void
7327 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7328 {
7329 	struct port_info *pi = sc->port[idx];
7330 	struct ifnet *ifp = pi->ifp;
7331 
7332 	if (link_stat) {
7333 		pi->linkdnrc = -1;
7334 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7335 		if_link_state_change(ifp, LINK_STATE_UP);
7336 	} else {
7337 		if (reason >= 0)
7338 			pi->linkdnrc = reason;
7339 		if_link_state_change(ifp, LINK_STATE_DOWN);
7340 	}
7341 }
7342 
7343 void
7344 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7345 {
7346 	struct adapter *sc;
7347 
7348 	sx_slock(&t4_list_lock);
7349 	SLIST_FOREACH(sc, &t4_list, link) {
7350 		/*
7351 		 * func should not make any assumptions about what state sc is
7352 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7353 		 */
7354 		func(sc, arg);
7355 	}
7356 	sx_sunlock(&t4_list_lock);
7357 }
7358 
7359 static int
7360 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7361 {
7362        return (0);
7363 }
7364 
7365 static int
7366 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7367 {
7368        return (0);
7369 }
7370 
7371 static int
7372 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7373     struct thread *td)
7374 {
7375 	int rc;
7376 	struct adapter *sc = dev->si_drv1;
7377 
7378 	rc = priv_check(td, PRIV_DRIVER);
7379 	if (rc != 0)
7380 		return (rc);
7381 
7382 	switch (cmd) {
7383 	case CHELSIO_T4_GETREG: {
7384 		struct t4_reg *edata = (struct t4_reg *)data;
7385 
7386 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7387 			return (EFAULT);
7388 
7389 		if (edata->size == 4)
7390 			edata->val = t4_read_reg(sc, edata->addr);
7391 		else if (edata->size == 8)
7392 			edata->val = t4_read_reg64(sc, edata->addr);
7393 		else
7394 			return (EINVAL);
7395 
7396 		break;
7397 	}
7398 	case CHELSIO_T4_SETREG: {
7399 		struct t4_reg *edata = (struct t4_reg *)data;
7400 
7401 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7402 			return (EFAULT);
7403 
7404 		if (edata->size == 4) {
7405 			if (edata->val & 0xffffffff00000000)
7406 				return (EINVAL);
7407 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7408 		} else if (edata->size == 8)
7409 			t4_write_reg64(sc, edata->addr, edata->val);
7410 		else
7411 			return (EINVAL);
7412 		break;
7413 	}
7414 	case CHELSIO_T4_REGDUMP: {
7415 		struct t4_regdump *regs = (struct t4_regdump *)data;
7416 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7417 		uint8_t *buf;
7418 
7419 		if (regs->len < reglen) {
7420 			regs->len = reglen; /* hint to the caller */
7421 			return (ENOBUFS);
7422 		}
7423 
7424 		regs->len = reglen;
7425 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7426 		t4_get_regs(sc, regs, buf);
7427 		rc = copyout(buf, regs->data, reglen);
7428 		free(buf, M_CXGBE);
7429 		break;
7430 	}
7431 	case CHELSIO_T4_GET_FILTER_MODE:
7432 		rc = get_filter_mode(sc, (uint32_t *)data);
7433 		break;
7434 	case CHELSIO_T4_SET_FILTER_MODE:
7435 		rc = set_filter_mode(sc, *(uint32_t *)data);
7436 		break;
7437 	case CHELSIO_T4_GET_FILTER:
7438 		rc = get_filter(sc, (struct t4_filter *)data);
7439 		break;
7440 	case CHELSIO_T4_SET_FILTER:
7441 		rc = set_filter(sc, (struct t4_filter *)data);
7442 		break;
7443 	case CHELSIO_T4_DEL_FILTER:
7444 		rc = del_filter(sc, (struct t4_filter *)data);
7445 		break;
7446 	case CHELSIO_T4_GET_SGE_CONTEXT:
7447 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7448 		break;
7449 	case CHELSIO_T4_LOAD_FW:
7450 		rc = load_fw(sc, (struct t4_data *)data);
7451 		break;
7452 	case CHELSIO_T4_GET_MEM:
7453 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7454 		break;
7455 	case CHELSIO_T4_GET_I2C:
7456 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7457 		break;
7458 	case CHELSIO_T4_CLEAR_STATS: {
7459 		int i;
7460 		u_int port_id = *(uint32_t *)data;
7461 		struct port_info *pi;
7462 
7463 		if (port_id >= sc->params.nports)
7464 			return (EINVAL);
7465 
7466 		/* MAC stats */
7467 		t4_clr_port_stats(sc, port_id);
7468 
7469 		pi = sc->port[port_id];
7470 		if (pi->flags & PORT_INIT_DONE) {
7471 			struct sge_rxq *rxq;
7472 			struct sge_txq *txq;
7473 			struct sge_wrq *wrq;
7474 
7475 			for_each_rxq(pi, i, rxq) {
7476 #if defined(INET) || defined(INET6)
7477 				rxq->lro.lro_queued = 0;
7478 				rxq->lro.lro_flushed = 0;
7479 #endif
7480 				rxq->rxcsum = 0;
7481 				rxq->vlan_extraction = 0;
7482 			}
7483 
7484 			for_each_txq(pi, i, txq) {
7485 				txq->txcsum = 0;
7486 				txq->tso_wrs = 0;
7487 				txq->vlan_insertion = 0;
7488 				txq->imm_wrs = 0;
7489 				txq->sgl_wrs = 0;
7490 				txq->txpkt_wrs = 0;
7491 				txq->txpkts_wrs = 0;
7492 				txq->txpkts_pkts = 0;
7493 				txq->br->br_drops = 0;
7494 				txq->no_dmamap = 0;
7495 				txq->no_desc = 0;
7496 			}
7497 
7498 #ifdef TCP_OFFLOAD
7499 			/* nothing to clear for each ofld_rxq */
7500 
7501 			for_each_ofld_txq(pi, i, wrq) {
7502 				wrq->tx_wrs = 0;
7503 				wrq->no_desc = 0;
7504 			}
7505 #endif
7506 			wrq = &sc->sge.ctrlq[pi->port_id];
7507 			wrq->tx_wrs = 0;
7508 			wrq->no_desc = 0;
7509 		}
7510 		break;
7511 	}
7512 	case CHELSIO_T4_GET_TRACER:
7513 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7514 		break;
7515 	case CHELSIO_T4_SET_TRACER:
7516 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7517 		break;
7518 	default:
7519 		rc = EINVAL;
7520 	}
7521 
7522 	return (rc);
7523 }
7524 
7525 #ifdef TCP_OFFLOAD
7526 static int
7527 toe_capability(struct port_info *pi, int enable)
7528 {
7529 	int rc;
7530 	struct adapter *sc = pi->adapter;
7531 
7532 	ASSERT_SYNCHRONIZED_OP(sc);
7533 
7534 	if (!is_offload(sc))
7535 		return (ENODEV);
7536 
7537 	if (enable) {
7538 		if (!(sc->flags & FULL_INIT_DONE)) {
7539 			rc = cxgbe_init_synchronized(pi);
7540 			if (rc)
7541 				return (rc);
7542 		}
7543 
7544 		if (isset(&sc->offload_map, pi->port_id))
7545 			return (0);
7546 
7547 		if (!(sc->flags & TOM_INIT_DONE)) {
7548 			rc = t4_activate_uld(sc, ULD_TOM);
7549 			if (rc == EAGAIN) {
7550 				log(LOG_WARNING,
7551 				    "You must kldload t4_tom.ko before trying "
7552 				    "to enable TOE on a cxgbe interface.\n");
7553 			}
7554 			if (rc != 0)
7555 				return (rc);
7556 			KASSERT(sc->tom_softc != NULL,
7557 			    ("%s: TOM activated but softc NULL", __func__));
7558 			KASSERT(sc->flags & TOM_INIT_DONE,
7559 			    ("%s: TOM activated but flag not set", __func__));
7560 		}
7561 
7562 		setbit(&sc->offload_map, pi->port_id);
7563 	} else {
7564 		if (!isset(&sc->offload_map, pi->port_id))
7565 			return (0);
7566 
7567 		KASSERT(sc->flags & TOM_INIT_DONE,
7568 		    ("%s: TOM never initialized?", __func__));
7569 		clrbit(&sc->offload_map, pi->port_id);
7570 	}
7571 
7572 	return (0);
7573 }
7574 
7575 /*
7576  * Add an upper layer driver to the global list.
7577  */
7578 int
7579 t4_register_uld(struct uld_info *ui)
7580 {
7581 	int rc = 0;
7582 	struct uld_info *u;
7583 
7584 	sx_xlock(&t4_uld_list_lock);
7585 	SLIST_FOREACH(u, &t4_uld_list, link) {
7586 	    if (u->uld_id == ui->uld_id) {
7587 		    rc = EEXIST;
7588 		    goto done;
7589 	    }
7590 	}
7591 
7592 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7593 	ui->refcount = 0;
7594 done:
7595 	sx_xunlock(&t4_uld_list_lock);
7596 	return (rc);
7597 }
7598 
7599 int
7600 t4_unregister_uld(struct uld_info *ui)
7601 {
7602 	int rc = EINVAL;
7603 	struct uld_info *u;
7604 
7605 	sx_xlock(&t4_uld_list_lock);
7606 
7607 	SLIST_FOREACH(u, &t4_uld_list, link) {
7608 	    if (u == ui) {
7609 		    if (ui->refcount > 0) {
7610 			    rc = EBUSY;
7611 			    goto done;
7612 		    }
7613 
7614 		    SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7615 		    rc = 0;
7616 		    goto done;
7617 	    }
7618 	}
7619 done:
7620 	sx_xunlock(&t4_uld_list_lock);
7621 	return (rc);
7622 }
7623 
7624 int
7625 t4_activate_uld(struct adapter *sc, int id)
7626 {
7627 	int rc = EAGAIN;
7628 	struct uld_info *ui;
7629 
7630 	ASSERT_SYNCHRONIZED_OP(sc);
7631 
7632 	sx_slock(&t4_uld_list_lock);
7633 
7634 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7635 		if (ui->uld_id == id) {
7636 			rc = ui->activate(sc);
7637 			if (rc == 0)
7638 				ui->refcount++;
7639 			goto done;
7640 		}
7641 	}
7642 done:
7643 	sx_sunlock(&t4_uld_list_lock);
7644 
7645 	return (rc);
7646 }
7647 
7648 int
7649 t4_deactivate_uld(struct adapter *sc, int id)
7650 {
7651 	int rc = EINVAL;
7652 	struct uld_info *ui;
7653 
7654 	ASSERT_SYNCHRONIZED_OP(sc);
7655 
7656 	sx_slock(&t4_uld_list_lock);
7657 
7658 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7659 		if (ui->uld_id == id) {
7660 			rc = ui->deactivate(sc);
7661 			if (rc == 0)
7662 				ui->refcount--;
7663 			goto done;
7664 		}
7665 	}
7666 done:
7667 	sx_sunlock(&t4_uld_list_lock);
7668 
7669 	return (rc);
7670 }
7671 #endif
7672 
7673 /*
7674  * Come up with reasonable defaults for some of the tunables, provided they're
7675  * not set by the user (in which case we'll use the values as is).
7676  */
7677 static void
7678 tweak_tunables(void)
7679 {
7680 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7681 
7682 	if (t4_ntxq10g < 1)
7683 		t4_ntxq10g = min(nc, NTXQ_10G);
7684 
7685 	if (t4_ntxq1g < 1)
7686 		t4_ntxq1g = min(nc, NTXQ_1G);
7687 
7688 	if (t4_nrxq10g < 1)
7689 		t4_nrxq10g = min(nc, NRXQ_10G);
7690 
7691 	if (t4_nrxq1g < 1)
7692 		t4_nrxq1g = min(nc, NRXQ_1G);
7693 
7694 #ifdef TCP_OFFLOAD
7695 	if (t4_nofldtxq10g < 1)
7696 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7697 
7698 	if (t4_nofldtxq1g < 1)
7699 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7700 
7701 	if (t4_nofldrxq10g < 1)
7702 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7703 
7704 	if (t4_nofldrxq1g < 1)
7705 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7706 
7707 	if (t4_toecaps_allowed == -1)
7708 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7709 #else
7710 	if (t4_toecaps_allowed == -1)
7711 		t4_toecaps_allowed = 0;
7712 #endif
7713 
7714 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7715 		t4_tmr_idx_10g = TMR_IDX_10G;
7716 
7717 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7718 		t4_pktc_idx_10g = PKTC_IDX_10G;
7719 
7720 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7721 		t4_tmr_idx_1g = TMR_IDX_1G;
7722 
7723 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7724 		t4_pktc_idx_1g = PKTC_IDX_1G;
7725 
7726 	if (t4_qsize_txq < 128)
7727 		t4_qsize_txq = 128;
7728 
7729 	if (t4_qsize_rxq < 128)
7730 		t4_qsize_rxq = 128;
7731 	while (t4_qsize_rxq & 7)
7732 		t4_qsize_rxq++;
7733 
7734 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7735 }
7736 
7737 static int
7738 mod_event(module_t mod, int cmd, void *arg)
7739 {
7740 	int rc = 0;
7741 	static int loaded = 0;
7742 
7743 	switch (cmd) {
7744 	case MOD_LOAD:
7745 		if (atomic_fetchadd_int(&loaded, 1))
7746 			break;
7747 		t4_sge_modload();
7748 		sx_init(&t4_list_lock, "T4/T5 adapters");
7749 		SLIST_INIT(&t4_list);
7750 #ifdef TCP_OFFLOAD
7751 		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
7752 		SLIST_INIT(&t4_uld_list);
7753 #endif
7754 		t4_tracer_modload();
7755 		tweak_tunables();
7756 		break;
7757 
7758 	case MOD_UNLOAD:
7759 		if (atomic_fetchadd_int(&loaded, -1) > 1)
7760 			break;
7761 		t4_tracer_modunload();
7762 #ifdef TCP_OFFLOAD
7763 		sx_slock(&t4_uld_list_lock);
7764 		if (!SLIST_EMPTY(&t4_uld_list)) {
7765 			rc = EBUSY;
7766 			sx_sunlock(&t4_uld_list_lock);
7767 			break;
7768 		}
7769 		sx_sunlock(&t4_uld_list_lock);
7770 		sx_destroy(&t4_uld_list_lock);
7771 #endif
7772 		sx_slock(&t4_list_lock);
7773 		if (!SLIST_EMPTY(&t4_list)) {
7774 			rc = EBUSY;
7775 			sx_sunlock(&t4_list_lock);
7776 			break;
7777 		}
7778 		sx_sunlock(&t4_list_lock);
7779 		sx_destroy(&t4_list_lock);
7780 		break;
7781 	}
7782 
7783 	return (rc);
7784 }
7785 
7786 static devclass_t t4_devclass, t5_devclass;
7787 static devclass_t cxgbe_devclass, cxl_devclass;
7788 
7789 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7790 MODULE_VERSION(t4nex, 1);
7791 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7792 
7793 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7794 MODULE_VERSION(t5nex, 1);
7795 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7796 
7797 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7798 MODULE_VERSION(cxgbe, 1);
7799 
7800 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7801 MODULE_VERSION(cxl, 1);
7802