/* xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision a18eacbefdfa1085ca3db829e86ece78cd416493) */
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};

/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),

	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
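
/*
 * Userland tools (cxgbetool, for instance) reach t4_ioctl through the nexus
 * node created in t4_attach: /dev/<nameunit>, e.g. /dev/t4nex0 (illustrative
 * name; make_dev below uses device_get_nameunit()).
 */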

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
static SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

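/*
 * Illustrative note (not from the original source): TUNABLE_* values are
 * fetched from the kernel environment, so they are normally set in
 * /boot/loader.conf before the module loads, e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 */
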
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
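 * The timer index selects one of the SGE interrupt holdoff timers and the
 * packet-count index one of the packet-count thresholds; a pktc index of -1
 * is read here as "packet-count holdoff disabled" (our interpretation of the
 * defaults below, not stated in this file).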
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
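 * For example (illustrative): hw.cxgbe.interrupt_types="4" restricts the
 * driver to MSI-X, and "2" to MSI only.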
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
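 * E.g. (our reading of the values above) hw.cxgbe.fw_install="0" never
 * writes firmware to the card, while "2" lets the driver install the
 * firmware bundled with it even when the one on the card is usable.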
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 x 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	sc->traceq = -1;
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
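			/*
			 * capenable ^ mask is the capability set the caller
			 * requested, so `enable' below is the requested state
			 * of IFCAP_TOE.
			 */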
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

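	/*
	 * A flow's packets all land on the same txq (flowid % ntxq), which
	 * keeps per-flow ordering while spreading flows across queues.
	 */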
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
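		/*
		 * The WINDOW field encodes the window size; given the
		 * expression below, a 2^k byte aperture is programmed as
		 * k - 10, i.e. sizes are in units of 1KB (our reading).
		 */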
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
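 *
 * For example (illustrative): on T5 the start must be 128B aligned, so
 * positioning a window at addr 0x12345678 programs the window to start at
 * 0x12345600 and returns 0x78.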
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
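		 *
		 * For example (illustrative): two 10G ports with nrxq10g = 8
		 * and nofldrxq10g = 2 ask for T4_EXTRA_INTR + 2 * (8 + 2)
		 * vectors here.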
1751 		 */
1752 		iaq->nirq = T4_EXTRA_INTR;
1753 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1754 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1755 		if (iaq->nirq <= navail &&
1756 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1757 			iaq->intr_flags |= INTR_DIRECT;
1758 			goto allocate;
1759 		}
1760 
1761 		/*
1762 		 * Second best option: an interrupt vector for errors, one for
1763 		 * the firmware event queue, and one each for either NIC or
1764 		 * offload rxq's.
1765 		 */
1766 		iaq->nirq = T4_EXTRA_INTR;
1767 		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1768 		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1769 		if (iaq->nirq <= navail &&
1770 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
1771 			goto allocate;
1772 
1773 		/*
1774 		 * Next best option: an interrupt vector for errors, one for the
1775 		 * firmware event queue, and at least one per port.  At this
1776 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
1777 		 * what's available to us.
1778 		 */
1779 		iaq->nirq = T4_EXTRA_INTR;
1780 		iaq->nirq += n10g + n1g;
1781 		if (iaq->nirq <= navail) {
1782 			int leftover = navail - iaq->nirq;
1783 
1784 			if (n10g > 0) {
1785 				int target = max(nrxq10g, nofldrxq10g);
1786 
1787 				n = 1;
1788 				while (n < target && leftover >= n10g) {
1789 					leftover -= n10g;
1790 					iaq->nirq += n10g;
1791 					n++;
1792 				}
1793 				iaq->nrxq10g = min(n, nrxq10g);
1794 #ifdef TCP_OFFLOAD
1795 				if (is_offload(sc))
1796 					iaq->nofldrxq10g = min(n, nofldrxq10g);
1797 #endif
1798 			}
1799 
1800 			if (n1g > 0) {
1801 				int target = max(nrxq1g, nofldrxq1g);
1802 
1803 				n = 1;
1804 				while (n < target && leftover >= n1g) {
1805 					leftover -= n1g;
1806 					iaq->nirq += n1g;
1807 					n++;
1808 				}
1809 				iaq->nrxq1g = min(n, nrxq1g);
1810 #ifdef TCP_OFFLOAD
1811 				if (is_offload(sc))
1812 					iaq->nofldrxq1g = min(n, nofldrxq1g);
1813 #endif
1814 			}
1815 
1816 			if (itype != INTR_MSI || powerof2(iaq->nirq))
1817 				goto allocate;
1818 		}
1819 
1820 		/*
1821 		 * Least desirable option: one interrupt vector for everything.
1822 		 */
1823 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1824 #ifdef TCP_OFFLOAD
1825 		if (is_offload(sc))
1826 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1827 #endif
1828 
1829 allocate:
1830 		navail = iaq->nirq;
1831 		rc = 0;
1832 		if (itype == INTR_MSIX)
1833 			rc = pci_alloc_msix(sc->dev, &navail);
1834 		else if (itype == INTR_MSI)
1835 			rc = pci_alloc_msi(sc->dev, &navail);
1836 
1837 		if (rc == 0) {
1838 			if (navail == iaq->nirq)
1839 				return (0);
1840 
1841 			/*
1842 			 * Didn't get the number requested.  Use whatever number
1843 			 * the kernel is willing to allocate (it's in navail).
1844 			 */
1845 			device_printf(sc->dev, "fewer vectors than requested, "
1846 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1847 			    itype, iaq->nirq, navail);
1848 			pci_release_msi(sc->dev);
1849 			goto restart;
1850 		}
1851 
1852 		device_printf(sc->dev,
1853 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
1854 		    itype, rc, iaq->nirq, navail);
1855 	}
1856 
1857 	device_printf(sc->dev,
1858 	    "failed to find a usable interrupt type.  "
1859 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1860 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1861 
1862 	return (ENXIO);
1863 }
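
/*
 * Worked example for the strategy above (numbers invented): a 2-port 10G
 * card with nrxq10g = 8, nofldrxq10g = 2, and 20 MSI-X vectors available.
 * The best option needs T4_EXTRA_INTR (2) + 2 * (8 + 2) = 22 vectors and
 * does not fit; the second best needs 2 + 2 * max(8, 2) = 18 and does.
 * Each port keeps all 8 NIC and 2 offload rx queues, but only one of the
 * two sets takes direct interrupts (see setup_intr_handlers()).
 */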
1864 
1865 #define FW_VERSION(chip) ( \
1866     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1867     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1868     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1869     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1870 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
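
/*
 * For example (an illustrative expansion): FW_VERSION(T4) packs
 * T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD into one 32-bit word via the
 * V_FW_HDR_FW_VER_* field macros, and FW_INTFVER(T4, NIC) expands to
 * T4FW_HDR_INTFVER_NIC.
 */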
1871 
1872 struct fw_info {
1873 	uint8_t chip;
1874 	char *kld_name;
1875 	char *fw_mod_name;
1876 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1877 } fw_info[] = {
1878 	{
1879 		.chip = CHELSIO_T4,
1880 		.kld_name = "t4fw_cfg",
1881 		.fw_mod_name = "t4fw",
1882 		.fw_hdr = {
1883 			.chip = FW_HDR_CHIP_T4,
1884 			.fw_ver = htobe32_const(FW_VERSION(T4)),
1885 			.intfver_nic = FW_INTFVER(T4, NIC),
1886 			.intfver_vnic = FW_INTFVER(T4, VNIC),
1887 			.intfver_ofld = FW_INTFVER(T4, OFLD),
1888 			.intfver_ri = FW_INTFVER(T4, RI),
1889 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1890 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1891 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1892 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1893 		},
1894 	}, {
1895 		.chip = CHELSIO_T5,
1896 		.kld_name = "t5fw_cfg",
1897 		.fw_mod_name = "t5fw",
1898 		.fw_hdr = {
1899 			.chip = FW_HDR_CHIP_T5,
1900 			.fw_ver = htobe32_const(FW_VERSION(T5)),
1901 			.intfver_nic = FW_INTFVER(T5, NIC),
1902 			.intfver_vnic = FW_INTFVER(T5, VNIC),
1903 			.intfver_ofld = FW_INTFVER(T5, OFLD),
1904 			.intfver_ri = FW_INTFVER(T5, RI),
1905 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1906 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1907 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1908 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1909 		},
1910 	}
1911 };
1912 
1913 static struct fw_info *
1914 find_fw_info(int chip)
1915 {
1916 	int i;
1917 
1918 	for (i = 0; i < nitems(fw_info); i++) {
1919 		if (fw_info[i].chip == chip)
1920 			return (&fw_info[i]);
1921 	}
1922 	return (NULL);
1923 }
1924 
1925 /*
1926  * Is the given firmware API compatible with the one the driver was compiled
1927  * with?
1928  */
1929 static int
1930 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1931 {
1932 
1933 	/* short circuit if it's the exact same firmware version */
1934 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1935 		return (1);
1936 
1937 	/*
1938 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1939 	 * features that are supported in the driver.
1940 	 */
1941 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1942 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1943 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1944 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1945 		return (1);
1946 #undef SAME_INTF
1947 
1948 	return (0);
1949 }
1950 
1951 /*
1952  * The firmware in the KLD is usable, but should it be installed?  This
1953  * routine prints a detailed explanation if it decides that the KLD
1954  * firmware should be installed.
1955  */
1956 static int
1957 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1958 {
1959 	const char *reason;
1960 
1961 	if (!card_fw_usable) {
1962 		reason = "incompatible or unusable";
1963 		goto install;
1964 	}
1965 
1966 	if (k > c) {
1967 		reason = "older than the version bundled with this driver";
1968 		goto install;
1969 	}
1970 
1971 	if (t4_fw_install == 2 && k != c) {
1972 		reason = "different than the version bundled with this driver";
1973 		goto install;
1974 	}
1975 
1976 	return (0);
1977 
1978 install:
1979 	if (t4_fw_install == 0) {
1980 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1981 		    "but the driver is prohibited from installing a different "
1982 		    "firmware on the card.\n",
1983 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1984 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1985 
1986 		return (0);
1987 	}
1988 
1989 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1990 	    "installing firmware %u.%u.%u.%u on card.\n",
1991 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1992 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1993 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1994 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1995 
1996 	return (1);
1997 }
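
/*
 * Recap of how the t4_fw_install knob is honored above (an illustrative
 * summary, not new behaviour):
 *
 *	0	never install; only explain why the KLD firmware would
 *		otherwise have been used.
 *	1	install if the firmware on the card is unusable or older
 *		than the KLD's.
 *	2	also install when the KLD firmware merely differs from the
 *		card's, even if the card's is newer.
 */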

1998 /*
1999  * Establish contact with the firmware and determine if we are the master
2000  * driver or not, and whether we are responsible for chip initialization.
2001  */
2002 static int
2003 prep_firmware(struct adapter *sc)
2004 {
2005 	const struct firmware *fw = NULL, *default_cfg;
2006 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2007 	enum dev_state state;
2008 	struct fw_info *fw_info;
2009 	struct fw_hdr *card_fw;		/* fw on the card */
2010 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2011 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2012 					   against */
2013 
2014 	/* Contact firmware. */
2015 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2016 	if (rc < 0 || state == DEV_STATE_ERR) {
2017 		rc = -rc;
2018 		device_printf(sc->dev,
2019 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2020 		return (rc);
2021 	}
2022 	pf = rc;
2023 	if (pf == sc->mbox)
2024 		sc->flags |= MASTER_PF;
2025 	else if (state == DEV_STATE_UNINIT) {
2026 		/*
2027 		 * We didn't get to be the master so we definitely won't be
2028 		 * configuring the chip.  It's a bug if someone else hasn't
2029 		 * configured it already.
2030 		 */
2031 		device_printf(sc->dev, "couldn't be master(%d), "
2032 		    "device not already initialized either(%d).\n", rc, state);
2033 		return (EDOOFUS);
2034 	}
2035 
2036 	/* This is the firmware whose headers the driver was compiled against */
2037 	fw_info = find_fw_info(chip_id(sc));
2038 	if (fw_info == NULL) {
2039 		device_printf(sc->dev,
2040 		    "unable to look up firmware information for chip %d.\n",
2041 		    chip_id(sc));
2042 		return (EINVAL);
2043 	}
2044 	drv_fw = &fw_info->fw_hdr;
2045 
2046 	/*
2047 	 * The firmware KLD contains many modules.  The KLD name is also the
2048 	 * name of the module that contains the default config file.
2049 	 */
2050 	default_cfg = firmware_get(fw_info->kld_name);
2051 
2052 	/* Read the header of the firmware on the card */
2053 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2054 	rc = -t4_read_flash(sc, FLASH_FW_START,
2055 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2056 	if (rc == 0) {
2057 		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2058 	} else {
2059 		device_printf(sc->dev,
2060 		    "Unable to read card's firmware header: %d\n", rc);
2061 		card_fw_usable = 0;
2062 	}
2063 
2064 	/* This is the firmware in the KLD */
2065 	fw = firmware_get(fw_info->fw_mod_name);
2066 	if (fw != NULL) {
2067 		kld_fw = (const void *)fw->data;
2068 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2069 	} else {
2070 		kld_fw = NULL;
2071 		kld_fw_usable = 0;
2072 	}
2073 
2074 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2075 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2076 		/*
2077 		 * Common case: the firmware on the card is an exact match and
2078 		 * the KLD is an exact match too, or the KLD is
2079 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2080 		 * here -- use cxgbetool loadfw if you want to reinstall the
2081 		 * same firmware as the one on the card.
2082 		 */
2083 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2084 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2085 	    be32toh(card_fw->fw_ver))) {
2086 
2087 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2088 		if (rc != 0) {
2089 			device_printf(sc->dev,
2090 			    "failed to install firmware: %d\n", rc);
2091 			goto done;
2092 		}
2093 
2094 		/* Installed successfully, update the cached header too. */
2095 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2096 		card_fw_usable = 1;
2097 		need_fw_reset = 0;	/* already reset as part of load_fw */
2098 	}
2099 
2100 	if (!card_fw_usable) {
2101 		uint32_t d, c, k;
2102 
2103 		d = ntohl(drv_fw->fw_ver);
2104 		c = ntohl(card_fw->fw_ver);
2105 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2106 
2107 		device_printf(sc->dev, "Cannot find a usable firmware: "
2108 		    "fw_install %d, chip state %d, "
2109 		    "driver compiled with %d.%d.%d.%d, "
2110 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2111 		    t4_fw_install, state,
2112 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2113 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2114 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2115 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2116 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2117 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2118 		rc = EINVAL;
2119 		goto done;
2120 	}
2121 
2122 	/* We're using whatever's on the card and it's known to be good. */
2123 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2124 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2125 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2126 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2127 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2128 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2129 	t4_get_tp_version(sc, &sc->params.tp_vers);
2130 
2131 	/* Reset device */
2132 	if (need_fw_reset &&
2133 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2134 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2135 		if (rc != ETIMEDOUT && rc != EIO)
2136 			t4_fw_bye(sc, sc->mbox);
2137 		goto done;
2138 	}
2139 	sc->flags |= FW_OK;
2140 
2141 	rc = get_params__pre_init(sc);
2142 	if (rc != 0)
2143 		goto done; /* error message displayed already */
2144 
2145 	/* Partition adapter resources as specified in the config file. */
2146 	if (state == DEV_STATE_UNINIT) {
2147 
2148 		KASSERT(sc->flags & MASTER_PF,
2149 		    ("%s: trying to change chip settings when not master.",
2150 		    __func__));
2151 
2152 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2153 		if (rc != 0)
2154 			goto done;	/* error message displayed already */
2155 
2156 		t4_tweak_chip_settings(sc);
2157 
2158 		/* get basic stuff going */
2159 		rc = -t4_fw_initialize(sc, sc->mbox);
2160 		if (rc != 0) {
2161 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2162 			goto done;
2163 		}
2164 	} else {
2165 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2166 		sc->cfcsum = 0;
2167 	}
2168 
2169 done:
2170 	free(card_fw, M_CXGBE);
2171 	if (fw != NULL)
2172 		firmware_put(fw, FIRMWARE_UNLOAD);
2173 	if (default_cfg != NULL)
2174 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2175 
2176 	return (rc);
2177 }
2178 
2179 #define FW_PARAM_DEV(param) \
2180 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2181 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2182 #define FW_PARAM_PFVF(param) \
2183 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2184 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
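
/*
 * Example usage (a hypothetical one-parameter query, mirroring
 * get_params__pre_init() below):
 *
 *	uint32_t param = FW_PARAM_DEV(PORTVEC), val;
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *
 * FW_PARAM_DEV(param) packs the "device" mnemonic and the parameter index
 * into the 32-bit id that the firmware expects.
 */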
2185 
2186 /*
2187  * Partition chip resources for use between various PFs, VFs, etc.
2188  */
2189 static int
2190 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2191     const char *name_prefix)
2192 {
2193 	const struct firmware *cfg = NULL;
2194 	int rc = 0;
2195 	struct fw_caps_config_cmd caps;
2196 	uint32_t mtype, moff, finicsum, cfcsum;
2197 
2198 	/*
2199 	 * Figure out what configuration file to use.  Pick the default config
2200 	 * file for the card if the user hasn't specified one explicitly.
2201 	 */
2202 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2203 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2204 		/* Card specific overrides go here. */
2205 		if (pci_get_device(sc->dev) == 0x440a)
2206 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2207 		if (is_fpga(sc))
2208 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2209 	}
2210 
2211 	/*
2212 	 * We need to load another module if the profile is anything except
2213 	 * "default" or "flash".
2214 	 */
2215 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2216 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2217 		char s[32];
2218 
2219 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2220 		cfg = firmware_get(s);
2221 		if (cfg == NULL) {
2222 			if (default_cfg != NULL) {
2223 				device_printf(sc->dev,
2224 				    "unable to load module \"%s\" for "
2225 				    "configuration profile \"%s\", will use "
2226 				    "the default config file instead.\n",
2227 				    s, sc->cfg_file);
2228 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2229 				    "%s", DEFAULT_CF);
2230 			} else {
2231 				device_printf(sc->dev,
2232 				    "unable to load module \"%s\" for "
2233 				    "configuration profile \"%s\", will use "
2234 				    "the config file on the card's flash "
2235 				    "instead.\n", s, sc->cfg_file);
2236 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2237 				    "%s", FLASH_CF);
2238 			}
2239 		}
2240 	}
2241 
2242 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2243 	    default_cfg == NULL) {
2244 		device_printf(sc->dev,
2245 		    "default config file not available, will use the config "
2246 		    "file on the card's flash instead.\n");
2247 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2248 	}
2249 
2250 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2251 		u_int cflen, i, n;
2252 		const uint32_t *cfdata;
2253 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2254 
2255 		KASSERT(cfg != NULL || default_cfg != NULL,
2256 		    ("%s: no config to upload", __func__));
2257 
2258 		/*
2259 		 * Ask the firmware where it wants us to upload the config file.
2260 		 */
2261 		param = FW_PARAM_DEV(CF);
2262 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2263 		if (rc != 0) {
2264 			/* No support for config file?  Shouldn't happen. */
2265 			device_printf(sc->dev,
2266 			    "failed to query config file location: %d.\n", rc);
2267 			goto done;
2268 		}
2269 		mtype = G_FW_PARAMS_PARAM_Y(val);
2270 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2271 
2272 		/*
2273 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2274 		 * useless stuffing/comments at the end of the config file so
2275 		 * it's ok to simply throw away the last remaining bytes when
2276 		 * the config file is not an exact multiple of 4.  This also
2277 		 * helps with the validate_mt_off_len check.
2278 		 */
2279 		if (cfg != NULL) {
2280 			cflen = cfg->datasize & ~3;
2281 			cfdata = cfg->data;
2282 		} else {
2283 			cflen = default_cfg->datasize & ~3;
2284 			cfdata = default_cfg->data;
2285 		}
2286 
2287 		if (cflen > FLASH_CFG_MAX_SIZE) {
2288 			device_printf(sc->dev,
2289 			    "config file too long (%d, max allowed is %d).  "
2290 			    "Will try to use the config on the card, if any.\n",
2291 			    cflen, FLASH_CFG_MAX_SIZE);
2292 			goto use_config_on_flash;
2293 		}
2294 
2295 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2296 		if (rc != 0) {
2297 			device_printf(sc->dev,
2298 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2299 			    "Will try to use the config on the card, if any.\n",
2300 			    __func__, mtype, moff, cflen, rc);
2301 			goto use_config_on_flash;
2302 		}
2303 
2304 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2305 		while (cflen) {
2306 			off = position_memwin(sc, 2, addr);
2307 			n = min(cflen, mw_aperture - off);
2308 			for (i = 0; i < n; i += 4)
2309 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2310 			cflen -= n;
2311 			addr += n;
2312 		}
2313 	} else {
2314 use_config_on_flash:
2315 		mtype = FW_MEMTYPE_FLASH;
2316 		moff = t4_flash_cfg_addr(sc);
2317 	}
2318 
2319 	bzero(&caps, sizeof(caps));
2320 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2321 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2322 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2323 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2324 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2325 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2326 	if (rc != 0) {
2327 		device_printf(sc->dev,
2328 		    "failed to pre-process config file: %d "
2329 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2330 		goto done;
2331 	}
2332 
2333 	finicsum = be32toh(caps.finicsum);
2334 	cfcsum = be32toh(caps.cfcsum);
2335 	if (finicsum != cfcsum) {
2336 		device_printf(sc->dev,
2337 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2338 		    finicsum, cfcsum);
2339 	}
2340 	sc->cfcsum = cfcsum;
2341 
2342 #define LIMIT_CAPS(x) do { \
2343 	caps.x &= htobe16(t4_##x##_allowed); \
2344 	sc->x = be16toh(caps.x); \
2345 } while (0)
2346 
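/*
 * For example, LIMIT_CAPS(toecaps) expands (illustratively, matching the
 * macro above) to:
 *
 *	caps.toecaps &= htobe16(t4_toecaps_allowed);
 *	sc->toecaps = be16toh(caps.toecaps);
 *
 * i.e. the capability word is masked while still in firmware (big-endian)
 * byte order and the surviving bits are cached in host order in the softc.
 */
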
2347 	/*
2348 	 * Let the firmware know what features will (not) be used so it can tune
2349 	 * things accordingly.
2350 	 */
2351 	LIMIT_CAPS(linkcaps);
2352 	LIMIT_CAPS(niccaps);
2353 	LIMIT_CAPS(toecaps);
2354 	LIMIT_CAPS(rdmacaps);
2355 	LIMIT_CAPS(iscsicaps);
2356 	LIMIT_CAPS(fcoecaps);
2357 #undef LIMIT_CAPS
2358 
2359 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2360 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2361 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2362 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2363 	if (rc != 0) {
2364 		device_printf(sc->dev,
2365 		    "failed to process config file: %d.\n", rc);
2366 	}
2367 done:
2368 	if (cfg != NULL)
2369 		firmware_put(cfg, FIRMWARE_UNLOAD);
2370 	return (rc);
2371 }
2372 
2373 /*
2374  * Retrieve parameters that are needed (or nice to have) very early.
2375  */
2376 static int
2377 get_params__pre_init(struct adapter *sc)
2378 {
2379 	int rc;
2380 	uint32_t param[2], val[2];
2381 	struct fw_devlog_cmd cmd;
2382 	struct devlog_params *dlog = &sc->params.devlog;
2383 
2384 	param[0] = FW_PARAM_DEV(PORTVEC);
2385 	param[1] = FW_PARAM_DEV(CCLK);
2386 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2387 	if (rc != 0) {
2388 		device_printf(sc->dev,
2389 		    "failed to query parameters (pre_init): %d.\n", rc);
2390 		return (rc);
2391 	}
2392 
2393 	sc->params.portvec = val[0];
2394 	sc->params.nports = bitcount32(val[0]);
2395 	sc->params.vpd.cclk = val[1];
2396 
2397 	/* Read device log parameters. */
2398 	bzero(&cmd, sizeof(cmd));
2399 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2400 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2401 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2402 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2403 	if (rc != 0) {
2404 		device_printf(sc->dev,
2405 		    "failed to get devlog parameters: %d.\n", rc);
2406 		bzero(dlog, sizeof (*dlog));
2407 		rc = 0;	/* devlog isn't critical for device operation */
2408 	} else {
2409 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2410 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2411 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2412 		dlog->size = be32toh(cmd.memsize_devlog);
2413 	}
2414 
2415 	return (rc);
2416 }
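
/*
 * Example of the devlog address math above (value made up): a memaddr16
 * field of 0x0100 describes a log that starts at byte address 0x1000,
 * because the firmware reports the start in 16-byte units (hence << 4).
 */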
2417 
2418 /*
2419  * Retrieve various parameters that are of interest to the driver.  The device
2420  * has been initialized by the firmware at this point.
2421  */
2422 static int
2423 get_params__post_init(struct adapter *sc)
2424 {
2425 	int rc;
2426 	uint32_t param[7], val[7];
2427 	struct fw_caps_config_cmd caps;
2428 
2429 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2430 	param[1] = FW_PARAM_PFVF(EQ_START);
2431 	param[2] = FW_PARAM_PFVF(FILTER_START);
2432 	param[3] = FW_PARAM_PFVF(FILTER_END);
2433 	param[4] = FW_PARAM_PFVF(L2T_START);
2434 	param[5] = FW_PARAM_PFVF(L2T_END);
2435 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2436 	if (rc != 0) {
2437 		device_printf(sc->dev,
2438 		    "failed to query parameters (post_init): %d.\n", rc);
2439 		return (rc);
2440 	}
2441 
2442 	sc->sge.iq_start = val[0];
2443 	sc->sge.eq_start = val[1];
2444 	sc->tids.ftid_base = val[2];
2445 	sc->tids.nftids = val[3] - val[2] + 1;
2446 	sc->vres.l2t.start = val[4];
2447 	sc->vres.l2t.size = val[5] - val[4] + 1;
2448 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2449 	    ("%s: L2 table size (%u) larger than expected (%u)",
2450 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2451 
2452 	/* get capabilites */
2453 	bzero(&caps, sizeof(caps));
2454 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2455 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2456 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2457 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2458 	if (rc != 0) {
2459 		device_printf(sc->dev,
2460 		    "failed to get card capabilities: %d.\n", rc);
2461 		return (rc);
2462 	}
2463 
2464 	if (caps.toecaps) {
2465 		/* query offload-related parameters */
2466 		param[0] = FW_PARAM_DEV(NTID);
2467 		param[1] = FW_PARAM_PFVF(SERVER_START);
2468 		param[2] = FW_PARAM_PFVF(SERVER_END);
2469 		param[3] = FW_PARAM_PFVF(TDDP_START);
2470 		param[4] = FW_PARAM_PFVF(TDDP_END);
2471 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2472 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2473 		if (rc != 0) {
2474 			device_printf(sc->dev,
2475 			    "failed to query TOE parameters: %d.\n", rc);
2476 			return (rc);
2477 		}
2478 		sc->tids.ntids = val[0];
2479 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2480 		sc->tids.stid_base = val[1];
2481 		sc->tids.nstids = val[2] - val[1] + 1;
2482 		sc->vres.ddp.start = val[3];
2483 		sc->vres.ddp.size = val[4] - val[3] + 1;
2484 		sc->params.ofldq_wr_cred = val[5];
2485 		sc->params.offload = 1;
2486 	}
2487 	if (caps.rdmacaps) {
2488 		param[0] = FW_PARAM_PFVF(STAG_START);
2489 		param[1] = FW_PARAM_PFVF(STAG_END);
2490 		param[2] = FW_PARAM_PFVF(RQ_START);
2491 		param[3] = FW_PARAM_PFVF(RQ_END);
2492 		param[4] = FW_PARAM_PFVF(PBL_START);
2493 		param[5] = FW_PARAM_PFVF(PBL_END);
2494 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2495 		if (rc != 0) {
2496 			device_printf(sc->dev,
2497 			    "failed to query RDMA parameters(1): %d.\n", rc);
2498 			return (rc);
2499 		}
2500 		sc->vres.stag.start = val[0];
2501 		sc->vres.stag.size = val[1] - val[0] + 1;
2502 		sc->vres.rq.start = val[2];
2503 		sc->vres.rq.size = val[3] - val[2] + 1;
2504 		sc->vres.pbl.start = val[4];
2505 		sc->vres.pbl.size = val[5] - val[4] + 1;
2506 
2507 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2508 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2509 		param[2] = FW_PARAM_PFVF(CQ_START);
2510 		param[3] = FW_PARAM_PFVF(CQ_END);
2511 		param[4] = FW_PARAM_PFVF(OCQ_START);
2512 		param[5] = FW_PARAM_PFVF(OCQ_END);
2513 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2514 		if (rc != 0) {
2515 			device_printf(sc->dev,
2516 			    "failed to query RDMA parameters(2): %d.\n", rc);
2517 			return (rc);
2518 		}
2519 		sc->vres.qp.start = val[0];
2520 		sc->vres.qp.size = val[1] - val[0] + 1;
2521 		sc->vres.cq.start = val[2];
2522 		sc->vres.cq.size = val[3] - val[2] + 1;
2523 		sc->vres.ocq.start = val[4];
2524 		sc->vres.ocq.size = val[5] - val[4] + 1;
2525 	}
2526 	if (caps.iscsicaps) {
2527 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2528 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2529 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2530 		if (rc != 0) {
2531 			device_printf(sc->dev,
2532 			    "failed to query iSCSI parameters: %d.\n", rc);
2533 			return (rc);
2534 		}
2535 		sc->vres.iscsi.start = val[0];
2536 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2537 	}
2538 
2539 	/*
2540 	 * We've got the params we wanted to query via the firmware.  Now grab
2541 	 * some others directly from the chip.
2542 	 */
2543 	rc = t4_read_chip_settings(sc);
2544 
2545 	return (rc);
2546 }
2547 
2548 static int
2549 set_params__post_init(struct adapter *sc)
2550 {
2551 	uint32_t param, val;
2552 
2553 	/* ask for encapsulated CPLs */
2554 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2555 	val = 1;
2556 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2557 
2558 	return (0);
2559 }
2560 
2561 #undef FW_PARAM_PFVF
2562 #undef FW_PARAM_DEV
2563 
2564 static void
2565 t4_set_desc(struct adapter *sc)
2566 {
2567 	char buf[128];
2568 	struct adapter_params *p = &sc->params;
2569 
2570 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2571 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2572 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2573 
2574 	device_set_desc_copy(sc->dev, buf);
2575 }
2576 
2577 static void
2578 build_medialist(struct port_info *pi)
2579 {
2580 	struct ifmedia *media = &pi->media;
2581 	int data, m;
2582 
2583 	PORT_LOCK(pi);
2584 
2585 	ifmedia_removeall(media);
2586 
2587 	m = IFM_ETHER | IFM_FDX;
2588 	data = (pi->port_type << 8) | pi->mod_type;
2589 
2590 	switch (pi->port_type) {
2591 	case FW_PORT_TYPE_BT_XFI:
2592 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2593 		break;
2594 
2595 	case FW_PORT_TYPE_BT_XAUI:
2596 		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2597 		/* fall through */
2598 
2599 	case FW_PORT_TYPE_BT_SGMII:
2600 		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2601 		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2602 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2603 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2604 		break;
2605 
2606 	case FW_PORT_TYPE_CX4:
2607 		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2608 		ifmedia_set(media, m | IFM_10G_CX4);
2609 		break;
2610 
2611 	case FW_PORT_TYPE_SFP:
2612 	case FW_PORT_TYPE_FIBER_XFI:
2613 	case FW_PORT_TYPE_FIBER_XAUI:
2614 		switch (pi->mod_type) {
2615 
2616 		case FW_PORT_MOD_TYPE_LR:
2617 			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2618 			ifmedia_set(media, m | IFM_10G_LR);
2619 			break;
2620 
2621 		case FW_PORT_MOD_TYPE_SR:
2622 			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2623 			ifmedia_set(media, m | IFM_10G_SR);
2624 			break;
2625 
2626 		case FW_PORT_MOD_TYPE_LRM:
2627 			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2628 			ifmedia_set(media, m | IFM_10G_LRM);
2629 			break;
2630 
2631 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2632 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2633 			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2634 			ifmedia_set(media, m | IFM_10G_TWINAX);
2635 			break;
2636 
2637 		case FW_PORT_MOD_TYPE_NONE:
2638 			m &= ~IFM_FDX;
2639 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2640 			ifmedia_set(media, m | IFM_NONE);
2641 			break;
2642 
2643 		case FW_PORT_MOD_TYPE_NA:
2644 		case FW_PORT_MOD_TYPE_ER:
2645 		default:
2646 			device_printf(pi->dev,
2647 			    "unknown port_type (%d), mod_type (%d)\n",
2648 			    pi->port_type, pi->mod_type);
2649 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2650 			ifmedia_set(media, m | IFM_UNKNOWN);
2651 			break;
2652 		}
2653 		break;
2654 
2655 	case FW_PORT_TYPE_QSFP:
2656 		switch (pi->mod_type) {
2657 
2658 		case FW_PORT_MOD_TYPE_LR:
2659 			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2660 			ifmedia_set(media, m | IFM_40G_LR4);
2661 			break;
2662 
2663 		case FW_PORT_MOD_TYPE_SR:
2664 			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2665 			ifmedia_set(media, m | IFM_40G_SR4);
2666 			break;
2667 
2668 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2669 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2670 			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2671 			ifmedia_set(media, m | IFM_40G_CR4);
2672 			break;
2673 
2674 		case FW_PORT_MOD_TYPE_NONE:
2675 			m &= ~IFM_FDX;
2676 			ifmedia_add(media, m | IFM_NONE, data, NULL);
2677 			ifmedia_set(media, m | IFM_NONE);
2678 			break;
2679 
2680 		default:
2681 			device_printf(pi->dev,
2682 			    "unknown port_type (%d), mod_type (%d)\n",
2683 			    pi->port_type, pi->mod_type);
2684 			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2685 			ifmedia_set(media, m | IFM_UNKNOWN);
2686 			break;
2687 		}
2688 		break;
2689 
2690 	default:
2691 		device_printf(pi->dev,
2692 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2693 		    pi->mod_type);
2694 		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2695 		ifmedia_set(media, m | IFM_UNKNOWN);
2696 		break;
2697 	}
2698 
2699 	PORT_UNLOCK(pi);
2700 }
2701 
2702 #define FW_MAC_EXACT_CHUNK	7
2703 
2704 /*
2705  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2706  * indicates which parameters should be programmed (the rest are left alone).
2707  */
2708 static int
2709 update_mac_settings(struct port_info *pi, int flags)
2710 {
2711 	int rc;
2712 	struct ifnet *ifp = pi->ifp;
2713 	struct adapter *sc = pi->adapter;
2714 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2715 
2716 	ASSERT_SYNCHRONIZED_OP(sc);
2717 	KASSERT(flags, ("%s: not told what to update.", __func__));
2718 
2719 	if (flags & XGMAC_MTU)
2720 		mtu = ifp->if_mtu;
2721 
2722 	if (flags & XGMAC_PROMISC)
2723 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2724 
2725 	if (flags & XGMAC_ALLMULTI)
2726 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2727 
2728 	if (flags & XGMAC_VLANEX)
2729 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2730 
2731 	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2732 	    vlanex, false);
2733 	if (rc) {
2734 		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2735 		return (rc);
2736 	}
2737 
2738 	if (flags & XGMAC_UCADDR) {
2739 		uint8_t ucaddr[ETHER_ADDR_LEN];
2740 
2741 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2742 		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2743 		    ucaddr, true, true);
2744 		if (rc < 0) {
2745 			rc = -rc;
2746 			if_printf(ifp, "change_mac failed: %d\n", rc);
2747 			return (rc);
2748 		} else {
2749 			pi->xact_addr_filt = rc;
2750 			rc = 0;
2751 		}
2752 	}
2753 
2754 	if (flags & XGMAC_MCADDRS) {
2755 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2756 		int del = 1;
2757 		uint64_t hash = 0;
2758 		struct ifmultiaddr *ifma;
2759 		int i = 0, j;
2760 
2761 		if_maddr_rlock(ifp);
2762 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2763 			if (ifma->ifma_addr->sa_family != AF_LINK)
2764 				continue;
2765 			mcaddr[i++] =
2766 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2767 
2768 			if (i == FW_MAC_EXACT_CHUNK) {
2769 				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2770 				    del, i, mcaddr, NULL, &hash, 0);
2771 				if (rc < 0) {
2772 					rc = -rc;
2773 					for (j = 0; j < i; j++) {
2774 						if_printf(ifp,
2775 						    "failed to add mc address"
2776 						    " %02x:%02x:%02x:"
2777 						    "%02x:%02x:%02x rc=%d\n",
2778 						    mcaddr[j][0], mcaddr[j][1],
2779 						    mcaddr[j][2], mcaddr[j][3],
2780 						    mcaddr[j][4], mcaddr[j][5],
2781 						    rc);
2782 					}
2783 					goto mcfail;
2784 				}
2785 				del = 0;
2786 				i = 0;
2787 			}
2788 		}
2789 		if (i > 0) {
2790 			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2791 			    del, i, mcaddr, NULL, &hash, 0);
2792 			if (rc < 0) {
2793 				rc = -rc;
2794 				for (j = 0; j < i; j++) {
2795 					if_printf(ifp,
2796 					    "failed to add mc address"
2797 					    " %02x:%02x:%02x:"
2798 					    "%02x:%02x:%02x rc=%d\n",
2799 					    mcaddr[j][0], mcaddr[j][1],
2800 					    mcaddr[j][2], mcaddr[j][3],
2801 					    mcaddr[j][4], mcaddr[j][5],
2802 					    rc);
2803 				}
2804 				goto mcfail;
2805 			}
2806 		}
2807 
2808 		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2809 		if (rc != 0)
2810 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2811 mcfail:
2812 		if_maddr_runlock(ifp);
2813 	}
2814 
2815 	return (rc);
2816 }
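
/*
 * Worked example of the multicast chunking above (numbers invented): with
 * 17 multicast addresses on the interface, t4_alloc_mac_filt() is called
 * with chunks of 7, 7, and 3 addresses.  Only the first call passes
 * del = 1, so the old exact-match filters are replaced rather than
 * accumulated.
 */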
2817 
2818 int
2819 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2820     char *wmesg)
2821 {
2822 	int rc, pri;
2823 
2824 #ifdef WITNESS
2825 	/* the caller thinks it's ok to sleep, but is it really? */
2826 	if (flags & SLEEP_OK)
2827 		pause("t4slptst", 1);
2828 #endif
2829 
2830 	if (flags & INTR_OK)
2831 		pri = PCATCH;
2832 	else
2833 		pri = 0;
2834 
2835 	ADAPTER_LOCK(sc);
2836 	for (;;) {
2837 
2838 		if (pi && IS_DOOMED(pi)) {
2839 			rc = ENXIO;
2840 			goto done;
2841 		}
2842 
2843 		if (!IS_BUSY(sc)) {
2844 			rc = 0;
2845 			break;
2846 		}
2847 
2848 		if (!(flags & SLEEP_OK)) {
2849 			rc = EBUSY;
2850 			goto done;
2851 		}
2852 
2853 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2854 			rc = EINTR;
2855 			goto done;
2856 		}
2857 	}
2858 
2859 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2860 	SET_BUSY(sc);
2861 #ifdef INVARIANTS
2862 	sc->last_op = wmesg;
2863 	sc->last_op_thr = curthread;
2864 #endif
2865 
2866 done:
2867 	if (!(flags & HOLD_LOCK) || rc)
2868 		ADAPTER_UNLOCK(sc);
2869 
2870 	return (rc);
2871 }
2872 
2873 void
2874 end_synchronized_op(struct adapter *sc, int flags)
2875 {
2876 
2877 	if (flags & LOCK_HELD)
2878 		ADAPTER_LOCK_ASSERT_OWNED(sc);
2879 	else
2880 		ADAPTER_LOCK(sc);
2881 
2882 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2883 	CLR_BUSY(sc);
2884 	wakeup(&sc->flags);
2885 	ADAPTER_UNLOCK(sc);
2886 }
2887 
2888 static int
2889 cxgbe_init_synchronized(struct port_info *pi)
2890 {
2891 	struct adapter *sc = pi->adapter;
2892 	struct ifnet *ifp = pi->ifp;
2893 	int rc = 0;
2894 
2895 	ASSERT_SYNCHRONIZED_OP(sc);
2896 
2897 	if (isset(&sc->open_device_map, pi->port_id)) {
2898 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2899 		    ("mismatch between open_device_map and if_drv_flags"));
2900 		return (0);	/* already running */
2901 	}
2902 
2903 	if (!(sc->flags & FULL_INIT_DONE) &&
2904 	    ((rc = adapter_full_init(sc)) != 0))
2905 		return (rc);	/* error message displayed already */
2906 
2907 	if (!(pi->flags & PORT_INIT_DONE) &&
2908 	    ((rc = port_full_init(pi)) != 0))
2909 		return (rc); /* error message displayed already */
2910 
2911 	rc = update_mac_settings(pi, XGMAC_ALL);
2912 	if (rc)
2913 		goto done;	/* error message displayed already */
2914 
2915 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2916 	if (rc != 0) {
2917 		if_printf(ifp, "start_link failed: %d\n", rc);
2918 		goto done;
2919 	}
2920 
2921 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2922 	if (rc != 0) {
2923 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2924 		goto done;
2925 	}
2926 
2927 	/*
2928 	 * The first iq of the first port to come up is used for tracing.
2929 	 */
2930 	if (sc->traceq < 0) {
2931 		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2932 		t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
2933 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2934 		    V_QUEUENUMBER(sc->traceq));
2935 		pi->flags |= HAS_TRACEQ;
2936 	}
2937 
2938 	/* all ok */
2939 	setbit(&sc->open_device_map, pi->port_id);
2940 	PORT_LOCK(pi);
2941 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2942 	PORT_UNLOCK(pi);
2943 
2944 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2945 done:
2946 	if (rc != 0)
2947 		cxgbe_uninit_synchronized(pi);
2948 
2949 	return (rc);
2950 }
2951 
2952 /*
2953  * Idempotent.
2954  */
2955 static int
2956 cxgbe_uninit_synchronized(struct port_info *pi)
2957 {
2958 	struct adapter *sc = pi->adapter;
2959 	struct ifnet *ifp = pi->ifp;
2960 	int rc;
2961 
2962 	ASSERT_SYNCHRONIZED_OP(sc);
2963 
2964 	/*
2965 	 * Disable the VI so that all its data in either direction is discarded
2966 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2967 	 * tick) intact as the TP can deliver negative advice or data that it's
2968 	 * holding in its RAM (for an offloaded connection) even after the VI is
2969 	 * disabled.
2970 	 */
2971 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2972 	if (rc) {
2973 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2974 		return (rc);
2975 	}
2976 
2977 	clrbit(&sc->open_device_map, pi->port_id);
2978 	PORT_LOCK(pi);
2979 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2980 	PORT_UNLOCK(pi);
2981 
2982 	pi->link_cfg.link_ok = 0;
2983 	pi->link_cfg.speed = 0;
2984 	pi->linkdnrc = -1;
2985 	t4_os_link_changed(sc, pi->port_id, 0, -1);
2986 
2987 	return (0);
2988 }
2989 
2990 /*
2991  * It is ok for this function to fail midway and return right away.  t4_detach
2992  * will walk the entire sc->irq list and clean up whatever is valid.
2993  */
2994 static int
2995 setup_intr_handlers(struct adapter *sc)
2996 {
2997 	int rc, rid, p, q;
2998 	char s[8];
2999 	struct irq *irq;
3000 	struct port_info *pi;
3001 	struct sge_rxq *rxq;
3002 #ifdef TCP_OFFLOAD
3003 	struct sge_ofld_rxq *ofld_rxq;
3004 #endif
3005 
3006 	/*
3007 	 * Set up interrupts.
3008 	 */
3009 	irq = &sc->irq[0];
3010 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3011 	if (sc->intr_count == 1) {
3012 		KASSERT(!(sc->flags & INTR_DIRECT),
3013 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
3014 
3015 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3016 		if (rc != 0)
3017 			return (rc);
3018 	} else {
3019 		/* Multiple interrupts. */
3020 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3021 		    ("%s: too few intr.", __func__));
3022 
3023 		/* The first one is always error intr */
3024 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3025 		if (rc != 0)
3026 			return (rc);
3027 		irq++;
3028 		rid++;
3029 
3030 		/* The second one is always the firmware event queue */
3031 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3032 		    "evt");
3033 		if (rc != 0)
3034 			return (rc);
3035 		irq++;
3036 		rid++;
3037 
3038 		/*
3039 		 * Note that if INTR_DIRECT is not set then exactly one of the
3040 		 * NIC rx queues or the TOE rx queues will be taking direct
3041 		 * interrupts.
3042 		 *
3043 		 * There is no need to check for is_offload(sc) as nofldrxq
3044 		 * will be 0 if offload is disabled.
3045 		 */
3046 		for_each_port(sc, p) {
3047 			pi = sc->port[p];
3048 
3049 #ifdef TCP_OFFLOAD
3050 			/*
3051 			 * Skip over the NIC queues if they aren't taking direct
3052 			 * interrupts.
3053 			 */
3054 			if (!(sc->flags & INTR_DIRECT) &&
3055 			    pi->nofldrxq > pi->nrxq)
3056 				goto ofld_queues;
3057 #endif
3058 			rxq = &sc->sge.rxq[pi->first_rxq];
3059 			for (q = 0; q < pi->nrxq; q++, rxq++) {
3060 				snprintf(s, sizeof(s), "%d.%d", p, q);
3061 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3062 				    s);
3063 				if (rc != 0)
3064 					return (rc);
3065 				irq++;
3066 				rid++;
3067 			}
3068 
3069 #ifdef TCP_OFFLOAD
3070 			/*
3071 			 * Skip over the offload queues if they aren't taking
3072 			 * direct interrupts.
3073 			 */
3074 			if (!(sc->flags & INTR_DIRECT))
3075 				continue;
3076 ofld_queues:
3077 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3078 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3079 				snprintf(s, sizeof(s), "%d,%d", p, q);
3080 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3081 				    ofld_rxq, s);
3082 				if (rc != 0)
3083 					return (rc);
3084 				irq++;
3085 				rid++;
3086 			}
3087 #endif
3088 		}
3089 	}
3090 
3091 	return (0);
3092 }
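
/*
 * Resulting vector layout for an illustrative NIC-only configuration with
 * INTR_DIRECT set, 2 ports, and 4 rx queues per port (names as handed to
 * bus_describe_intr() via t4_alloc_irq()):
 *
 *	vector 0: "err"		vectors 2-5: "0.0" .. "0.3"
 *	vector 1: "evt"		vectors 6-9: "1.0" .. "1.3"
 */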
3093 
3094 static int
3095 adapter_full_init(struct adapter *sc)
3096 {
3097 	int rc, i;
3098 
3099 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3100 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3101 	    ("%s: FULL_INIT_DONE already", __func__));
3102 
3103 	/*
3104 	 * Queues that belong to the adapter (not any particular port).
3105 	 */
3106 	rc = t4_setup_adapter_queues(sc);
3107 	if (rc != 0)
3108 		goto done;
3109 
3110 	for (i = 0; i < nitems(sc->tq); i++) {
3111 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3112 		    taskqueue_thread_enqueue, &sc->tq[i]);
3113 		if (sc->tq[i] == NULL) {
3114 			device_printf(sc->dev,
3115 			    "failed to allocate task queue %d\n", i);
3116 			rc = ENOMEM;
3117 			goto done;
3118 		}
3119 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3120 		    device_get_nameunit(sc->dev), i);
3121 	}
3122 
3123 	t4_intr_enable(sc);
3124 	sc->flags |= FULL_INIT_DONE;
3125 done:
3126 	if (rc != 0)
3127 		adapter_full_uninit(sc);
3128 
3129 	return (rc);
3130 }
3131 
3132 static int
3133 adapter_full_uninit(struct adapter *sc)
3134 {
3135 	int i;
3136 
3137 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3138 
3139 	t4_teardown_adapter_queues(sc);
3140 
3141 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3142 		taskqueue_free(sc->tq[i]);
3143 		sc->tq[i] = NULL;
3144 	}
3145 
3146 	sc->flags &= ~FULL_INIT_DONE;
3147 
3148 	return (0);
3149 }
3150 
3151 static int
3152 port_full_init(struct port_info *pi)
3153 {
3154 	struct adapter *sc = pi->adapter;
3155 	struct ifnet *ifp = pi->ifp;
3156 	uint16_t *rss;
3157 	struct sge_rxq *rxq;
3158 	int rc, i;
3159 
3160 	ASSERT_SYNCHRONIZED_OP(sc);
3161 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3162 	    ("%s: PORT_INIT_DONE already", __func__));
3163 
3164 	sysctl_ctx_init(&pi->ctx);
3165 	pi->flags |= PORT_SYSCTL_CTX;
3166 
3167 	/*
3168 	 * Allocate tx/rx/fl queues for this port.
3169 	 */
3170 	rc = t4_setup_port_queues(pi);
3171 	if (rc != 0)
3172 		goto done;	/* error message displayed already */
3173 
3174 	/*
3175 	 * Setup RSS for this port.
3176 	 */
3177 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3178 	    M_ZERO | M_WAITOK);
3179 	for_each_rxq(pi, i, rxq) {
3180 		rss[i] = rxq->iq.abs_id;
3181 	}
3182 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3183 	    pi->rss_size, rss, pi->nrxq);
3184 	free(rss, M_CXGBE);
3185 	if (rc != 0) {
3186 		if_printf(ifp, "rss_config failed: %d\n", rc);
3187 		goto done;
3188 	}
3189 
3190 	pi->flags |= PORT_INIT_DONE;
3191 done:
3192 	if (rc != 0)
3193 		port_full_uninit(pi);
3194 
3195 	return (rc);
3196 }
3197 
3198 /*
3199  * Idempotent.
3200  */
3201 static int
3202 port_full_uninit(struct port_info *pi)
3203 {
3204 	struct adapter *sc = pi->adapter;
3205 	int i;
3206 	struct sge_rxq *rxq;
3207 	struct sge_txq *txq;
3208 #ifdef TCP_OFFLOAD
3209 	struct sge_ofld_rxq *ofld_rxq;
3210 	struct sge_wrq *ofld_txq;
3211 #endif
3212 
3213 	if (pi->flags & PORT_INIT_DONE) {
3214 
3215 		/* Need to quiesce queues.  XXX: ctrl queues? */
3216 
3217 		for_each_txq(pi, i, txq) {
3218 			quiesce_eq(sc, &txq->eq);
3219 		}
3220 
3221 #ifdef TCP_OFFLOAD
3222 		for_each_ofld_txq(pi, i, ofld_txq) {
3223 			quiesce_eq(sc, &ofld_txq->eq);
3224 		}
3225 #endif
3226 
3227 		for_each_rxq(pi, i, rxq) {
3228 			quiesce_iq(sc, &rxq->iq);
3229 			quiesce_fl(sc, &rxq->fl);
3230 		}
3231 
3232 #ifdef TCP_OFFLOAD
3233 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3234 			quiesce_iq(sc, &ofld_rxq->iq);
3235 			quiesce_fl(sc, &ofld_rxq->fl);
3236 		}
3237 #endif
3238 	}
3239 
3240 	t4_teardown_port_queues(pi);
3241 	pi->flags &= ~PORT_INIT_DONE;
3242 
3243 	return (0);
3244 }
3245 
3246 static void
3247 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3248 {
3249 	EQ_LOCK(eq);
3250 	eq->flags |= EQ_DOOMED;
3251 
3252 	/*
3253 	 * Wait for the response to a credit flush if one's
3254 	 * pending.
3255 	 */
3256 	while (eq->flags & EQ_CRFLUSHED)
3257 		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3258 	EQ_UNLOCK(eq);
3259 
3260 	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3261 	pause("callout", 10);		/* Still iffy */
3262 
3263 	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3264 }
3265 
3266 static void
3267 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3268 {
3269 	(void) sc;	/* unused */
3270 
3271 	/* Synchronize with the interrupt handler */
3272 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3273 		pause("iqfree", 1);
3274 }
3275 
3276 static void
3277 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3278 {
3279 	mtx_lock(&sc->sfl_lock);
3280 	FL_LOCK(fl);
3281 	fl->flags |= FL_DOOMED;
3282 	FL_UNLOCK(fl);
3283 	mtx_unlock(&sc->sfl_lock);
3284 
3285 	callout_drain(&sc->sfl_callout);
3286 	KASSERT((fl->flags & FL_STARVING) == 0,
3287 	    ("%s: still starving", __func__));
3288 }
3289 
3290 static int
3291 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3292     driver_intr_t *handler, void *arg, char *name)
3293 {
3294 	int rc;
3295 
3296 	irq->rid = rid;
3297 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3298 	    RF_SHAREABLE | RF_ACTIVE);
3299 	if (irq->res == NULL) {
3300 		device_printf(sc->dev,
3301 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3302 		return (ENOMEM);
3303 	}
3304 
3305 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3306 	    NULL, handler, arg, &irq->tag);
3307 	if (rc != 0) {
3308 		device_printf(sc->dev,
3309 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3310 		    "failed to set up interrupt for rid %d, name %s: %d\n",
3311 	} else if (name)
3312 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3313 
3314 	return (rc);
3315 }
3316 
3317 static int
3318 t4_free_irq(struct adapter *sc, struct irq *irq)
3319 {
3320 	if (irq->tag)
3321 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3322 	if (irq->res)
3323 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3324 
3325 	bzero(irq, sizeof(*irq));
3326 
3327 	return (0);
3328 }
3329 
3330 static void
3331 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3332     unsigned int end)
3333 {
3334 	uint32_t *p = (uint32_t *)(buf + start);
3335 
3336 	for ( ; start <= end; start += sizeof(uint32_t))
3337 		*p++ = t4_read_reg(sc, start);
3338 }
3339 
3340 static void
3341 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3342 {
3343 	int i, n;
3344 	const unsigned int *reg_ranges;
3345 	static const unsigned int t4_reg_ranges[] = {
3346 		0x1008, 0x1108,
3347 		0x1180, 0x11b4,
3348 		0x11fc, 0x123c,
3349 		0x1300, 0x173c,
3350 		0x1800, 0x18fc,
3351 		0x3000, 0x30d8,
3352 		0x30e0, 0x5924,
3353 		0x5960, 0x59d4,
3354 		0x5a00, 0x5af8,
3355 		0x6000, 0x6098,
3356 		0x6100, 0x6150,
3357 		0x6200, 0x6208,
3358 		0x6240, 0x6248,
3359 		0x6280, 0x6338,
3360 		0x6370, 0x638c,
3361 		0x6400, 0x643c,
3362 		0x6500, 0x6524,
3363 		0x6a00, 0x6a38,
3364 		0x6a60, 0x6a78,
3365 		0x6b00, 0x6b84,
3366 		0x6bf0, 0x6c84,
3367 		0x6cf0, 0x6d84,
3368 		0x6df0, 0x6e84,
3369 		0x6ef0, 0x6f84,
3370 		0x6ff0, 0x7084,
3371 		0x70f0, 0x7184,
3372 		0x71f0, 0x7284,
3373 		0x72f0, 0x7384,
3374 		0x73f0, 0x7450,
3375 		0x7500, 0x7530,
3376 		0x7600, 0x761c,
3377 		0x7680, 0x76cc,
3378 		0x7700, 0x7798,
3379 		0x77c0, 0x77fc,
3380 		0x7900, 0x79fc,
3381 		0x7b00, 0x7c38,
3382 		0x7d00, 0x7efc,
3383 		0x8dc0, 0x8e1c,
3384 		0x8e30, 0x8e78,
3385 		0x8ea0, 0x8f6c,
3386 		0x8fc0, 0x9074,
3387 		0x90fc, 0x90fc,
3388 		0x9400, 0x9458,
3389 		0x9600, 0x96bc,
3390 		0x9800, 0x9808,
3391 		0x9820, 0x983c,
3392 		0x9850, 0x9864,
3393 		0x9c00, 0x9c6c,
3394 		0x9c80, 0x9cec,
3395 		0x9d00, 0x9d6c,
3396 		0x9d80, 0x9dec,
3397 		0x9e00, 0x9e6c,
3398 		0x9e80, 0x9eec,
3399 		0x9f00, 0x9f6c,
3400 		0x9f80, 0x9fec,
3401 		0xd004, 0xd03c,
3402 		0xdfc0, 0xdfe0,
3403 		0xe000, 0xea7c,
3404 		0xf000, 0x11110,
3405 		0x11118, 0x11190,
3406 		0x19040, 0x1906c,
3407 		0x19078, 0x19080,
3408 		0x1908c, 0x19124,
3409 		0x19150, 0x191b0,
3410 		0x191d0, 0x191e8,
3411 		0x19238, 0x1924c,
3412 		0x193f8, 0x19474,
3413 		0x19490, 0x194f8,
3414 		0x19800, 0x19f30,
3415 		0x1a000, 0x1a06c,
3416 		0x1a0b0, 0x1a120,
3417 		0x1a128, 0x1a138,
3418 		0x1a190, 0x1a1c4,
3419 		0x1a1fc, 0x1a1fc,
3420 		0x1e040, 0x1e04c,
3421 		0x1e284, 0x1e28c,
3422 		0x1e2c0, 0x1e2c0,
3423 		0x1e2e0, 0x1e2e0,
3424 		0x1e300, 0x1e384,
3425 		0x1e3c0, 0x1e3c8,
3426 		0x1e440, 0x1e44c,
3427 		0x1e684, 0x1e68c,
3428 		0x1e6c0, 0x1e6c0,
3429 		0x1e6e0, 0x1e6e0,
3430 		0x1e700, 0x1e784,
3431 		0x1e7c0, 0x1e7c8,
3432 		0x1e840, 0x1e84c,
3433 		0x1ea84, 0x1ea8c,
3434 		0x1eac0, 0x1eac0,
3435 		0x1eae0, 0x1eae0,
3436 		0x1eb00, 0x1eb84,
3437 		0x1ebc0, 0x1ebc8,
3438 		0x1ec40, 0x1ec4c,
3439 		0x1ee84, 0x1ee8c,
3440 		0x1eec0, 0x1eec0,
3441 		0x1eee0, 0x1eee0,
3442 		0x1ef00, 0x1ef84,
3443 		0x1efc0, 0x1efc8,
3444 		0x1f040, 0x1f04c,
3445 		0x1f284, 0x1f28c,
3446 		0x1f2c0, 0x1f2c0,
3447 		0x1f2e0, 0x1f2e0,
3448 		0x1f300, 0x1f384,
3449 		0x1f3c0, 0x1f3c8,
3450 		0x1f440, 0x1f44c,
3451 		0x1f684, 0x1f68c,
3452 		0x1f6c0, 0x1f6c0,
3453 		0x1f6e0, 0x1f6e0,
3454 		0x1f700, 0x1f784,
3455 		0x1f7c0, 0x1f7c8,
3456 		0x1f840, 0x1f84c,
3457 		0x1fa84, 0x1fa8c,
3458 		0x1fac0, 0x1fac0,
3459 		0x1fae0, 0x1fae0,
3460 		0x1fb00, 0x1fb84,
3461 		0x1fbc0, 0x1fbc8,
3462 		0x1fc40, 0x1fc4c,
3463 		0x1fe84, 0x1fe8c,
3464 		0x1fec0, 0x1fec0,
3465 		0x1fee0, 0x1fee0,
3466 		0x1ff00, 0x1ff84,
3467 		0x1ffc0, 0x1ffc8,
3468 		0x20000, 0x2002c,
3469 		0x20100, 0x2013c,
3470 		0x20190, 0x201c8,
3471 		0x20200, 0x20318,
3472 		0x20400, 0x20528,
3473 		0x20540, 0x20614,
3474 		0x21000, 0x21040,
3475 		0x2104c, 0x21060,
3476 		0x210c0, 0x210ec,
3477 		0x21200, 0x21268,
3478 		0x21270, 0x21284,
3479 		0x212fc, 0x21388,
3480 		0x21400, 0x21404,
3481 		0x21500, 0x21518,
3482 		0x2152c, 0x2153c,
3483 		0x21550, 0x21554,
3484 		0x21600, 0x21600,
3485 		0x21608, 0x21628,
3486 		0x21630, 0x2163c,
3487 		0x21700, 0x2171c,
3488 		0x21780, 0x2178c,
3489 		0x21800, 0x21c38,
3490 		0x21c80, 0x21d7c,
3491 		0x21e00, 0x21e04,
3492 		0x22000, 0x2202c,
3493 		0x22100, 0x2213c,
3494 		0x22190, 0x221c8,
3495 		0x22200, 0x22318,
3496 		0x22400, 0x22528,
3497 		0x22540, 0x22614,
3498 		0x23000, 0x23040,
3499 		0x2304c, 0x23060,
3500 		0x230c0, 0x230ec,
3501 		0x23200, 0x23268,
3502 		0x23270, 0x23284,
3503 		0x232fc, 0x23388,
3504 		0x23400, 0x23404,
3505 		0x23500, 0x23518,
3506 		0x2352c, 0x2353c,
3507 		0x23550, 0x23554,
3508 		0x23600, 0x23600,
3509 		0x23608, 0x23628,
3510 		0x23630, 0x2363c,
3511 		0x23700, 0x2371c,
3512 		0x23780, 0x2378c,
3513 		0x23800, 0x23c38,
3514 		0x23c80, 0x23d7c,
3515 		0x23e00, 0x23e04,
3516 		0x24000, 0x2402c,
3517 		0x24100, 0x2413c,
3518 		0x24190, 0x241c8,
3519 		0x24200, 0x24318,
3520 		0x24400, 0x24528,
3521 		0x24540, 0x24614,
3522 		0x25000, 0x25040,
3523 		0x2504c, 0x25060,
3524 		0x250c0, 0x250ec,
3525 		0x25200, 0x25268,
3526 		0x25270, 0x25284,
3527 		0x252fc, 0x25388,
3528 		0x25400, 0x25404,
3529 		0x25500, 0x25518,
3530 		0x2552c, 0x2553c,
3531 		0x25550, 0x25554,
3532 		0x25600, 0x25600,
3533 		0x25608, 0x25628,
3534 		0x25630, 0x2563c,
3535 		0x25700, 0x2571c,
3536 		0x25780, 0x2578c,
3537 		0x25800, 0x25c38,
3538 		0x25c80, 0x25d7c,
3539 		0x25e00, 0x25e04,
3540 		0x26000, 0x2602c,
3541 		0x26100, 0x2613c,
3542 		0x26190, 0x261c8,
3543 		0x26200, 0x26318,
3544 		0x26400, 0x26528,
3545 		0x26540, 0x26614,
3546 		0x27000, 0x27040,
3547 		0x2704c, 0x27060,
3548 		0x270c0, 0x270ec,
3549 		0x27200, 0x27268,
3550 		0x27270, 0x27284,
3551 		0x272fc, 0x27388,
3552 		0x27400, 0x27404,
3553 		0x27500, 0x27518,
3554 		0x2752c, 0x2753c,
3555 		0x27550, 0x27554,
3556 		0x27600, 0x27600,
3557 		0x27608, 0x27628,
3558 		0x27630, 0x2763c,
3559 		0x27700, 0x2771c,
3560 		0x27780, 0x2778c,
3561 		0x27800, 0x27c38,
3562 		0x27c80, 0x27d7c,
3563 		0x27e00, 0x27e04
3564 	};
3565 	static const unsigned int t5_reg_ranges[] = {
3566 		0x1008, 0x1148,
3567 		0x1180, 0x11b4,
3568 		0x11fc, 0x123c,
3569 		0x1280, 0x173c,
3570 		0x1800, 0x18fc,
3571 		0x3000, 0x3028,
3572 		0x3060, 0x30d8,
3573 		0x30e0, 0x30fc,
3574 		0x3140, 0x357c,
3575 		0x35a8, 0x35cc,
3576 		0x35ec, 0x35ec,
3577 		0x3600, 0x5624,
3578 		0x56cc, 0x575c,
3579 		0x580c, 0x5814,
3580 		0x5890, 0x58bc,
3581 		0x5940, 0x59dc,
3582 		0x59fc, 0x5a18,
3583 		0x5a60, 0x5a9c,
3584 		0x5b94, 0x5bfc,
3585 		0x6000, 0x6040,
3586 		0x6058, 0x614c,
3587 		0x7700, 0x7798,
3588 		0x77c0, 0x78fc,
3589 		0x7b00, 0x7c54,
3590 		0x7d00, 0x7efc,
3591 		0x8dc0, 0x8de0,
3592 		0x8df8, 0x8e84,
3593 		0x8ea0, 0x8f84,
3594 		0x8fc0, 0x90f8,
3595 		0x9400, 0x9470,
3596 		0x9600, 0x96f4,
3597 		0x9800, 0x9808,
3598 		0x9820, 0x983c,
3599 		0x9850, 0x9864,
3600 		0x9c00, 0x9c6c,
3601 		0x9c80, 0x9cec,
3602 		0x9d00, 0x9d6c,
3603 		0x9d80, 0x9dec,
3604 		0x9e00, 0x9e6c,
3605 		0x9e80, 0x9eec,
3606 		0x9f00, 0x9f6c,
3607 		0x9f80, 0xa020,
3608 		0xd004, 0xd03c,
3609 		0xdfc0, 0xdfe0,
3610 		0xe000, 0x11088,
3611 		0x1109c, 0x11110,
3612 		0x11118, 0x1117c,
3613 		0x11190, 0x11204,
3614 		0x19040, 0x1906c,
3615 		0x19078, 0x19080,
3616 		0x1908c, 0x19124,
3617 		0x19150, 0x191b0,
3618 		0x191d0, 0x191e8,
3619 		0x19238, 0x19290,
3620 		0x193f8, 0x19474,
3621 		0x19490, 0x194cc,
3622 		0x194f0, 0x194f8,
3623 		0x19c00, 0x19c60,
3624 		0x19c94, 0x19e10,
3625 		0x19e50, 0x19f34,
3626 		0x19f40, 0x19f50,
3627 		0x19f90, 0x19fe4,
3628 		0x1a000, 0x1a06c,
3629 		0x1a0b0, 0x1a120,
3630 		0x1a128, 0x1a138,
3631 		0x1a190, 0x1a1c4,
3632 		0x1a1fc, 0x1a1fc,
3633 		0x1e008, 0x1e00c,
3634 		0x1e040, 0x1e04c,
3635 		0x1e284, 0x1e290,
3636 		0x1e2c0, 0x1e2c0,
3637 		0x1e2e0, 0x1e2e0,
3638 		0x1e300, 0x1e384,
3639 		0x1e3c0, 0x1e3c8,
3640 		0x1e408, 0x1e40c,
3641 		0x1e440, 0x1e44c,
3642 		0x1e684, 0x1e690,
3643 		0x1e6c0, 0x1e6c0,
3644 		0x1e6e0, 0x1e6e0,
3645 		0x1e700, 0x1e784,
3646 		0x1e7c0, 0x1e7c8,
3647 		0x1e808, 0x1e80c,
3648 		0x1e840, 0x1e84c,
3649 		0x1ea84, 0x1ea90,
3650 		0x1eac0, 0x1eac0,
3651 		0x1eae0, 0x1eae0,
3652 		0x1eb00, 0x1eb84,
3653 		0x1ebc0, 0x1ebc8,
3654 		0x1ec08, 0x1ec0c,
3655 		0x1ec40, 0x1ec4c,
3656 		0x1ee84, 0x1ee90,
3657 		0x1eec0, 0x1eec0,
3658 		0x1eee0, 0x1eee0,
3659 		0x1ef00, 0x1ef84,
3660 		0x1efc0, 0x1efc8,
3661 		0x1f008, 0x1f00c,
3662 		0x1f040, 0x1f04c,
3663 		0x1f284, 0x1f290,
3664 		0x1f2c0, 0x1f2c0,
3665 		0x1f2e0, 0x1f2e0,
3666 		0x1f300, 0x1f384,
3667 		0x1f3c0, 0x1f3c8,
3668 		0x1f408, 0x1f40c,
3669 		0x1f440, 0x1f44c,
3670 		0x1f684, 0x1f690,
3671 		0x1f6c0, 0x1f6c0,
3672 		0x1f6e0, 0x1f6e0,
3673 		0x1f700, 0x1f784,
3674 		0x1f7c0, 0x1f7c8,
3675 		0x1f808, 0x1f80c,
3676 		0x1f840, 0x1f84c,
3677 		0x1fa84, 0x1fa90,
3678 		0x1fac0, 0x1fac0,
3679 		0x1fae0, 0x1fae0,
3680 		0x1fb00, 0x1fb84,
3681 		0x1fbc0, 0x1fbc8,
3682 		0x1fc08, 0x1fc0c,
3683 		0x1fc40, 0x1fc4c,
3684 		0x1fe84, 0x1fe90,
3685 		0x1fec0, 0x1fec0,
3686 		0x1fee0, 0x1fee0,
3687 		0x1ff00, 0x1ff84,
3688 		0x1ffc0, 0x1ffc8,
3689 		0x30000, 0x30030,
3690 		0x30100, 0x30144,
3691 		0x30190, 0x301d0,
3692 		0x30200, 0x30318,
3693 		0x30400, 0x3052c,
3694 		0x30540, 0x3061c,
3695 		0x30800, 0x30834,
3696 		0x308c0, 0x30908,
3697 		0x30910, 0x309ac,
3698 		0x30a00, 0x30a2c,
3699 		0x30a44, 0x30a50,
3700 		0x30a74, 0x30c24,
3701 		0x30d00, 0x30d00,
3702 		0x30d08, 0x30d14,
3703 		0x30d1c, 0x30d20,
3704 		0x30d3c, 0x30d50,
3705 		0x31200, 0x3120c,
3706 		0x31220, 0x31220,
3707 		0x31240, 0x31240,
3708 		0x31600, 0x3160c,
3709 		0x31a00, 0x31a1c,
3710 		0x31e00, 0x31e20,
3711 		0x31e38, 0x31e3c,
3712 		0x31e80, 0x31e80,
3713 		0x31e88, 0x31ea8,
3714 		0x31eb0, 0x31eb4,
3715 		0x31ec8, 0x31ed4,
3716 		0x31fb8, 0x32004,
3717 		0x32200, 0x32200,
3718 		0x32208, 0x32240,
3719 		0x32248, 0x32280,
3720 		0x32288, 0x322c0,
3721 		0x322c8, 0x322fc,
3722 		0x32600, 0x32630,
3723 		0x32a00, 0x32abc,
3724 		0x32b00, 0x32b70,
3725 		0x33000, 0x33048,
3726 		0x33060, 0x3309c,
3727 		0x330f0, 0x33148,
3728 		0x33160, 0x3319c,
3729 		0x331f0, 0x332e4,
3730 		0x332f8, 0x333e4,
3731 		0x333f8, 0x33448,
3732 		0x33460, 0x3349c,
3733 		0x334f0, 0x33548,
3734 		0x33560, 0x3359c,
3735 		0x335f0, 0x336e4,
3736 		0x336f8, 0x337e4,
3737 		0x337f8, 0x337fc,
3738 		0x33814, 0x33814,
3739 		0x3382c, 0x3382c,
3740 		0x33880, 0x3388c,
3741 		0x338e8, 0x338ec,
3742 		0x33900, 0x33948,
3743 		0x33960, 0x3399c,
3744 		0x339f0, 0x33ae4,
3745 		0x33af8, 0x33b10,
3746 		0x33b28, 0x33b28,
3747 		0x33b3c, 0x33b50,
3748 		0x33bf0, 0x33c10,
3749 		0x33c28, 0x33c28,
3750 		0x33c3c, 0x33c50,
3751 		0x33cf0, 0x33cfc,
3752 		0x34000, 0x34030,
3753 		0x34100, 0x34144,
3754 		0x34190, 0x341d0,
3755 		0x34200, 0x34318,
3756 		0x34400, 0x3452c,
3757 		0x34540, 0x3461c,
3758 		0x34800, 0x34834,
3759 		0x348c0, 0x34908,
3760 		0x34910, 0x349ac,
3761 		0x34a00, 0x34a2c,
3762 		0x34a44, 0x34a50,
3763 		0x34a74, 0x34c24,
3764 		0x34d00, 0x34d00,
3765 		0x34d08, 0x34d14,
3766 		0x34d1c, 0x34d20,
3767 		0x34d3c, 0x34d50,
3768 		0x35200, 0x3520c,
3769 		0x35220, 0x35220,
3770 		0x35240, 0x35240,
3771 		0x35600, 0x3560c,
3772 		0x35a00, 0x35a1c,
3773 		0x35e00, 0x35e20,
3774 		0x35e38, 0x35e3c,
3775 		0x35e80, 0x35e80,
3776 		0x35e88, 0x35ea8,
3777 		0x35eb0, 0x35eb4,
3778 		0x35ec8, 0x35ed4,
3779 		0x35fb8, 0x36004,
3780 		0x36200, 0x36200,
3781 		0x36208, 0x36240,
3782 		0x36248, 0x36280,
3783 		0x36288, 0x362c0,
3784 		0x362c8, 0x362fc,
3785 		0x36600, 0x36630,
3786 		0x36a00, 0x36abc,
3787 		0x36b00, 0x36b70,
3788 		0x37000, 0x37048,
3789 		0x37060, 0x3709c,
3790 		0x370f0, 0x37148,
3791 		0x37160, 0x3719c,
3792 		0x371f0, 0x372e4,
3793 		0x372f8, 0x373e4,
3794 		0x373f8, 0x37448,
3795 		0x37460, 0x3749c,
3796 		0x374f0, 0x37548,
3797 		0x37560, 0x3759c,
3798 		0x375f0, 0x376e4,
3799 		0x376f8, 0x377e4,
3800 		0x377f8, 0x377fc,
3801 		0x37814, 0x37814,
3802 		0x3782c, 0x3782c,
3803 		0x37880, 0x3788c,
3804 		0x378e8, 0x378ec,
3805 		0x37900, 0x37948,
3806 		0x37960, 0x3799c,
3807 		0x379f0, 0x37ae4,
3808 		0x37af8, 0x37b10,
3809 		0x37b28, 0x37b28,
3810 		0x37b3c, 0x37b50,
3811 		0x37bf0, 0x37c10,
3812 		0x37c28, 0x37c28,
3813 		0x37c3c, 0x37c50,
3814 		0x37cf0, 0x37cfc,
3815 		0x38000, 0x38030,
3816 		0x38100, 0x38144,
3817 		0x38190, 0x381d0,
3818 		0x38200, 0x38318,
3819 		0x38400, 0x3852c,
3820 		0x38540, 0x3861c,
3821 		0x38800, 0x38834,
3822 		0x388c0, 0x38908,
3823 		0x38910, 0x389ac,
3824 		0x38a00, 0x38a2c,
3825 		0x38a44, 0x38a50,
3826 		0x38a74, 0x38c24,
3827 		0x38d00, 0x38d00,
3828 		0x38d08, 0x38d14,
3829 		0x38d1c, 0x38d20,
3830 		0x38d3c, 0x38d50,
3831 		0x39200, 0x3920c,
3832 		0x39220, 0x39220,
3833 		0x39240, 0x39240,
3834 		0x39600, 0x3960c,
3835 		0x39a00, 0x39a1c,
3836 		0x39e00, 0x39e20,
3837 		0x39e38, 0x39e3c,
3838 		0x39e80, 0x39e80,
3839 		0x39e88, 0x39ea8,
3840 		0x39eb0, 0x39eb4,
3841 		0x39ec8, 0x39ed4,
3842 		0x39fb8, 0x3a004,
3843 		0x3a200, 0x3a200,
3844 		0x3a208, 0x3a240,
3845 		0x3a248, 0x3a280,
3846 		0x3a288, 0x3a2c0,
3847 		0x3a2c8, 0x3a2fc,
3848 		0x3a600, 0x3a630,
3849 		0x3aa00, 0x3aabc,
3850 		0x3ab00, 0x3ab70,
3851 		0x3b000, 0x3b048,
3852 		0x3b060, 0x3b09c,
3853 		0x3b0f0, 0x3b148,
3854 		0x3b160, 0x3b19c,
3855 		0x3b1f0, 0x3b2e4,
3856 		0x3b2f8, 0x3b3e4,
3857 		0x3b3f8, 0x3b448,
3858 		0x3b460, 0x3b49c,
3859 		0x3b4f0, 0x3b548,
3860 		0x3b560, 0x3b59c,
3861 		0x3b5f0, 0x3b6e4,
3862 		0x3b6f8, 0x3b7e4,
3863 		0x3b7f8, 0x3b7fc,
3864 		0x3b814, 0x3b814,
3865 		0x3b82c, 0x3b82c,
3866 		0x3b880, 0x3b88c,
3867 		0x3b8e8, 0x3b8ec,
3868 		0x3b900, 0x3b948,
3869 		0x3b960, 0x3b99c,
3870 		0x3b9f0, 0x3bae4,
3871 		0x3baf8, 0x3bb10,
3872 		0x3bb28, 0x3bb28,
3873 		0x3bb3c, 0x3bb50,
3874 		0x3bbf0, 0x3bc10,
3875 		0x3bc28, 0x3bc28,
3876 		0x3bc3c, 0x3bc50,
3877 		0x3bcf0, 0x3bcfc,
3878 		0x3c000, 0x3c030,
3879 		0x3c100, 0x3c144,
3880 		0x3c190, 0x3c1d0,
3881 		0x3c200, 0x3c318,
3882 		0x3c400, 0x3c52c,
3883 		0x3c540, 0x3c61c,
3884 		0x3c800, 0x3c834,
3885 		0x3c8c0, 0x3c908,
3886 		0x3c910, 0x3c9ac,
3887 		0x3ca00, 0x3ca2c,
3888 		0x3ca44, 0x3ca50,
3889 		0x3ca74, 0x3cc24,
3890 		0x3cd00, 0x3cd00,
3891 		0x3cd08, 0x3cd14,
3892 		0x3cd1c, 0x3cd20,
3893 		0x3cd3c, 0x3cd50,
3894 		0x3d200, 0x3d20c,
3895 		0x3d220, 0x3d220,
3896 		0x3d240, 0x3d240,
3897 		0x3d600, 0x3d60c,
3898 		0x3da00, 0x3da1c,
3899 		0x3de00, 0x3de20,
3900 		0x3de38, 0x3de3c,
3901 		0x3de80, 0x3de80,
3902 		0x3de88, 0x3dea8,
3903 		0x3deb0, 0x3deb4,
3904 		0x3dec8, 0x3ded4,
3905 		0x3dfb8, 0x3e004,
3906 		0x3e200, 0x3e200,
3907 		0x3e208, 0x3e240,
3908 		0x3e248, 0x3e280,
3909 		0x3e288, 0x3e2c0,
3910 		0x3e2c8, 0x3e2fc,
3911 		0x3e600, 0x3e630,
3912 		0x3ea00, 0x3eabc,
3913 		0x3eb00, 0x3eb70,
3914 		0x3f000, 0x3f048,
3915 		0x3f060, 0x3f09c,
3916 		0x3f0f0, 0x3f148,
3917 		0x3f160, 0x3f19c,
3918 		0x3f1f0, 0x3f2e4,
3919 		0x3f2f8, 0x3f3e4,
3920 		0x3f3f8, 0x3f448,
3921 		0x3f460, 0x3f49c,
3922 		0x3f4f0, 0x3f548,
3923 		0x3f560, 0x3f59c,
3924 		0x3f5f0, 0x3f6e4,
3925 		0x3f6f8, 0x3f7e4,
3926 		0x3f7f8, 0x3f7fc,
3927 		0x3f814, 0x3f814,
3928 		0x3f82c, 0x3f82c,
3929 		0x3f880, 0x3f88c,
3930 		0x3f8e8, 0x3f8ec,
3931 		0x3f900, 0x3f948,
3932 		0x3f960, 0x3f99c,
3933 		0x3f9f0, 0x3fae4,
3934 		0x3faf8, 0x3fb10,
3935 		0x3fb28, 0x3fb28,
3936 		0x3fb3c, 0x3fb50,
3937 		0x3fbf0, 0x3fc10,
3938 		0x3fc28, 0x3fc28,
3939 		0x3fc3c, 0x3fc50,
3940 		0x3fcf0, 0x3fcfc,
3941 		0x40000, 0x4000c,
3942 		0x40040, 0x40068,
3943 		0x4007c, 0x40144,
3944 		0x40180, 0x4018c,
3945 		0x40200, 0x40298,
3946 		0x402ac, 0x4033c,
3947 		0x403f8, 0x403fc,
3948 		0x41304, 0x413c4,
3949 		0x41400, 0x4141c,
3950 		0x41480, 0x414d0,
3951 		0x44000, 0x44078,
3952 		0x440c0, 0x44278,
3953 		0x442c0, 0x44478,
3954 		0x444c0, 0x44678,
3955 		0x446c0, 0x44878,
3956 		0x448c0, 0x449fc,
3957 		0x45000, 0x45068,
3958 		0x45080, 0x45084,
3959 		0x450a0, 0x450b0,
3960 		0x45200, 0x45268,
3961 		0x45280, 0x45284,
3962 		0x452a0, 0x452b0,
3963 		0x460c0, 0x460e4,
3964 		0x47000, 0x4708c,
3965 		0x47200, 0x47250,
3966 		0x47400, 0x47420,
3967 		0x47600, 0x47618,
3968 		0x47800, 0x47814,
3969 		0x48000, 0x4800c,
3970 		0x48040, 0x48068,
3971 		0x4807c, 0x48144,
3972 		0x48180, 0x4818c,
3973 		0x48200, 0x48298,
3974 		0x482ac, 0x4833c,
3975 		0x483f8, 0x483fc,
3976 		0x49304, 0x493c4,
3977 		0x49400, 0x4941c,
3978 		0x49480, 0x494d0,
3979 		0x4c000, 0x4c078,
3980 		0x4c0c0, 0x4c278,
3981 		0x4c2c0, 0x4c478,
3982 		0x4c4c0, 0x4c678,
3983 		0x4c6c0, 0x4c878,
3984 		0x4c8c0, 0x4c9fc,
3985 		0x4d000, 0x4d068,
3986 		0x4d080, 0x4d084,
3987 		0x4d0a0, 0x4d0b0,
3988 		0x4d200, 0x4d268,
3989 		0x4d280, 0x4d284,
3990 		0x4d2a0, 0x4d2b0,
3991 		0x4e0c0, 0x4e0e4,
3992 		0x4f000, 0x4f08c,
3993 		0x4f200, 0x4f250,
3994 		0x4f400, 0x4f420,
3995 		0x4f600, 0x4f618,
3996 		0x4f800, 0x4f814,
3997 		0x50000, 0x500cc,
3998 		0x50400, 0x50400,
3999 		0x50800, 0x508cc,
4000 		0x50c00, 0x50c00,
4001 		0x51000, 0x5101c,
4002 		0x51300, 0x51308,
4003 	};
4004 
4005 	if (is_t4(sc)) {
4006 		reg_ranges = &t4_reg_ranges[0];
4007 		n = nitems(t4_reg_ranges);
4008 	} else {
4009 		reg_ranges = &t5_reg_ranges[0];
4010 		n = nitems(t5_reg_ranges);
4011 	}
4012 
4013 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4014 	for (i = 0; i < n; i += 2)
4015 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4016 }
4017 
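/*
 * Callout that runs once a second while the interface is up and refreshes
 * the ifnet counters from the MPS port statistics.  Pause frames are
 * counted by the MAC but never reach the stack, so they are subtracted
 * from the frame, octet, and multicast totals.
 */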
4018 static void
4019 cxgbe_tick(void *arg)
4020 {
4021 	struct port_info *pi = arg;
4022 	struct ifnet *ifp = pi->ifp;
4023 	struct sge_txq *txq;
4024 	int i, drops;
4025 	struct port_stats *s = &pi->stats;
4026 
4027 	PORT_LOCK(pi);
4028 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4029 		PORT_UNLOCK(pi);
4030 		return;	/* without scheduling another callout */
4031 	}
4032 
4033 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4034 
4035 	ifp->if_opackets = s->tx_frames - s->tx_pause;
4036 	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4037 	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4038 	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4039 	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4040 	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4041 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4042 	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4043 	    s->rx_trunc3;
4044 
4045 	drops = s->tx_drop;
4046 	for_each_txq(pi, i, txq)
4047 		drops += txq->br->br_drops;
4048 	ifp->if_snd.ifq_drops = drops;
4049 
4050 	ifp->if_oerrors = s->tx_error_frames;
4051 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4052 	    s->rx_fcs_err + s->rx_len_err;
4053 
4054 	callout_schedule(&pi->tick, hz);
4055 	PORT_UNLOCK(pi);
4056 }
4057 
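/*
 * VLAN interface-config event handler: makes the parent cxgbe ifnet the
 * cookie of any VLAN created on top of it.
 */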
4058 static void
4059 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4060 {
4061 	struct ifnet *vlan;
4062 
4063 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4064 		return;
4065 
4066 	vlan = VLAN_DEVAT(ifp, vid);
4067 	VLAN_SETCOOKIE(vlan, ifp);
4068 }
4069 
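/*
 * Default handler for CPL messages without a registered handler: panic
 * under INVARIANTS, otherwise log the stray opcode and free the mbuf.
 */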
4070 static int
4071 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4072 {
4073 
4074 #ifdef INVARIANTS
4075 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4076 	    __func__, rss->opcode, iq, m);
4077 #else
4078 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4079 	    __func__, rss->opcode, iq, m);
4080 	m_freem(m);
4081 #endif
4082 	return (EDOOFUS);
4083 }
4084 
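/*
 * Install a handler for the given CPL opcode (h == NULL restores the
 * default).  A single atomic store updates the dispatch table entry so
 * concurrent rx processing never sees a torn pointer.
 */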
4085 int
4086 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4087 {
4088 	uintptr_t *loc, new;
4089 
4090 	if (opcode >= nitems(sc->cpl_handler))
4091 		return (EINVAL);
4092 
4093 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4094 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4095 	atomic_store_rel_ptr(loc, new);
4096 
4097 	return (0);
4098 }
4099 
4100 static int
4101 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4102 {
4103 
4104 #ifdef INVARIANTS
4105 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4106 #else
4107 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4108 	    __func__, iq, ctrl);
4109 #endif
4110 	return (EDOOFUS);
4111 }
4112 
4113 int
4114 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4115 {
4116 	uintptr_t *loc, new;
4117 
4118 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4119 	loc = (uintptr_t *) &sc->an_handler;
4120 	atomic_store_rel_ptr(loc, new);
4121 
4122 	return (0);
4123 }
4124 
4125 static int
4126 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4127 {
4128 	const struct cpl_fw6_msg *cpl =
4129 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4130 
4131 #ifdef INVARIANTS
4132 	panic("%s: fw_msg type %d", __func__, cpl->type);
4133 #else
4134 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4135 #endif
4136 	return (EDOOFUS);
4137 }
4138 
4139 int
4140 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4141 {
4142 	uintptr_t *loc, new;
4143 
4144 	if (type >= nitems(sc->fw_msg_handler))
4145 		return (EINVAL);
4146 
4147 	/*
4148 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4149 	 * handler dispatch table.  Reject any attempt to install a handler for
4150 	 * this subtype.
4151 	 */
4152 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4153 		return (EINVAL);
4154 
4155 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4156 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4157 	atomic_store_rel_ptr(loc, new);
4158 
4159 	return (0);
4160 }
4161 
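/*
 * Set up the dev.t4nex.X sysctl tree: read-only identification and
 * capability nodes first, then (under SBUF_DRAIN) a CTLFLAG_SKIP "misc"
 * subtree of diagnostic dumps, and finally the TOE tunables.
 */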
4162 static int
4163 t4_sysctls(struct adapter *sc)
4164 {
4165 	struct sysctl_ctx_list *ctx;
4166 	struct sysctl_oid *oid;
4167 	struct sysctl_oid_list *children, *c0;
4168 	static char *caps[] = {
4169 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4170 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
4171 		"\20\1TOE",				/* caps[2] toecaps */
4172 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4173 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4174 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4175 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4176 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4177 	};
4178 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4179 
4180 	ctx = device_get_sysctl_ctx(sc->dev);
4181 
4182 	/*
4183 	 * dev.t4nex.X.
4184 	 */
4185 	oid = device_get_sysctl_tree(sc->dev);
4186 	c0 = children = SYSCTL_CHILDREN(oid);
4187 
4188 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4189 	    sc->params.nports, "# of ports");
4190 
4191 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4192 	    NULL, chip_rev(sc), "chip hardware revision");
4193 
4194 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4195 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4196 
4197 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4198 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4199 
4200 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4201 	    sc->cfcsum, "config file checksum");
4202 
4203 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4204 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4205 	    sysctl_bitfield, "A", "available doorbells");
4206 
4207 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4208 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4209 	    sysctl_bitfield, "A", "available link capabilities");
4210 
4211 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4212 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4213 	    sysctl_bitfield, "A", "available NIC capabilities");
4214 
4215 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4216 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4217 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4218 
4219 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4220 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4221 	    sysctl_bitfield, "A", "available RDMA capabilities");
4222 
4223 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4224 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4225 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4226 
4227 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4228 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4229 	    sysctl_bitfield, "A", "available FCoE capabilities");
4230 
4231 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4232 	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4233 
4234 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4235 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4236 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4237 	    "interrupt holdoff timer values (us)");
4238 
4239 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4240 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4241 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4242 	    "interrupt holdoff packet counter values");
4243 
4244 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4245 	    NULL, sc->tids.nftids, "number of filters");
4246 
4247 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4248 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4249 	    "chip temperature (in Celsius)");
4250 
4251 	t4_sge_sysctls(sc, ctx, children);
4252 
4253 	sc->lro_timeout = 100;
4254 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4255 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4256 
4257 #ifdef SBUF_DRAIN
4258 	/*
4259 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4260 	 */
4261 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4262 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4263 	    "logs and miscellaneous information");
4264 	children = SYSCTL_CHILDREN(oid);
4265 
4266 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4267 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4268 	    sysctl_cctrl, "A", "congestion control");
4269 
4270 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4271 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4272 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4273 
4274 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4275 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4276 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4277 
4278 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4279 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4280 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4281 
4282 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4283 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4284 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4285 
4286 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4287 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4288 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4289 
4290 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4291 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4292 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4293 
4294 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4295 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4296 	    sysctl_cim_la, "A", "CIM logic analyzer");
4297 
4298 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4299 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4300 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4301 
4302 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4303 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4304 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4305 
4306 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4307 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4308 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4309 
4310 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4311 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4312 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4313 
4314 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4315 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4316 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4317 
4318 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4319 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4320 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4321 
4322 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4323 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4324 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4325 
4326 	if (is_t5(sc)) {
4327 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4328 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4329 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4330 
4331 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4332 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4333 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4334 	}
4335 
4336 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4337 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4338 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4339 
4340 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4341 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4343 
4344 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4345 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346 	    sysctl_cpl_stats, "A", "CPL statistics");
4347 
4348 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4349 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4350 	    sysctl_ddp_stats, "A", "DDP statistics");
4351 
4352 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4353 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354 	    sysctl_devlog, "A", "firmware's device log");
4355 
4356 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4357 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4358 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4359 
4360 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4361 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4362 	    sysctl_hw_sched, "A", "hardware scheduler");
4363 
4364 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4365 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4366 	    sysctl_l2t, "A", "hardware L2 table");
4367 
4368 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4369 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4370 	    sysctl_lb_stats, "A", "loopback statistics");
4371 
4372 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4373 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4374 	    sysctl_meminfo, "A", "memory regions");
4375 
4376 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4377 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4378 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4379 
4380 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4381 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4382 	    sysctl_path_mtus, "A", "path MTUs");
4383 
4384 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4385 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4386 	    sysctl_pm_stats, "A", "PM statistics");
4387 
4388 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4389 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4390 	    sysctl_rdma_stats, "A", "RDMA statistics");
4391 
4392 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4393 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4394 	    sysctl_tcp_stats, "A", "TCP statistics");
4395 
4396 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4397 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4398 	    sysctl_tids, "A", "TID information");
4399 
4400 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4401 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4402 	    sysctl_tp_err_stats, "A", "TP error statistics");
4403 
4404 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4405 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4406 	    sysctl_tp_la, "A", "TP logic analyzer");
4407 
4408 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4409 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4410 	    sysctl_tx_rate, "A", "Tx rate");
4411 
4412 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4413 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4414 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4415 
4416 	if (is_t5(sc)) {
4417 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4418 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4419 		    sysctl_wcwr_stats, "A", "write combined work requests");
4420 	}
4421 #endif
4422 
4423 #ifdef TCP_OFFLOAD
4424 	if (is_offload(sc)) {
4425 		/*
4426 		 * dev.t4nex.X.toe.
4427 		 */
4428 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4429 		    NULL, "TOE parameters");
4430 		children = SYSCTL_CHILDREN(oid);
4431 
4432 		sc->tt.sndbuf = 256 * 1024;
4433 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4434 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4435 
4436 		sc->tt.ddp = 0;
4437 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4438 		    &sc->tt.ddp, 0, "DDP allowed");
4439 
4440 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4441 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4442 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4443 
4444 		sc->tt.ddp_thres =
4445 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4446 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4447 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4448 
4449 		sc->tt.rx_coalesce = 1;
4450 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4451 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4452 	}
4453 #endif
4454 
4456 	return (0);
4457 }
4458 
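/*
 * Set up the dev.cxgbe.X sysctl tree for a port: queue layout, holdoff
 * and queue-size knobs, and a "stats" subtree that reads the MPS port
 * counters from the hardware.
 */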
4459 static int
4460 cxgbe_sysctls(struct port_info *pi)
4461 {
4462 	struct sysctl_ctx_list *ctx;
4463 	struct sysctl_oid *oid;
4464 	struct sysctl_oid_list *children;
4465 
4466 	ctx = device_get_sysctl_ctx(pi->dev);
4467 
4468 	/*
4469 	 * dev.cxgbe.X.
4470 	 */
4471 	oid = device_get_sysctl_tree(pi->dev);
4472 	children = SYSCTL_CHILDREN(oid);
4473 
4474 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4475 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4476 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4477 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4478 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4479 		    "PHY temperature (in Celsius)");
4480 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4481 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4482 		    "PHY firmware version");
4483 	}
4484 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4485 	    &pi->nrxq, 0, "# of rx queues");
4486 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4487 	    &pi->ntxq, 0, "# of tx queues");
4488 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4489 	    &pi->first_rxq, 0, "index of first rx queue");
4490 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4491 	    &pi->first_txq, 0, "index of first tx queue");
4492 
4493 #ifdef TCP_OFFLOAD
4494 	if (is_offload(pi->adapter)) {
4495 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4496 		    &pi->nofldrxq, 0,
4497 		    "# of rx queues for offloaded TCP connections");
4498 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4499 		    &pi->nofldtxq, 0,
4500 		    "# of tx queues for offloaded TCP connections");
4501 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4502 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4503 		    "index of first TOE rx queue");
4504 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4505 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4506 		    "index of first TOE tx queue");
4507 	}
4508 #endif
4509 
4510 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4511 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4512 	    "holdoff timer index");
4513 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4514 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4515 	    "holdoff packet counter index");
4516 
4517 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4518 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4519 	    "rx queue size");
4520 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4521 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4522 	    "tx queue size");
4523 
4524 	/*
4525 	 * dev.cxgbe.X.stats.
4526 	 */
4527 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4528 	    NULL, "port statistics");
4529 	children = SYSCTL_CHILDREN(oid);
4530 
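/*
 * Each MPS counter below is exported as a 64-bit read-only sysctl that
 * reads the register afresh on every access via sysctl_handle_t4_reg64.
 */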
4531 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4532 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4533 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4534 	    sysctl_handle_t4_reg64, "QU", desc)
4535 
4536 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4537 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4538 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4539 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4540 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4541 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4542 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4543 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4544 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4545 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4546 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4547 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4548 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4549 	    "# of tx frames in this range",
4550 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4551 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4552 	    "# of tx frames in this range",
4553 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4554 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4555 	    "# of tx frames in this range",
4556 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4557 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4558 	    "# of tx frames in this range",
4559 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4560 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4561 	    "# of tx frames in this range",
4562 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4563 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4564 	    "# of tx frames in this range",
4565 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4566 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4567 	    "# of tx frames in this range",
4568 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4569 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4570 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4571 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4572 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4573 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4574 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4575 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4576 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4577 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4578 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4579 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4580 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4581 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4582 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4583 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4584 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4585 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4586 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4587 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4588 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4589 
4590 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4591 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4592 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4593 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4594 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4595 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4596 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4597 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4598 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4599 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4600 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4601 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4602 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4603 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4604 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4605 	    "# of frames received with bad FCS",
4606 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4607 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4608 	    "# of frames received with length error",
4609 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4610 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4611 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4612 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4613 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4614 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4615 	    "# of rx frames in this range",
4616 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4617 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4618 	    "# of rx frames in this range",
4619 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4620 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4621 	    "# of rx frames in this range",
4622 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4623 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4624 	    "# of rx frames in this range",
4625 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4626 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4627 	    "# of rx frames in this range",
4628 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4629 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4630 	    "# of rx frames in this range",
4631 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4632 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4633 	    "# of rx frames in this range",
4634 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4635 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4636 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4637 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4638 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4639 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4640 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4641 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4642 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4643 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4644 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4645 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4646 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4647 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4648 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4649 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4650 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4651 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4652 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4653 
4654 #undef SYSCTL_ADD_T4_REG64
4655 
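/* Export a counter from the port_stats snapshot kept in the softc. */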
4656 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4657 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4658 	    &pi->stats.name, desc)
4659 
4660 	/* We get these from port_stats and they may be stale by up to 1s */
4661 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4662 	    "# drops due to buffer-group 0 overflows");
4663 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4664 	    "# drops due to buffer-group 1 overflows");
4665 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4666 	    "# drops due to buffer-group 2 overflows");
4667 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4668 	    "# drops due to buffer-group 3 overflows");
4669 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4670 	    "# of buffer-group 0 truncated packets");
4671 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4672 	    "# of buffer-group 1 truncated packets");
4673 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4674 	    "# of buffer-group 2 truncated packets");
4675 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4676 	    "# of buffer-group 3 truncated packets");
4677 
4678 #undef SYSCTL_ADD_T4_PORTSTAT
4679 
4680 	return (0);
4681 }
4682 
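/*
 * Render an array of ints (arg1, arg2 bytes long) as a space-separated
 * string.
 */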
4683 static int
4684 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4685 {
4686 	int rc, *i;
4687 	struct sbuf sb;
4688 
4689 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4690 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4691 		sbuf_printf(&sb, "%d ", *i);
4692 	sbuf_trim(&sb);
4693 	sbuf_finish(&sb);
4694 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4695 	sbuf_delete(&sb);
4696 	return (rc);
4697 }
4698 
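/*
 * Decode the bits in arg2 using the "\20..." %b format string passed in
 * as arg1.
 */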
4699 static int
4700 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4701 {
4702 	int rc;
4703 	struct sbuf *sb;
4704 
4705 	rc = sysctl_wire_old_buffer(req, 0);
4706 	if (rc != 0)
4707 		return (rc);
4708 
4709 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4710 	if (sb == NULL)
4711 		return (ENOMEM);
4712 
4713 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4714 	rc = sbuf_finish(sb);
4715 	sbuf_delete(sb);
4716 
4717 	return (rc);
4718 }
4719 
4720 static int
4721 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4722 {
4723 	struct port_info *pi = arg1;
4724 	int op = arg2;
4725 	struct adapter *sc = pi->adapter;
4726 	u_int v;
4727 	int rc;
4728 
4729 	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4730 	if (rc)
4731 		return (rc);
4732 	/* XXX: magic numbers */
4733 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4734 	    &v);
4735 	end_synchronized_op(sc, 0);
4736 	if (rc)
4737 		return (rc);
4738 	if (op == 0)
4739 		v /= 256;
4740 
4741 	rc = sysctl_handle_int(oidp, &v, 0, req);
4742 	return (rc);
4743 }
4744 
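/*
 * Change the holdoff timer index on every rx queue of the port.  Each
 * iq's intr_params byte is rewritten in place, with an atomic 8-bit
 * store where the platform provides one so a concurrent reader never
 * sees a partial update.
 */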
4745 static int
4746 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4747 {
4748 	struct port_info *pi = arg1;
4749 	struct adapter *sc = pi->adapter;
4750 	int idx, rc, i;
4751 	struct sge_rxq *rxq;
4752 #ifdef TCP_OFFLOAD
4753 	struct sge_ofld_rxq *ofld_rxq;
4754 #endif
4755 	uint8_t v;
4756 
4757 	idx = pi->tmr_idx;
4758 
4759 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4760 	if (rc != 0 || req->newptr == NULL)
4761 		return (rc);
4762 
4763 	if (idx < 0 || idx >= SGE_NTIMERS)
4764 		return (EINVAL);
4765 
4766 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4767 	    "t4tmr");
4768 	if (rc)
4769 		return (rc);
4770 
4771 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4772 	for_each_rxq(pi, i, rxq) {
4773 #ifdef atomic_store_rel_8
4774 		atomic_store_rel_8(&rxq->iq.intr_params, v);
4775 #else
4776 		rxq->iq.intr_params = v;
4777 #endif
4778 	}
4779 #ifdef TCP_OFFLOAD
4780 	for_each_ofld_rxq(pi, i, ofld_rxq) {
4781 #ifdef atomic_store_rel_8
4782 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4783 #else
4784 		ofld_rxq->iq.intr_params = v;
4785 #endif
4786 	}
4787 #endif
4788 	pi->tmr_idx = idx;
4789 
4790 	end_synchronized_op(sc, LOCK_HELD);
4791 	return (0);
4792 }
4793 
4794 static int
4795 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4796 {
4797 	struct port_info *pi = arg1;
4798 	struct adapter *sc = pi->adapter;
4799 	int idx, rc;
4800 
4801 	idx = pi->pktc_idx;
4802 
4803 	rc = sysctl_handle_int(oidp, &idx, 0, req);
4804 	if (rc != 0 || req->newptr == NULL)
4805 		return (rc);
4806 
4807 	if (idx < -1 || idx >= SGE_NCOUNTERS)
4808 		return (EINVAL);
4809 
4810 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4811 	    "t4pktc");
4812 	if (rc)
4813 		return (rc);
4814 
4815 	if (pi->flags & PORT_INIT_DONE)
4816 		rc = EBUSY; /* cannot be changed once the queues are created */
4817 	else
4818 		pi->pktc_idx = idx;
4819 
4820 	end_synchronized_op(sc, LOCK_HELD);
4821 	return (rc);
4822 }
4823 
4824 static int
4825 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4826 {
4827 	struct port_info *pi = arg1;
4828 	struct adapter *sc = pi->adapter;
4829 	int qsize, rc;
4830 
4831 	qsize = pi->qsize_rxq;
4832 
4833 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4834 	if (rc != 0 || req->newptr == NULL)
4835 		return (rc);
4836 
4837 	if (qsize < 128 || (qsize & 7))
4838 		return (EINVAL);
4839 
4840 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4841 	    "t4rxqs");
4842 	if (rc)
4843 		return (rc);
4844 
4845 	if (pi->flags & PORT_INIT_DONE)
4846 		rc = EBUSY; /* cannot be changed once the queues are created */
4847 	else
4848 		pi->qsize_rxq = qsize;
4849 
4850 	end_synchronized_op(sc, LOCK_HELD);
4851 	return (rc);
4852 }
4853 
4854 static int
4855 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4856 {
4857 	struct port_info *pi = arg1;
4858 	struct adapter *sc = pi->adapter;
4859 	int qsize, rc;
4860 
4861 	qsize = pi->qsize_txq;
4862 
4863 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4864 	if (rc != 0 || req->newptr == NULL)
4865 		return (rc);
4866 
4867 	/* buf_ring size must be a power of 2 */
4868 	if (qsize < 128 || !powerof2(qsize))
4869 		return (EINVAL);
4870 
4871 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4872 	    "t4txqs");
4873 	if (rc)
4874 		return (rc);
4875 
4876 	if (pi->flags & PORT_INIT_DONE)
4877 		rc = EBUSY; /* cannot be changed once the queues are created */
4878 	else
4879 		pi->qsize_txq = qsize;
4880 
4881 	end_synchronized_op(sc, LOCK_HELD);
4882 	return (rc);
4883 }
4884 
4885 static int
4886 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4887 {
4888 	struct adapter *sc = arg1;
4889 	int reg = arg2;
4890 	uint64_t val;
4891 
4892 	val = t4_read_reg64(sc, reg);
4893 
4894 	return (sysctl_handle_64(oidp, &val, 0, req));
4895 }
4896 
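/*
 * Read the chip temperature via a firmware DIAG query; this goes through
 * the mailbox and so is serialized with begin_synchronized_op.
 */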
4897 static int
4898 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4899 {
4900 	struct adapter *sc = arg1;
4901 	int rc, t;
4902 	uint32_t param, val;
4903 
4904 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4905 	if (rc)
4906 		return (rc);
4907 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4908 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4909 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4910 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4911 	end_synchronized_op(sc, 0);
4912 	if (rc)
4913 		return (rc);
4914 
4915 	/* unknown is returned as 0 but we display -1 in that case */
4916 	t = val == 0 ? -1 : val;
4917 
4918 	rc = sysctl_handle_int(oidp, &t, 0, req);
4919 	return (rc);
4920 }
4921 
4922 #ifdef SBUF_DRAIN
4923 static int
4924 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4925 {
4926 	struct adapter *sc = arg1;
4927 	struct sbuf *sb;
4928 	int rc, i;
4929 	uint16_t incr[NMTUS][NCCTRL_WIN];
4930 	static const char *dec_fac[] = {
4931 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4932 		"0.9375"
4933 	};
4934 
4935 	rc = sysctl_wire_old_buffer(req, 0);
4936 	if (rc != 0)
4937 		return (rc);
4938 
4939 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4940 	if (sb == NULL)
4941 		return (ENOMEM);
4942 
4943 	t4_read_cong_tbl(sc, incr);
4944 
4945 	for (i = 0; i < NCCTRL_WIN; ++i) {
4946 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4947 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4948 		    incr[5][i], incr[6][i], incr[7][i]);
4949 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4950 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4951 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4952 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4953 	}
4954 
4955 	rc = sbuf_finish(sb);
4956 	sbuf_delete(sb);
4957 
4958 	return (rc);
4959 }
4960 
4961 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4962 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4963 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4964 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4965 };
4966 
4967 static int
4968 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4969 {
4970 	struct adapter *sc = arg1;
4971 	struct sbuf *sb;
4972 	int rc, i, n, qid = arg2;
4973 	uint32_t *buf, *p;
4974 	char *qtype;
4975 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4976 
4977 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4978 	    ("%s: bad qid %d\n", __func__, qid));
4979 
4980 	if (qid < CIM_NUM_IBQ) {
4981 		/* inbound queue */
4982 		qtype = "IBQ";
4983 		n = 4 * CIM_IBQ_SIZE;
4984 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4985 		rc = t4_read_cim_ibq(sc, qid, buf, n);
4986 	} else {
4987 		/* outbound queue */
4988 		qtype = "OBQ";
4989 		qid -= CIM_NUM_IBQ;
4990 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4991 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4992 		rc = t4_read_cim_obq(sc, qid, buf, n);
4993 	}
4994 
4995 	if (rc < 0) {
4996 		rc = -rc;
4997 		goto done;
4998 	}
4999 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5000 
5001 	rc = sysctl_wire_old_buffer(req, 0);
5002 	if (rc != 0)
5003 		goto done;
5004 
5005 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5006 	if (sb == NULL) {
5007 		rc = ENOMEM;
5008 		goto done;
5009 	}
5010 
5011 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5012 	for (i = 0, p = buf; i < n; i += 16, p += 4)
5013 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5014 		    p[2], p[3]);
5015 
5016 	rc = sbuf_finish(sb);
5017 	sbuf_delete(sb);
5018 done:
5019 	free(buf, M_CXGBE);
5020 	return (rc);
5021 }
5022 
5023 static int
5024 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5025 {
5026 	struct adapter *sc = arg1;
5027 	u_int cfg;
5028 	struct sbuf *sb;
5029 	uint32_t *buf, *p;
5030 	int rc;
5031 
5032 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5033 	if (rc != 0)
5034 		return (rc);
5035 
5036 	rc = sysctl_wire_old_buffer(req, 0);
5037 	if (rc != 0)
5038 		return (rc);
5039 
5040 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5041 	if (sb == NULL)
5042 		return (ENOMEM);
5043 
5044 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5045 	    M_ZERO | M_WAITOK);
5046 
5047 	rc = -t4_cim_read_la(sc, buf, NULL);
5048 	if (rc != 0)
5049 		goto done;
5050 
5051 	sbuf_printf(sb, "Status   Data      PC%s",
5052 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5053 	    "     LS0Stat  LS0Addr             LS0Data");
5054 
5055 	KASSERT((sc->params.cim_la_size & 7) == 0,
5056 	    ("%s: p will walk off the end of buf", __func__));
5057 
5058 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5059 		if (cfg & F_UPDBGLACAPTPCONLY) {
5060 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5061 			    p[6], p[7]);
5062 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5063 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5064 			    p[4] & 0xff, p[5] >> 8);
5065 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5066 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5067 			    p[1] & 0xf, p[2] >> 4);
5068 		} else {
5069 			sbuf_printf(sb,
5070 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5071 			    "%08x%08x%08x%08x",
5072 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5073 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5074 			    p[6], p[7]);
5075 		}
5076 	}
5077 
5078 	rc = sbuf_finish(sb);
5079 done:
5080 	sbuf_delete(sb);
5081 	free(buf, M_CXGBE);
5082 	return (rc);
5083 }
5084 
5085 static int
5086 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5087 {
5088 	struct adapter *sc = arg1;
5089 	u_int i;
5090 	struct sbuf *sb;
5091 	uint32_t *buf, *p;
5092 	int rc;
5093 
5094 	rc = sysctl_wire_old_buffer(req, 0);
5095 	if (rc != 0)
5096 		return (rc);
5097 
5098 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5099 	if (sb == NULL)
5100 		return (ENOMEM);
5101 
5102 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5103 	    M_ZERO | M_WAITOK);
5104 
5105 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5106 	p = buf;
5107 
5108 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5109 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5110 		    p[1], p[0]);
5111 	}
5112 
5113 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5114 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5115 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5116 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5117 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5118 		    (p[1] >> 2) | ((p[2] & 3) << 30),
5119 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5120 		    p[0] & 1);
5121 	}
5122 
5123 	rc = sbuf_finish(sb);
5124 	sbuf_delete(sb);
5125 	free(buf, M_CXGBE);
5126 	return (rc);
5127 }
5128 
5129 static int
5130 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5131 {
5132 	struct adapter *sc = arg1;
5133 	u_int i;
5134 	struct sbuf *sb;
5135 	uint32_t *buf, *p;
5136 	int rc;
5137 
5138 	rc = sysctl_wire_old_buffer(req, 0);
5139 	if (rc != 0)
5140 		return (rc);
5141 
5142 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5143 	if (sb == NULL)
5144 		return (ENOMEM);
5145 
5146 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5147 	    M_ZERO | M_WAITOK);
5148 
5149 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5150 	p = buf;
5151 
5152 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5153 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5154 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5155 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5156 		    p[4], p[3], p[2], p[1], p[0]);
5157 	}
5158 
5159 	sbuf_printf(sb, "\n\nCntl ID               Data");
5160 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5161 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5162 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5163 	}
5164 
5165 	rc = sbuf_finish(sb);
5166 	sbuf_delete(sb);
5167 	free(buf, M_CXGBE);
5168 	return (rc);
5169 }
5170 
5171 static int
5172 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5173 {
5174 	struct adapter *sc = arg1;
5175 	struct sbuf *sb;
5176 	int rc, i;
5177 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5178 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5179 	uint16_t thres[CIM_NUM_IBQ];
5180 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5181 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5182 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5183 
5184 	if (is_t4(sc)) {
5185 		cim_num_obq = CIM_NUM_OBQ;
5186 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5187 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5188 	} else {
5189 		cim_num_obq = CIM_NUM_OBQ_T5;
5190 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5191 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5192 	}
5193 	nq = CIM_NUM_IBQ + cim_num_obq;
5194 
5195 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5196 	if (rc == 0)
5197 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5198 	if (rc != 0)
5199 		return (rc);
5200 
5201 	t4_read_cimq_cfg(sc, base, size, thres);
5202 
5203 	rc = sysctl_wire_old_buffer(req, 0);
5204 	if (rc != 0)
5205 		return (rc);
5206 
5207 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5208 	if (sb == NULL)
5209 		return (ENOMEM);
5210 
5211 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5212 
5213 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5214 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5215 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5216 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5217 		    G_QUEREMFLITS(p[2]) * 16);
5218 	for ( ; i < nq; i++, p += 4, wr += 2)
5219 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5220 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5221 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5222 		    G_QUEREMFLITS(p[2]) * 16);
5223 
5224 	rc = sbuf_finish(sb);
5225 	sbuf_delete(sb);
5226 
5227 	return (rc);
5228 }
5229 
5230 static int
5231 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5232 {
5233 	struct adapter *sc = arg1;
5234 	struct sbuf *sb;
5235 	int rc;
5236 	struct tp_cpl_stats stats;
5237 
5238 	rc = sysctl_wire_old_buffer(req, 0);
5239 	if (rc != 0)
5240 		return (rc);
5241 
5242 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5243 	if (sb == NULL)
5244 		return (ENOMEM);
5245 
5246 	t4_tp_get_cpl_stats(sc, &stats);
5247 
5248 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5249 	    "channel 3\n");
5250 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5251 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5252 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5253 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5254 
5255 	rc = sbuf_finish(sb);
5256 	sbuf_delete(sb);
5257 
5258 	return (rc);
5259 }
5260 
5261 static int
5262 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5263 {
5264 	struct adapter *sc = arg1;
5265 	struct sbuf *sb;
5266 	int rc;
5267 	struct tp_usm_stats stats;
5268 
5269 	rc = sysctl_wire_old_buffer(req, 0);
5270 	if (rc != 0)
5271 		return (rc);
5272 
5273 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5274 	if (sb == NULL)
5275 		return (ENOMEM);
5276 
5277 	t4_get_usm_stats(sc, &stats);
5278 
5279 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5280 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5281 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5282 
5283 	rc = sbuf_finish(sb);
5284 	sbuf_delete(sb);
5285 
5286 	return (rc);
5287 }
5288 
5289 const char *devlog_level_strings[] = {
5290 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5291 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5292 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5293 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5294 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5295 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5296 };
5297 
5298 const char *devlog_facility_strings[] = {
5299 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5300 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5301 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5302 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5303 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5304 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5305 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5306 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5307 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5308 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5309 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5310 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5311 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5312 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5313 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5314 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5315 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5316 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5317 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5318 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5319 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5320 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5321 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5322 };
5323 
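/*
 * Dump the firmware's device log: a circular buffer of fw_devlog_e
 * entries in adapter memory.  The entry with the lowest timestamp is the
 * oldest, so the dump starts there and wraps around.
 */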
5324 static int
5325 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5326 {
5327 	struct adapter *sc = arg1;
5328 	struct devlog_params *dparams = &sc->params.devlog;
5329 	struct fw_devlog_e *buf, *e;
5330 	int i, j, rc, nentries, first = 0, m;
5331 	struct sbuf *sb;
5332 	uint64_t ftstamp = UINT64_MAX;
5333 
5334 	if (dparams->start == 0) {
5335 		dparams->memtype = FW_MEMTYPE_EDC0;
5336 		dparams->start = 0x84000;
5337 		dparams->size = 32768;
5338 	}
5339 
5340 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5341 
5342 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5343 	if (buf == NULL)
5344 		return (ENOMEM);
5345 
5346 	m = fwmtype_to_hwmtype(dparams->memtype);
5347 	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5348 	if (rc != 0)
5349 		goto done;
5350 
5351 	for (i = 0; i < nentries; i++) {
5352 		e = &buf[i];
5353 
5354 		if (e->timestamp == 0)
5355 			break;	/* end */
5356 
5357 		e->timestamp = be64toh(e->timestamp);
5358 		e->seqno = be32toh(e->seqno);
5359 		for (j = 0; j < 8; j++)
5360 			e->params[j] = be32toh(e->params[j]);
5361 
5362 		if (e->timestamp < ftstamp) {
5363 			ftstamp = e->timestamp;
5364 			first = i;
5365 		}
5366 	}
5367 
5368 	if (buf[first].timestamp == 0)
5369 		goto done;	/* nothing in the log */
5370 
5371 	rc = sysctl_wire_old_buffer(req, 0);
5372 	if (rc != 0)
5373 		goto done;
5374 
5375 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5376 	if (sb == NULL) {
5377 		rc = ENOMEM;
5378 		goto done;
5379 	}
5380 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5381 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5382 
5383 	i = first;
5384 	do {
5385 		e = &buf[i];
5386 		if (e->timestamp == 0)
5387 			break;	/* end */
5388 
5389 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5390 		    e->seqno, e->timestamp,
5391 		    (e->level < nitems(devlog_level_strings) ?
5392 			devlog_level_strings[e->level] : "UNKNOWN"),
5393 		    (e->facility < nitems(devlog_facility_strings) ?
5394 			devlog_facility_strings[e->facility] : "UNKNOWN"));
5395 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5396 		    e->params[2], e->params[3], e->params[4],
5397 		    e->params[5], e->params[6], e->params[7]);
5398 
5399 		if (++i == nentries)
5400 			i = 0;
5401 	} while (i != first);
5402 
5403 	rc = sbuf_finish(sb);
5404 	sbuf_delete(sb);
5405 done:
5406 	free(buf, M_CXGBE);
5407 	return (rc);
5408 }
5409 
5410 static int
5411 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5412 {
5413 	struct adapter *sc = arg1;
5414 	struct sbuf *sb;
5415 	int rc;
5416 	struct tp_fcoe_stats stats[4];
5417 
5418 	rc = sysctl_wire_old_buffer(req, 0);
5419 	if (rc != 0)
5420 		return (rc);
5421 
5422 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5423 	if (sb == NULL)
5424 		return (ENOMEM);
5425 
5426 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5427 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5428 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5429 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5430 
5431 	sbuf_printf(sb, "                   channel 0        channel 1        "
5432 	    "channel 2        channel 3\n");
5433 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5434 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5435 	    stats[3].octetsDDP);
5436 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5437 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5438 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5439 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5440 	    stats[3].framesDrop);
5441 
5442 	rc = sbuf_finish(sb);
5443 	sbuf_delete(sb);
5444 
5445 	return (rc);
5446 }
5447 
5448 static int
5449 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5450 {
5451 	struct adapter *sc = arg1;
5452 	struct sbuf *sb;
5453 	int rc, i;
5454 	unsigned int map, kbps, ipg, mode;
5455 	unsigned int pace_tab[NTX_SCHED];
5456 
5457 	rc = sysctl_wire_old_buffer(req, 0);
5458 	if (rc != 0)
5459 		return (rc);
5460 
5461 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5462 	if (sb == NULL)
5463 		return (ENOMEM);
5464 
5465 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5466 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5467 	t4_read_pace_tbl(sc, pace_tab);
5468 
5469 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5470 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5471 
5472 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5473 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5474 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5475 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5476 		if (kbps)
5477 			sbuf_printf(sb, "%9u     ", kbps);
5478 		else
5479 			sbuf_printf(sb, " disabled     ");
5480 
5481 		if (ipg)
5482 			sbuf_printf(sb, "%13u        ", ipg);
5483 		else
5484 			sbuf_printf(sb, "     disabled        ");
5485 
5486 		if (pace_tab[i])
5487 			sbuf_printf(sb, "%10u", pace_tab[i]);
5488 		else
5489 			sbuf_printf(sb, "  disabled");
5490 	}
5491 
5492 	rc = sbuf_finish(sb);
5493 	sbuf_delete(sb);
5494 
5495 	return (rc);
5496 }
5497 
5498 static int
5499 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5500 {
5501 	struct adapter *sc = arg1;
5502 	struct sbuf *sb;
5503 	int rc, i, j;
5504 	uint64_t *p0, *p1;
5505 	struct lb_port_stats s[2];
5506 	static const char *stat_name[] = {
5507 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5508 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5509 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5510 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5511 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5512 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5513 		"BG2FramesTrunc:", "BG3FramesTrunc:"
5514 	};
5515 
5516 	rc = sysctl_wire_old_buffer(req, 0);
5517 	if (rc != 0)
5518 		return (rc);
5519 
5520 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5521 	if (sb == NULL)
5522 		return (ENOMEM);
5523 
5524 	memset(s, 0, sizeof(s));
5525 
5526 	for (i = 0; i < 4; i += 2) {
5527 		t4_get_lb_stats(sc, i, &s[0]);
5528 		t4_get_lb_stats(sc, i + 1, &s[1]);
5529 
5530 		p0 = &s[0].octets;
5531 		p1 = &s[1].octets;
5532 		sbuf_printf(sb, "%s                       Loopback %u"
5533 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5534 
5535 		for (j = 0; j < nitems(stat_name); j++)
5536 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5537 				   *p0++, *p1++);
5538 	}
5539 
5540 	rc = sbuf_finish(sb);
5541 	sbuf_delete(sb);
5542 
5543 	return (rc);
5544 }
5545 
5546 static int
5547 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5548 {
5549 	int rc = 0;
5550 	struct port_info *pi = arg1;
5551 	struct sbuf *sb;
5552 	static const char *linkdnreasons[] = {
5553 		"non-specific", "remote fault", "autoneg failed", "reserved3",
5554 		"PHY overheated", "unknown", "rx los", "reserved7"
5555 	};
5556 
5557 	rc = sysctl_wire_old_buffer(req, 0);
5558 	if (rc != 0)
		return (rc);
5560 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5561 	if (sb == NULL)
5562 		return (ENOMEM);
5563 
5564 	if (pi->linkdnrc < 0)
5565 		sbuf_printf(sb, "n/a");
5566 	else if (pi->linkdnrc < nitems(linkdnreasons))
5567 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5568 	else
5569 		sbuf_printf(sb, "%d", pi->linkdnrc);
5570 
5571 	rc = sbuf_finish(sb);
5572 	sbuf_delete(sb);
5573 
5574 	return (rc);
5575 }
5576 
5577 struct mem_desc {
5578 	unsigned int base;
5579 	unsigned int limit;
5580 	unsigned int idx;
5581 };
5582 
5583 static int
5584 mem_desc_cmp(const void *a, const void *b)
5585 {
5586 	return ((const struct mem_desc *)a)->base -
5587 	       ((const struct mem_desc *)b)->base;
5588 }
5589 
5590 static void
5591 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5592     unsigned int to)
5593 {
5594 	unsigned int size;
5595 
5596 	size = to - from + 1;
5597 	if (size == 0)
5598 		return;
5599 
5600 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5601 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5602 }
5603 
5604 static int
5605 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5606 {
5607 	struct adapter *sc = arg1;
5608 	struct sbuf *sb;
5609 	int rc, i, n;
5610 	uint32_t lo, hi, used, alloc;
5611 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5612 	static const char *region[] = {
5613 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5614 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5615 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5616 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5617 		"RQUDP region:", "PBL region:", "TXPBL region:",
5618 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5619 		"On-chip queues:"
5620 	};
5621 	struct mem_desc avail[4];
5622 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5623 	struct mem_desc *md = mem;
5624 
5625 	rc = sysctl_wire_old_buffer(req, 0);
5626 	if (rc != 0)
5627 		return (rc);
5628 
5629 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5630 	if (sb == NULL)
5631 		return (ENOMEM);
5632 
5633 	for (i = 0; i < nitems(mem); i++) {
5634 		mem[i].limit = 0;
5635 		mem[i].idx = i;
5636 	}
5637 
5638 	/* Find and sort the populated memory ranges */
5639 	i = 0;
5640 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5641 	if (lo & F_EDRAM0_ENABLE) {
5642 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5643 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5644 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5645 		avail[i].idx = 0;
5646 		i++;
5647 	}
5648 	if (lo & F_EDRAM1_ENABLE) {
5649 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5650 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5651 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5652 		avail[i].idx = 1;
5653 		i++;
5654 	}
5655 	if (lo & F_EXT_MEM_ENABLE) {
5656 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5657 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5658 		avail[i].limit = avail[i].base +
5659 		    (G_EXT_MEM_SIZE(hi) << 20);
5660 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5661 		i++;
5662 	}
5663 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5664 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5665 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5666 		avail[i].limit = avail[i].base +
5667 		    (G_EXT_MEM1_SIZE(hi) << 20);
5668 		avail[i].idx = 4;
5669 		i++;
5670 	}
	if (i == 0) {				/* no memory available */
		sbuf_delete(sb);
		return (0);
	}
5673 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5674 
5675 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5676 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5677 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5678 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5679 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5680 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5681 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5682 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5683 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5684 
5685 	/* the next few have explicit upper bounds */
5686 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5687 	md->limit = md->base - 1 +
5688 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5689 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5690 	md++;
5691 
5692 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5693 	md->limit = md->base - 1 +
5694 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5695 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5696 	md++;
5697 
5698 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5699 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5700 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5701 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5702 	} else {
5703 		md->base = 0;
5704 		md->idx = nitems(region);  /* hide it */
5705 	}
5706 	md++;
5707 
5708 #define ulp_region(reg) \
5709 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5710 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5711 
5712 	ulp_region(RX_ISCSI);
5713 	ulp_region(RX_TDDP);
5714 	ulp_region(TX_TPT);
5715 	ulp_region(RX_STAG);
5716 	ulp_region(RX_RQ);
5717 	ulp_region(RX_RQUDP);
5718 	ulp_region(RX_PBL);
5719 	ulp_region(TX_PBL);
5720 #undef ulp_region
5721 
5722 	md->base = 0;
5723 	md->idx = nitems(region);
5724 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5725 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5726 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5727 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5728 	}
5729 	md++;
5730 
5731 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5732 	md->limit = md->base + sc->tids.ntids - 1;
5733 	md++;
5734 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5735 	md->limit = md->base + sc->tids.ntids - 1;
5736 	md++;
5737 
5738 	md->base = sc->vres.ocq.start;
5739 	if (sc->vres.ocq.size)
5740 		md->limit = md->base + sc->vres.ocq.size - 1;
5741 	else
5742 		md->idx = nitems(region);  /* hide it */
5743 	md++;
5744 
	/* add any address-space holes; there can be up to 3 */
5746 	for (n = 0; n < i - 1; n++)
5747 		if (avail[n].limit < avail[n + 1].base)
5748 			(md++)->base = avail[n].limit;
5749 	if (avail[n].limit)
5750 		(md++)->base = avail[n].limit;
5751 
5752 	n = md - mem;
5753 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5754 
5755 	for (lo = 0; lo < i; lo++)
5756 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5757 				avail[lo].limit - 1);
5758 
5759 	sbuf_printf(sb, "\n");
5760 	for (i = 0; i < n; i++) {
5761 		if (mem[i].idx >= nitems(region))
5762 			continue;                        /* skip holes */
5763 		if (!mem[i].limit)
5764 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5765 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5766 				mem[i].limit);
5767 	}
5768 
5769 	sbuf_printf(sb, "\n");
5770 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5771 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5772 	mem_region_show(sb, "uP RAM:", lo, hi);
5773 
5774 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5775 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5776 	mem_region_show(sb, "uP Extmem2:", lo, hi);
5777 
5778 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5779 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5780 		   G_PMRXMAXPAGE(lo),
5781 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5782 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5783 
5784 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5785 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5786 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5787 		   G_PMTXMAXPAGE(lo),
5788 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5789 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5790 	sbuf_printf(sb, "%u p-structs\n",
5791 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5792 
5793 	for (i = 0; i < 4; i++) {
5794 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5795 		if (is_t4(sc)) {
5796 			used = G_USED(lo);
5797 			alloc = G_ALLOC(lo);
5798 		} else {
5799 			used = G_T5_USED(lo);
5800 			alloc = G_T5_ALLOC(lo);
5801 		}
5802 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5803 			   i, used, alloc);
5804 	}
5805 	for (i = 0; i < 4; i++) {
5806 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5807 		if (is_t4(sc)) {
5808 			used = G_USED(lo);
5809 			alloc = G_ALLOC(lo);
5810 		} else {
5811 			used = G_T5_USED(lo);
5812 			alloc = G_T5_ALLOC(lo);
5813 		}
5814 		sbuf_printf(sb,
5815 			   "\nLoopback %d using %u pages out of %u allocated",
5816 			   i, used, alloc);
5817 	}
5818 
5819 	rc = sbuf_finish(sb);
5820 	sbuf_delete(sb);
5821 
5822 	return (rc);
5823 }
5824 
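/*
 * An MPS TCAM entry is stored as a pair of words, x and y: the mask of
 * bits that participate in the match is x | y and the value is y.  The
 * 48-bit Ethernet address sits in the low six bytes of the big-endian
 * 64-bit y, hence the copy from offset 2.
 */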
5825 static inline void
5826 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5827 {
5828 	*mask = x | y;
5829 	y = htobe64(y);
5830 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5831 }
5832 
5833 static int
5834 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5835 {
5836 	struct adapter *sc = arg1;
5837 	struct sbuf *sb;
5838 	int rc, i, n;
5839 
5840 	rc = sysctl_wire_old_buffer(req, 0);
5841 	if (rc != 0)
5842 		return (rc);
5843 
5844 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5845 	if (sb == NULL)
5846 		return (ENOMEM);
5847 
5848 	sbuf_printf(sb,
5849 	    "Idx  Ethernet address     Mask     Vld Ports PF"
5850 	    "  VF              Replication             P0 P1 P2 P3  ML");
5851 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5852 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5853 	for (i = 0; i < n; i++) {
5854 		uint64_t tcamx, tcamy, mask;
5855 		uint32_t cls_lo, cls_hi;
5856 		uint8_t addr[ETHER_ADDR_LEN];
5857 
5858 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5859 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5860 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5861 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5862 
5863 		if (tcamx & tcamy)
5864 			continue;
5865 
5866 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5867 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5868 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5869 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5870 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5871 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5872 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5873 
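		/*
		 * Fetch this entry's replication bitmap from the
		 * firmware with an LDST command.
		 */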
5874 		if (cls_lo & F_REPLICATE) {
5875 			struct fw_ldst_cmd ldst_cmd;
5876 
5877 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5878 			ldst_cmd.op_to_addrspace =
5879 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5880 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5881 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5882 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5883 			ldst_cmd.u.mps.fid_ctl =
5884 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5885 				V_FW_LDST_CMD_CTL(i));
5886 
5887 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5888 			    "t4mps");
5889 			if (rc)
5890 				break;
5891 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5892 			    sizeof(ldst_cmd), &ldst_cmd);
5893 			end_synchronized_op(sc, 0);
5894 
5895 			if (rc != 0) {
5896 				sbuf_printf(sb,
5897 				    " ------------ error %3u ------------", rc);
5898 				rc = 0;
5899 			} else {
5900 				sbuf_printf(sb, " %08x %08x %08x %08x",
5901 				    be32toh(ldst_cmd.u.mps.rplc127_96),
5902 				    be32toh(ldst_cmd.u.mps.rplc95_64),
5903 				    be32toh(ldst_cmd.u.mps.rplc63_32),
5904 				    be32toh(ldst_cmd.u.mps.rplc31_0));
5905 			}
5906 		} else
5907 			sbuf_printf(sb, "%36s", "");
5908 
5909 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5910 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5911 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5912 	}
5913 
5914 	if (rc)
5915 		(void) sbuf_finish(sb);
5916 	else
5917 		rc = sbuf_finish(sb);
5918 	sbuf_delete(sb);
5919 
5920 	return (rc);
5921 }
5922 
5923 static int
5924 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5925 {
5926 	struct adapter *sc = arg1;
5927 	struct sbuf *sb;
5928 	int rc;
5929 	uint16_t mtus[NMTUS];
5930 
5931 	rc = sysctl_wire_old_buffer(req, 0);
5932 	if (rc != 0)
5933 		return (rc);
5934 
5935 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5936 	if (sb == NULL)
5937 		return (ENOMEM);
5938 
5939 	t4_read_mtu_tbl(sc, mtus, NULL);
5940 
5941 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5942 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5943 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5944 	    mtus[14], mtus[15]);
5945 
5946 	rc = sbuf_finish(sb);
5947 	sbuf_delete(sb);
5948 
5949 	return (rc);
5950 }
5951 
5952 static int
5953 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5954 {
5955 	struct adapter *sc = arg1;
5956 	struct sbuf *sb;
5957 	int rc, i;
5958 	uint32_t cnt[PM_NSTATS];
5959 	uint64_t cyc[PM_NSTATS];
5960 	static const char *rx_stats[] = {
5961 		"Read:", "Write bypass:", "Write mem:", "Flush:"
5962 	};
5963 	static const char *tx_stats[] = {
5964 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
5965 	};
5966 
5967 	rc = sysctl_wire_old_buffer(req, 0);
5968 	if (rc != 0)
5969 		return (rc);
5970 
5971 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5972 	if (sb == NULL)
5973 		return (ENOMEM);
5974 
5975 	t4_pmtx_get_stats(sc, cnt, cyc);
5976 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
5977 	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
5978 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
5979 		    cyc[i]);
5980 
5981 	t4_pmrx_get_stats(sc, cnt, cyc);
5982 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
5983 	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
5984 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
5985 		    cyc[i]);
5986 
5987 	rc = sbuf_finish(sb);
5988 	sbuf_delete(sb);
5989 
5990 	return (rc);
5991 }
5992 
5993 static int
5994 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5995 {
5996 	struct adapter *sc = arg1;
5997 	struct sbuf *sb;
5998 	int rc;
5999 	struct tp_rdma_stats stats;
6000 
6001 	rc = sysctl_wire_old_buffer(req, 0);
6002 	if (rc != 0)
6003 		return (rc);
6004 
6005 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6006 	if (sb == NULL)
6007 		return (ENOMEM);
6008 
6009 	t4_tp_get_rdma_stats(sc, &stats);
6010 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6011 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6012 
6013 	rc = sbuf_finish(sb);
6014 	sbuf_delete(sb);
6015 
6016 	return (rc);
6017 }
6018 
6019 static int
6020 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6021 {
6022 	struct adapter *sc = arg1;
6023 	struct sbuf *sb;
6024 	int rc;
6025 	struct tp_tcp_stats v4, v6;
6026 
6027 	rc = sysctl_wire_old_buffer(req, 0);
6028 	if (rc != 0)
6029 		return (rc);
6030 
6031 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6032 	if (sb == NULL)
6033 		return (ENOMEM);
6034 
6035 	t4_tp_get_tcp_stats(sc, &v4, &v6);
6036 	sbuf_printf(sb,
6037 	    "                                IP                 IPv6\n");
6038 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6039 	    v4.tcpOutRsts, v6.tcpOutRsts);
6040 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6041 	    v4.tcpInSegs, v6.tcpInSegs);
6042 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6043 	    v4.tcpOutSegs, v6.tcpOutSegs);
6044 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6045 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6046 
6047 	rc = sbuf_finish(sb);
6048 	sbuf_delete(sb);
6049 
6050 	return (rc);
6051 }
6052 
6053 static int
6054 sysctl_tids(SYSCTL_HANDLER_ARGS)
6055 {
6056 	struct adapter *sc = arg1;
6057 	struct sbuf *sb;
6058 	int rc;
6059 	struct tid_info *t = &sc->tids;
6060 
6061 	rc = sysctl_wire_old_buffer(req, 0);
6062 	if (rc != 0)
6063 		return (rc);
6064 
6065 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6066 	if (sb == NULL)
6067 		return (ENOMEM);
6068 
6069 	if (t->natids) {
6070 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6071 		    t->atids_in_use);
6072 	}
6073 
6074 	if (t->ntids) {
6075 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6076 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6077 
6078 			if (b) {
6079 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6080 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6081 				    t->ntids - 1);
6082 			} else {
6083 				sbuf_printf(sb, "TID range: %u-%u",
6084 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6085 				    t->ntids - 1);
6086 			}
6087 		} else
6088 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6089 		sbuf_printf(sb, ", in use: %u\n",
6090 		    atomic_load_acq_int(&t->tids_in_use));
6091 	}
6092 
6093 	if (t->nstids) {
6094 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6095 		    t->stid_base + t->nstids - 1, t->stids_in_use);
6096 	}
6097 
6098 	if (t->nftids) {
6099 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6100 		    t->ftid_base + t->nftids - 1);
6101 	}
6102 
6103 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6104 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6105 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6106 
6107 	rc = sbuf_finish(sb);
6108 	sbuf_delete(sb);
6109 
6110 	return (rc);
6111 }
6112 
6113 static int
6114 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6115 {
6116 	struct adapter *sc = arg1;
6117 	struct sbuf *sb;
6118 	int rc;
6119 	struct tp_err_stats stats;
6120 
6121 	rc = sysctl_wire_old_buffer(req, 0);
6122 	if (rc != 0)
6123 		return (rc);
6124 
6125 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6126 	if (sb == NULL)
6127 		return (ENOMEM);
6128 
6129 	t4_tp_get_err_stats(sc, &stats);
6130 
6131 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6132 		      "channel 3\n");
6133 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6134 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6135 	    stats.macInErrs[3]);
6136 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6137 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6138 	    stats.hdrInErrs[3]);
6139 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6140 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6141 	    stats.tcpInErrs[3]);
6142 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6143 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6144 	    stats.tcp6InErrs[3]);
6145 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6146 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6147 	    stats.tnlCongDrops[3]);
6148 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6149 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6150 	    stats.tnlTxDrops[3]);
6151 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6152 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6153 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6154 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6155 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6156 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6157 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6158 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6159 
6160 	rc = sbuf_finish(sb);
6161 	sbuf_delete(sb);
6162 
6163 	return (rc);
6164 }
6165 
6166 struct field_desc {
6167 	const char *name;
6168 	u_int start;
6169 	u_int width;
6170 };
6171 
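/*
 * Decode a 64-bit value into the named bit fields and print them as
 * "name: value" pairs, wrapping the output near 79 columns.
 */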
6172 static void
6173 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6174 {
6175 	char buf[32];
6176 	int line_size = 0;
6177 
6178 	while (f->name) {
6179 		uint64_t mask = (1ULL << f->width) - 1;
6180 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6181 		    ((uintmax_t)v >> f->start) & mask);
6182 
6183 		if (line_size + len >= 79) {
6184 			line_size = 8;
6185 			sbuf_printf(sb, "\n        ");
6186 		}
6187 		sbuf_printf(sb, "%s ", buf);
6188 		line_size += len + 1;
6189 		f++;
6190 	}
6191 	sbuf_printf(sb, "\n");
6192 }
6193 
6194 static struct field_desc tp_la0[] = {
6195 	{ "RcfOpCodeOut", 60, 4 },
6196 	{ "State", 56, 4 },
6197 	{ "WcfState", 52, 4 },
6198 	{ "RcfOpcSrcOut", 50, 2 },
6199 	{ "CRxError", 49, 1 },
6200 	{ "ERxError", 48, 1 },
6201 	{ "SanityFailed", 47, 1 },
6202 	{ "SpuriousMsg", 46, 1 },
6203 	{ "FlushInputMsg", 45, 1 },
6204 	{ "FlushInputCpl", 44, 1 },
6205 	{ "RssUpBit", 43, 1 },
6206 	{ "RssFilterHit", 42, 1 },
6207 	{ "Tid", 32, 10 },
6208 	{ "InitTcb", 31, 1 },
6209 	{ "LineNumber", 24, 7 },
6210 	{ "Emsg", 23, 1 },
6211 	{ "EdataOut", 22, 1 },
6212 	{ "Cmsg", 21, 1 },
6213 	{ "CdataOut", 20, 1 },
6214 	{ "EreadPdu", 19, 1 },
6215 	{ "CreadPdu", 18, 1 },
6216 	{ "TunnelPkt", 17, 1 },
6217 	{ "RcfPeerFin", 16, 1 },
6218 	{ "RcfReasonOut", 12, 4 },
6219 	{ "TxCchannel", 10, 2 },
6220 	{ "RcfTxChannel", 8, 2 },
6221 	{ "RxEchannel", 6, 2 },
6222 	{ "RcfRxChannel", 5, 1 },
6223 	{ "RcfDataOutSrdy", 4, 1 },
6224 	{ "RxDvld", 3, 1 },
6225 	{ "RxOoDvld", 2, 1 },
6226 	{ "RxCongestion", 1, 1 },
6227 	{ "TxCongestion", 0, 1 },
6228 	{ NULL }
6229 };
6230 
6231 static struct field_desc tp_la1[] = {
6232 	{ "CplCmdIn", 56, 8 },
6233 	{ "CplCmdOut", 48, 8 },
6234 	{ "ESynOut", 47, 1 },
6235 	{ "EAckOut", 46, 1 },
6236 	{ "EFinOut", 45, 1 },
6237 	{ "ERstOut", 44, 1 },
6238 	{ "SynIn", 43, 1 },
6239 	{ "AckIn", 42, 1 },
6240 	{ "FinIn", 41, 1 },
6241 	{ "RstIn", 40, 1 },
6242 	{ "DataIn", 39, 1 },
6243 	{ "DataInVld", 38, 1 },
6244 	{ "PadIn", 37, 1 },
6245 	{ "RxBufEmpty", 36, 1 },
6246 	{ "RxDdp", 35, 1 },
6247 	{ "RxFbCongestion", 34, 1 },
6248 	{ "TxFbCongestion", 33, 1 },
6249 	{ "TxPktSumSrdy", 32, 1 },
6250 	{ "RcfUlpType", 28, 4 },
6251 	{ "Eread", 27, 1 },
6252 	{ "Ebypass", 26, 1 },
6253 	{ "Esave", 25, 1 },
6254 	{ "Static0", 24, 1 },
6255 	{ "Cread", 23, 1 },
6256 	{ "Cbypass", 22, 1 },
6257 	{ "Csave", 21, 1 },
6258 	{ "CPktOut", 20, 1 },
6259 	{ "RxPagePoolFull", 18, 2 },
6260 	{ "RxLpbkPkt", 17, 1 },
6261 	{ "TxLpbkPkt", 16, 1 },
6262 	{ "RxVfValid", 15, 1 },
6263 	{ "SynLearned", 14, 1 },
6264 	{ "SetDelEntry", 13, 1 },
6265 	{ "SetInvEntry", 12, 1 },
6266 	{ "CpcmdDvld", 11, 1 },
6267 	{ "CpcmdSave", 10, 1 },
6268 	{ "RxPstructsFull", 8, 2 },
6269 	{ "EpcmdDvld", 7, 1 },
6270 	{ "EpcmdFlush", 6, 1 },
6271 	{ "EpcmdTrimPrefix", 5, 1 },
6272 	{ "EpcmdTrimPostfix", 4, 1 },
6273 	{ "ERssIp4Pkt", 3, 1 },
6274 	{ "ERssIp6Pkt", 2, 1 },
6275 	{ "ERssTcpUdpPkt", 1, 1 },
6276 	{ "ERssFceFipPkt", 0, 1 },
6277 	{ NULL }
6278 };
6279 
6280 static struct field_desc tp_la2[] = {
6281 	{ "CplCmdIn", 56, 8 },
6282 	{ "MpsVfVld", 55, 1 },
6283 	{ "MpsPf", 52, 3 },
6284 	{ "MpsVf", 44, 8 },
6285 	{ "SynIn", 43, 1 },
6286 	{ "AckIn", 42, 1 },
6287 	{ "FinIn", 41, 1 },
6288 	{ "RstIn", 40, 1 },
6289 	{ "DataIn", 39, 1 },
6290 	{ "DataInVld", 38, 1 },
6291 	{ "PadIn", 37, 1 },
6292 	{ "RxBufEmpty", 36, 1 },
6293 	{ "RxDdp", 35, 1 },
6294 	{ "RxFbCongestion", 34, 1 },
6295 	{ "TxFbCongestion", 33, 1 },
6296 	{ "TxPktSumSrdy", 32, 1 },
6297 	{ "RcfUlpType", 28, 4 },
6298 	{ "Eread", 27, 1 },
6299 	{ "Ebypass", 26, 1 },
6300 	{ "Esave", 25, 1 },
6301 	{ "Static0", 24, 1 },
6302 	{ "Cread", 23, 1 },
6303 	{ "Cbypass", 22, 1 },
6304 	{ "Csave", 21, 1 },
6305 	{ "CPktOut", 20, 1 },
6306 	{ "RxPagePoolFull", 18, 2 },
6307 	{ "RxLpbkPkt", 17, 1 },
6308 	{ "TxLpbkPkt", 16, 1 },
6309 	{ "RxVfValid", 15, 1 },
6310 	{ "SynLearned", 14, 1 },
6311 	{ "SetDelEntry", 13, 1 },
6312 	{ "SetInvEntry", 12, 1 },
6313 	{ "CpcmdDvld", 11, 1 },
6314 	{ "CpcmdSave", 10, 1 },
6315 	{ "RxPstructsFull", 8, 2 },
6316 	{ "EpcmdDvld", 7, 1 },
6317 	{ "EpcmdFlush", 6, 1 },
6318 	{ "EpcmdTrimPrefix", 5, 1 },
6319 	{ "EpcmdTrimPostfix", 4, 1 },
6320 	{ "ERssIp4Pkt", 3, 1 },
6321 	{ "ERssIp6Pkt", 2, 1 },
6322 	{ "ERssTcpUdpPkt", 1, 1 },
6323 	{ "ERssFceFipPkt", 0, 1 },
6324 	{ NULL }
6325 };
6326 
6327 static void
6328 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6329 {
6330 
6331 	field_desc_show(sb, *p, tp_la0);
6332 }
6333 
6334 static void
6335 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6336 {
6337 
6338 	if (idx)
6339 		sbuf_printf(sb, "\n");
6340 	field_desc_show(sb, p[0], tp_la0);
6341 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6342 		field_desc_show(sb, p[1], tp_la0);
6343 }
6344 
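/*
 * With the LA in mode 3 each entry carries two samples: the first is
 * always decoded with tp_la0, and bit 17 of it selects whether the second
 * is decoded with tp_la2 or tp_la1.
 */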
6345 static void
6346 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6347 {
6348 
6349 	if (idx)
6350 		sbuf_printf(sb, "\n");
6351 	field_desc_show(sb, p[0], tp_la0);
6352 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6353 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6354 }
6355 
6356 static int
6357 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6358 {
6359 	struct adapter *sc = arg1;
6360 	struct sbuf *sb;
6361 	uint64_t *buf, *p;
6362 	int rc;
6363 	u_int i, inc;
6364 	void (*show_func)(struct sbuf *, uint64_t *, int);
6365 
6366 	rc = sysctl_wire_old_buffer(req, 0);
6367 	if (rc != 0)
6368 		return (rc);
6369 
6370 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6371 	if (sb == NULL)
6372 		return (ENOMEM);
6373 
6374 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6375 
6376 	t4_tp_read_la(sc, buf, NULL);
6377 	p = buf;
6378 
6379 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6380 	case 2:
6381 		inc = 2;
6382 		show_func = tp_la_show2;
6383 		break;
6384 	case 3:
6385 		inc = 2;
6386 		show_func = tp_la_show3;
6387 		break;
6388 	default:
6389 		inc = 1;
6390 		show_func = tp_la_show;
6391 	}
6392 
6393 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6394 		(*show_func)(sb, p, i);
6395 
6396 	rc = sbuf_finish(sb);
6397 	sbuf_delete(sb);
6398 	free(buf, M_CXGBE);
6399 	return (rc);
6400 }
6401 
6402 static int
6403 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6404 {
6405 	struct adapter *sc = arg1;
6406 	struct sbuf *sb;
6407 	int rc;
6408 	u64 nrate[NCHAN], orate[NCHAN];
6409 
6410 	rc = sysctl_wire_old_buffer(req, 0);
6411 	if (rc != 0)
6412 		return (rc);
6413 
6414 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6415 	if (sb == NULL)
6416 		return (ENOMEM);
6417 
6418 	t4_get_chan_txrate(sc, nrate, orate);
6419 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6420 		 "channel 3\n");
6421 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6422 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6423 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6424 	    orate[0], orate[1], orate[2], orate[3]);
6425 
6426 	rc = sbuf_finish(sb);
6427 	sbuf_delete(sb);
6428 
6429 	return (rc);
6430 }
6431 
6432 static int
6433 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6434 {
6435 	struct adapter *sc = arg1;
6436 	struct sbuf *sb;
6437 	uint32_t *buf, *p;
6438 	int rc, i;
6439 
6440 	rc = sysctl_wire_old_buffer(req, 0);
6441 	if (rc != 0)
6442 		return (rc);
6443 
6444 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6445 	if (sb == NULL)
6446 		return (ENOMEM);
6447 
6448 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6449 	    M_ZERO | M_WAITOK);
6450 
6451 	t4_ulprx_read_la(sc, buf);
6452 	p = buf;
6453 
6454 	sbuf_printf(sb, "      Pcmd        Type   Message"
6455 	    "                Data");
6456 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6457 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6458 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6459 	}
6460 
6461 	rc = sbuf_finish(sb);
6462 	sbuf_delete(sb);
6463 	free(buf, M_CXGBE);
6464 	return (rc);
6465 }
6466 
6467 static int
6468 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6469 {
6470 	struct adapter *sc = arg1;
6471 	struct sbuf *sb;
6472 	int rc, v;
6473 
6474 	rc = sysctl_wire_old_buffer(req, 0);
6475 	if (rc != 0)
6476 		return (rc);
6477 
6478 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6479 	if (sb == NULL)
6480 		return (ENOMEM);
6481 
6482 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6483 	if (G_STATSOURCE_T5(v) == 7) {
6484 		if (G_STATMODE(v) == 0) {
6485 			sbuf_printf(sb, "total %d, incomplete %d",
6486 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6487 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6488 		} else if (G_STATMODE(v) == 1) {
6489 			sbuf_printf(sb, "total %d, data overflow %d",
6490 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6491 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6492 		}
6493 	}
6494 	rc = sbuf_finish(sb);
6495 	sbuf_delete(sb);
6496 
6497 	return (rc);
6498 }
6499 #endif
6500 
6501 static inline void
6502 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6503 {
6504 	struct buf_ring *br;
6505 	struct mbuf *m;
6506 
6507 	TXQ_LOCK_ASSERT_OWNED(txq);
6508 
6509 	br = txq->br;
6510 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6511 	if (m)
6512 		t4_eth_tx(ifp, txq, m);
6513 }
6514 
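/*
 * Callout handler for a stalled tx queue.  If transmission still can't
 * resume, reschedule for the next tick (unless the queue is marked
 * EQ_DOOMED); otherwise hand the work off to the adapter's taskqueue.
 */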
6515 void
6516 t4_tx_callout(void *arg)
6517 {
6518 	struct sge_eq *eq = arg;
6519 	struct adapter *sc;
6520 
6521 	if (EQ_TRYLOCK(eq) == 0)
6522 		goto reschedule;
6523 
6524 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6525 		EQ_UNLOCK(eq);
6526 reschedule:
		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6528 			callout_schedule(&eq->tx_callout, 1);
6529 		return;
6530 	}
6531 
6532 	EQ_LOCK_ASSERT_OWNED(eq);
6533 
6534 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6535 
6536 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6537 			struct sge_txq *txq = arg;
6538 			struct port_info *pi = txq->ifp->if_softc;
6539 
6540 			sc = pi->adapter;
6541 		} else {
6542 			struct sge_wrq *wrq = arg;
6543 
6544 			sc = wrq->adapter;
6545 		}
6546 
6547 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6548 	}
6549 
6550 	EQ_UNLOCK(eq);
6551 }
6552 
6553 void
6554 t4_tx_task(void *arg, int count)
6555 {
6556 	struct sge_eq *eq = arg;
6557 
6558 	EQ_LOCK(eq);
6559 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6560 		struct sge_txq *txq = arg;
6561 		txq_start(txq->ifp, txq);
6562 	} else {
6563 		struct sge_wrq *wrq = arg;
6564 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6565 	}
6566 	EQ_UNLOCK(eq);
6567 }
6568 
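/*
 * Translate the contents of the TP_VLAN_PRI_MAP register (the compressed
 * filter tuple configuration) to the driver's T4_FILTER_* mode flags.
 * The basic IP tuple fields are always available.
 */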
6569 static uint32_t
6570 fconf_to_mode(uint32_t fconf)
6571 {
6572 	uint32_t mode;
6573 
6574 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6575 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6576 
6577 	if (fconf & F_FRAGMENTATION)
6578 		mode |= T4_FILTER_IP_FRAGMENT;
6579 
6580 	if (fconf & F_MPSHITTYPE)
6581 		mode |= T4_FILTER_MPS_HIT_TYPE;
6582 
6583 	if (fconf & F_MACMATCH)
6584 		mode |= T4_FILTER_MAC_IDX;
6585 
6586 	if (fconf & F_ETHERTYPE)
6587 		mode |= T4_FILTER_ETH_TYPE;
6588 
6589 	if (fconf & F_PROTOCOL)
6590 		mode |= T4_FILTER_IP_PROTO;
6591 
6592 	if (fconf & F_TOS)
6593 		mode |= T4_FILTER_IP_TOS;
6594 
6595 	if (fconf & F_VLAN)
6596 		mode |= T4_FILTER_VLAN;
6597 
6598 	if (fconf & F_VNIC_ID)
6599 		mode |= T4_FILTER_VNIC;
6600 
6601 	if (fconf & F_PORT)
6602 		mode |= T4_FILTER_PORT;
6603 
6604 	if (fconf & F_FCOE)
6605 		mode |= T4_FILTER_FCoE;
6606 
6607 	return (mode);
6608 }
6609 
6610 static uint32_t
6611 mode_to_fconf(uint32_t mode)
6612 {
6613 	uint32_t fconf = 0;
6614 
6615 	if (mode & T4_FILTER_IP_FRAGMENT)
6616 		fconf |= F_FRAGMENTATION;
6617 
6618 	if (mode & T4_FILTER_MPS_HIT_TYPE)
6619 		fconf |= F_MPSHITTYPE;
6620 
6621 	if (mode & T4_FILTER_MAC_IDX)
6622 		fconf |= F_MACMATCH;
6623 
6624 	if (mode & T4_FILTER_ETH_TYPE)
6625 		fconf |= F_ETHERTYPE;
6626 
6627 	if (mode & T4_FILTER_IP_PROTO)
6628 		fconf |= F_PROTOCOL;
6629 
6630 	if (mode & T4_FILTER_IP_TOS)
6631 		fconf |= F_TOS;
6632 
6633 	if (mode & T4_FILTER_VLAN)
6634 		fconf |= F_VLAN;
6635 
6636 	if (mode & T4_FILTER_VNIC)
6637 		fconf |= F_VNIC_ID;
6638 
6639 	if (mode & T4_FILTER_PORT)
6640 		fconf |= F_PORT;
6641 
6642 	if (mode & T4_FILTER_FCoE)
6643 		fconf |= F_FCOE;
6644 
6645 	return (fconf);
6646 }
6647 
6648 static uint32_t
6649 fspec_to_fconf(struct t4_filter_specification *fs)
6650 {
6651 	uint32_t fconf = 0;
6652 
6653 	if (fs->val.frag || fs->mask.frag)
6654 		fconf |= F_FRAGMENTATION;
6655 
6656 	if (fs->val.matchtype || fs->mask.matchtype)
6657 		fconf |= F_MPSHITTYPE;
6658 
6659 	if (fs->val.macidx || fs->mask.macidx)
6660 		fconf |= F_MACMATCH;
6661 
6662 	if (fs->val.ethtype || fs->mask.ethtype)
6663 		fconf |= F_ETHERTYPE;
6664 
6665 	if (fs->val.proto || fs->mask.proto)
6666 		fconf |= F_PROTOCOL;
6667 
6668 	if (fs->val.tos || fs->mask.tos)
6669 		fconf |= F_TOS;
6670 
6671 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6672 		fconf |= F_VLAN;
6673 
6674 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6675 		fconf |= F_VNIC_ID;
6676 
6677 	if (fs->val.iport || fs->mask.iport)
6678 		fconf |= F_PORT;
6679 
6680 	if (fs->val.fcoe || fs->mask.fcoe)
6681 		fconf |= F_FCOE;
6682 
6683 	return (fconf);
6684 }
6685 
6686 static int
6687 get_filter_mode(struct adapter *sc, uint32_t *mode)
6688 {
6689 	int rc;
6690 	uint32_t fconf;
6691 
6692 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6693 	    "t4getfm");
6694 	if (rc)
6695 		return (rc);
6696 
6697 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6698 	    A_TP_VLAN_PRI_MAP);
6699 
6700 	if (sc->params.tp.vlan_pri_map != fconf) {
6701 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6702 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6703 		    fconf);
6704 		sc->params.tp.vlan_pri_map = fconf;
6705 	}
6706 
6707 	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6708 
6709 	end_synchronized_op(sc, LOCK_HELD);
6710 	return (0);
6711 }
6712 
6713 static int
6714 set_filter_mode(struct adapter *sc, uint32_t mode)
6715 {
6716 	uint32_t fconf;
6717 	int rc;
6718 
6719 	fconf = mode_to_fconf(mode);
6720 
6721 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6722 	    "t4setfm");
6723 	if (rc)
6724 		return (rc);
6725 
6726 	if (sc->tids.ftids_in_use > 0) {
6727 		rc = EBUSY;
6728 		goto done;
6729 	}
6730 
6731 #ifdef TCP_OFFLOAD
6732 	if (sc->offload_map) {
6733 		rc = EBUSY;
6734 		goto done;
6735 	}
6736 #endif
6737 
6738 #ifdef notyet
6739 	rc = -t4_set_filter_mode(sc, fconf);
6740 	if (rc == 0)
6741 		sc->filter_mode = fconf;
6742 #else
6743 	rc = ENOTSUP;
6744 #endif
6745 
6746 done:
6747 	end_synchronized_op(sc, LOCK_HELD);
6748 	return (rc);
6749 }
6750 
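/*
 * Read a filter's hit count directly out of its TCB via memory window 0.
 * The count is 64 bits at offset 16 within the TCB on T4, and 32 bits at
 * offset 24 on T5.
 */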
6751 static inline uint64_t
6752 get_filter_hits(struct adapter *sc, uint32_t fid)
6753 {
6754 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6755 	uint64_t hits;
6756 
6757 	memwin_info(sc, 0, &mw_base, NULL);
6758 	off = position_memwin(sc, 0,
6759 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6760 	if (is_t4(sc)) {
6761 		hits = t4_read_reg64(sc, mw_base + off + 16);
6762 		hits = be64toh(hits);
6763 	} else {
6764 		hits = t4_read_reg(sc, mw_base + off + 24);
6765 		hits = be32toh(hits);
6766 	}
6767 
6768 	return (hits);
6769 }
6770 
6771 static int
6772 get_filter(struct adapter *sc, struct t4_filter *t)
6773 {
6774 	int i, rc, nfilters = sc->tids.nftids;
6775 	struct filter_entry *f;
6776 
6777 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6778 	    "t4getf");
6779 	if (rc)
6780 		return (rc);
6781 
6782 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6783 	    t->idx >= nfilters) {
6784 		t->idx = 0xffffffff;
6785 		goto done;
6786 	}
6787 
6788 	f = &sc->tids.ftid_tab[t->idx];
6789 	for (i = t->idx; i < nfilters; i++, f++) {
6790 		if (f->valid) {
6791 			t->idx = i;
6792 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6793 			t->smtidx = f->smtidx;
6794 			if (f->fs.hitcnts)
6795 				t->hits = get_filter_hits(sc, t->idx);
6796 			else
6797 				t->hits = UINT64_MAX;
6798 			t->fs = f->fs;
6799 
6800 			goto done;
6801 		}
6802 	}
6803 
6804 	t->idx = 0xffffffff;
6805 done:
6806 	end_synchronized_op(sc, LOCK_HELD);
6807 	return (0);
6808 }
6809 
6810 static int
6811 set_filter(struct adapter *sc, struct t4_filter *t)
6812 {
6813 	unsigned int nfilters, nports;
6814 	struct filter_entry *f;
6815 	int i, rc;
6816 
6817 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6818 	if (rc)
6819 		return (rc);
6820 
6821 	nfilters = sc->tids.nftids;
6822 	nports = sc->params.nports;
6823 
6824 	if (nfilters == 0) {
6825 		rc = ENOTSUP;
6826 		goto done;
6827 	}
6828 
6829 	if (!(sc->flags & FULL_INIT_DONE)) {
6830 		rc = EAGAIN;
6831 		goto done;
6832 	}
6833 
6834 	if (t->idx >= nfilters) {
6835 		rc = EINVAL;
6836 		goto done;
6837 	}
6838 
6839 	/* Validate against the global filter mode */
6840 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6841 	    sc->params.tp.vlan_pri_map) {
6842 		rc = E2BIG;
6843 		goto done;
6844 	}
6845 
6846 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6847 		rc = EINVAL;
6848 		goto done;
6849 	}
6850 
6851 	if (t->fs.val.iport >= nports) {
6852 		rc = EINVAL;
6853 		goto done;
6854 	}
6855 
6856 	/* Can't specify an iq if not steering to it */
6857 	if (!t->fs.dirsteer && t->fs.iq) {
6858 		rc = EINVAL;
6859 		goto done;
6860 	}
6861 
6862 	/* IPv6 filter idx must be 4 aligned */
6863 	if (t->fs.type == 1 &&
6864 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6865 		rc = EINVAL;
6866 		goto done;
6867 	}
6868 
6869 	if (sc->tids.ftid_tab == NULL) {
6870 		KASSERT(sc->tids.ftids_in_use == 0,
6871 		    ("%s: no memory allocated but filters_in_use > 0",
6872 		    __func__));
6873 
6874 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6875 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6876 		if (sc->tids.ftid_tab == NULL) {
6877 			rc = ENOMEM;
6878 			goto done;
6879 		}
6880 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6881 	}
6882 
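	/*
	 * An IPv4 filter occupies one slot; an IPv6 filter occupies four
	 * consecutive 4-aligned slots.  All of the slots involved must be
	 * free and unlocked.
	 */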
6883 	for (i = 0; i < 4; i++) {
6884 		f = &sc->tids.ftid_tab[t->idx + i];
6885 
6886 		if (f->pending || f->valid) {
6887 			rc = EBUSY;
6888 			goto done;
6889 		}
6890 		if (f->locked) {
6891 			rc = EPERM;
6892 			goto done;
6893 		}
6894 
6895 		if (t->fs.type == 0)
6896 			break;
6897 	}
6898 
6899 	f = &sc->tids.ftid_tab[t->idx];
6900 	f->fs = t->fs;
6901 
6902 	rc = set_filter_wr(sc, t->idx);
6903 done:
6904 	end_synchronized_op(sc, 0);
6905 
6906 	if (rc == 0) {
6907 		mtx_lock(&sc->tids.ftid_lock);
6908 		for (;;) {
6909 			if (f->pending == 0) {
6910 				rc = f->valid ? 0 : EIO;
6911 				break;
6912 			}
6913 
6914 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6915 			    PCATCH, "t4setfw", 0)) {
6916 				rc = EINPROGRESS;
6917 				break;
6918 			}
6919 		}
6920 		mtx_unlock(&sc->tids.ftid_lock);
6921 	}
6922 	return (rc);
6923 }
6924 
6925 static int
6926 del_filter(struct adapter *sc, struct t4_filter *t)
6927 {
6928 	unsigned int nfilters;
6929 	struct filter_entry *f;
6930 	int rc;
6931 
6932 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6933 	if (rc)
6934 		return (rc);
6935 
6936 	nfilters = sc->tids.nftids;
6937 
6938 	if (nfilters == 0) {
6939 		rc = ENOTSUP;
6940 		goto done;
6941 	}
6942 
6943 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6944 	    t->idx >= nfilters) {
6945 		rc = EINVAL;
6946 		goto done;
6947 	}
6948 
6949 	if (!(sc->flags & FULL_INIT_DONE)) {
6950 		rc = EAGAIN;
6951 		goto done;
6952 	}
6953 
6954 	f = &sc->tids.ftid_tab[t->idx];
6955 
6956 	if (f->pending) {
6957 		rc = EBUSY;
6958 		goto done;
6959 	}
6960 	if (f->locked) {
6961 		rc = EPERM;
6962 		goto done;
6963 	}
6964 
6965 	if (f->valid) {
6966 		t->fs = f->fs;	/* extra info for the caller */
6967 		rc = del_filter_wr(sc, t->idx);
6968 	}
6969 
6970 done:
6971 	end_synchronized_op(sc, 0);
6972 
6973 	if (rc == 0) {
6974 		mtx_lock(&sc->tids.ftid_lock);
6975 		for (;;) {
6976 			if (f->pending == 0) {
6977 				rc = f->valid ? EIO : 0;
6978 				break;
6979 			}
6980 
6981 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6982 			    PCATCH, "t4delfw", 0)) {
6983 				rc = EINPROGRESS;
6984 				break;
6985 			}
6986 		}
6987 		mtx_unlock(&sc->tids.ftid_lock);
6988 	}
6989 
6990 	return (rc);
6991 }
6992 
6993 static void
6994 clear_filter(struct filter_entry *f)
6995 {
6996 	if (f->l2t)
6997 		t4_l2t_release(f->l2t);
6998 
6999 	bzero(f, sizeof (*f));
7000 }
7001 
7002 static int
7003 set_filter_wr(struct adapter *sc, int fidx)
7004 {
7005 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7006 	struct wrqe *wr;
7007 	struct fw_filter_wr *fwr;
7008 	unsigned int ftid;
7009 
7010 	ASSERT_SYNCHRONIZED_OP(sc);
7011 
7012 	if (f->fs.newdmac || f->fs.newvlan) {
7013 		/* This filter needs an L2T entry; allocate one. */
7014 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7015 		if (f->l2t == NULL)
7016 			return (EAGAIN);
7017 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7018 		    f->fs.dmac)) {
7019 			t4_l2t_release(f->l2t);
7020 			f->l2t = NULL;
7021 			return (ENOMEM);
7022 		}
7023 	}
7024 
7025 	ftid = sc->tids.ftid_base + fidx;
7026 
7027 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7028 	if (wr == NULL)
7029 		return (ENOMEM);
7030 
7031 	fwr = wrtod(wr);
7032 	bzero(fwr, sizeof (*fwr));
7033 
7034 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7035 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7036 	fwr->tid_to_iq =
7037 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7038 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7039 		V_FW_FILTER_WR_NOREPLY(0) |
7040 		V_FW_FILTER_WR_IQ(f->fs.iq));
7041 	fwr->del_filter_to_l2tix =
7042 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7043 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7044 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7045 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7046 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7047 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7048 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7049 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7050 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7051 		    f->fs.newvlan == VLAN_REWRITE) |
7052 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7053 		    f->fs.newvlan == VLAN_REWRITE) |
7054 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7055 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7056 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7057 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7058 	fwr->ethtype = htobe16(f->fs.val.ethtype);
7059 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7060 	fwr->frag_to_ovlan_vldm =
7061 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7062 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7063 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7064 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7065 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7066 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7067 	fwr->smac_sel = 0;
7068 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7069 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7070 	fwr->maci_to_matchtypem =
7071 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7072 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7073 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7074 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7075 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7076 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7077 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7078 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7079 	fwr->ptcl = f->fs.val.proto;
7080 	fwr->ptclm = f->fs.mask.proto;
7081 	fwr->ttyp = f->fs.val.tos;
7082 	fwr->ttypm = f->fs.mask.tos;
7083 	fwr->ivlan = htobe16(f->fs.val.vlan);
7084 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7085 	fwr->ovlan = htobe16(f->fs.val.vnic);
7086 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7087 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7088 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7089 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7090 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7091 	fwr->lp = htobe16(f->fs.val.dport);
7092 	fwr->lpm = htobe16(f->fs.mask.dport);
7093 	fwr->fp = htobe16(f->fs.val.sport);
7094 	fwr->fpm = htobe16(f->fs.mask.sport);
7095 	if (f->fs.newsmac)
7096 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7097 
7098 	f->pending = 1;
7099 	sc->tids.ftids_in_use++;
7100 
7101 	t4_wrq_tx(sc, wr);
7102 	return (0);
7103 }
7104 
7105 static int
7106 del_filter_wr(struct adapter *sc, int fidx)
7107 {
7108 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7109 	struct wrqe *wr;
7110 	struct fw_filter_wr *fwr;
7111 	unsigned int ftid;
7112 
7113 	ftid = sc->tids.ftid_base + fidx;
7114 
7115 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7116 	if (wr == NULL)
7117 		return (ENOMEM);
7118 	fwr = wrtod(wr);
7119 	bzero(fwr, sizeof (*fwr));
7120 
7121 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7122 
7123 	f->pending = 1;
7124 	t4_wrq_tx(sc, wr);
7125 	return (0);
7126 }
7127 
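/*
 * Reply handler for filter work requests.  This completes the handshake
 * that set_filter() and del_filter() sleep on: it updates the entry's
 * pending/valid state and wakes up any thread blocked on ftid_tab.
 */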
7128 int
7129 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7130 {
7131 	struct adapter *sc = iq->adapter;
7132 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7133 	unsigned int idx = GET_TID(rpl);
7134 
7135 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7136 	    rss->opcode));
7137 
7138 	if (idx >= sc->tids.ftid_base &&
7139 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7140 		unsigned int rc = G_COOKIE(rpl->cookie);
7141 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7142 
7143 		mtx_lock(&sc->tids.ftid_lock);
7144 		if (rc == FW_FILTER_WR_FLT_ADDED) {
7145 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7146 			    __func__, idx));
7147 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7148 			f->pending = 0;  /* asynchronous setup completed */
7149 			f->valid = 1;
7150 		} else {
7151 			if (rc != FW_FILTER_WR_FLT_DELETED) {
7152 				/* Add or delete failed, display an error */
7153 				log(LOG_ERR,
7154 				    "filter %u setup failed with error %u\n",
7155 				    idx, rc);
7156 			}
7157 
7158 			clear_filter(f);
7159 			sc->tids.ftids_in_use--;
7160 		}
7161 		wakeup(&sc->tids.ftid_tab);
7162 		mtx_unlock(&sc->tids.ftid_lock);
7163 	}
7164 
7165 	return (0);
7166 }
7167 
7168 static int
7169 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7170 {
7171 	int rc;
7172 
7173 	if (cntxt->cid > M_CTXTQID)
7174 		return (EINVAL);
7175 
7176 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7177 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7178 		return (EINVAL);
7179 
7180 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7181 	if (rc)
7182 		return (rc);
7183 
7184 	if (sc->flags & FW_OK) {
7185 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7186 		    &cntxt->data[0]);
7187 		if (rc == 0)
7188 			goto done;
7189 	}
7190 
7191 	/*
7192 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7193 	 * the backdoor.
7194 	 */
7195 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7196 done:
7197 	end_synchronized_op(sc, 0);
7198 	return (rc);
7199 }
7200 
7201 static int
7202 load_fw(struct adapter *sc, struct t4_data *fw)
7203 {
7204 	int rc;
7205 	uint8_t *fw_data;
7206 
7207 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7208 	if (rc)
7209 		return (rc);
7210 
7211 	if (sc->flags & FULL_INIT_DONE) {
7212 		rc = EBUSY;
7213 		goto done;
7214 	}
7215 
7216 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7217 	if (fw_data == NULL) {
7218 		rc = ENOMEM;
7219 		goto done;
7220 	}
7221 
7222 	rc = copyin(fw->data, fw_data, fw->len);
7223 	if (rc == 0)
7224 		rc = -t4_load_fw(sc, fw_data, fw->len);
7225 
7226 	free(fw_data, M_CXGBE);
7227 done:
7228 	end_synchronized_op(sc, 0);
7229 	return (rc);
7230 }
7231 
7232 static int
7233 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7234 {
7235 	uint32_t addr, off, remaining, i, n;
7236 	uint32_t *buf, *b;
7237 	uint32_t mw_base, mw_aperture;
7238 	int rc;
7239 	uint8_t *dst;
7240 
7241 	rc = validate_mem_range(sc, mr->addr, mr->len);
7242 	if (rc != 0)
7243 		return (rc);
7244 
7245 	memwin_info(sc, win, &mw_base, &mw_aperture);
7246 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7247 	addr = mr->addr;
7248 	remaining = mr->len;
7249 	dst = (void *)mr->data;
7250 
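	/*
	 * Copy the range out through the memory window one aperture at a
	 * time: position the window over the target address, read it into
	 * the bounce buffer 4 bytes at a time, and copyout to userspace.
	 */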
7251 	while (remaining) {
7252 		off = position_memwin(sc, win, addr);
7253 
7254 		/* number of bytes that we'll copy in the inner loop */
7255 		n = min(remaining, mw_aperture - off);
7256 		for (i = 0; i < n; i += 4)
7257 			*b++ = t4_read_reg(sc, mw_base + off + i);
7258 
7259 		rc = copyout(buf, dst, n);
7260 		if (rc != 0)
7261 			break;
7262 
7263 		b = buf;
7264 		dst += n;
7265 		remaining -= n;
7266 		addr += n;
7267 	}
7268 
7269 	free(buf, M_CXGBE);
7270 	return (rc);
7271 }
7272 
7273 static int
7274 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7275 {
7276 	int rc;
7277 
7278 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7279 		return (EINVAL);
7280 
7281 	if (i2cd->len > 1) {
7282 		/* XXX: need fw support for longer reads in one go */
7283 		return (ENOTSUP);
7284 	}
7285 
7286 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7287 	if (rc)
7288 		return (rc);
7289 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7290 	    i2cd->offset, &i2cd->data[0]);
7291 	end_synchronized_op(sc, 0);
7292 
7293 	return (rc);
7294 }
7295 
7296 int
7297 t4_os_find_pci_capability(struct adapter *sc, int cap)
7298 {
7299 	int i;
7300 
7301 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7302 }
7303 
7304 int
7305 t4_os_pci_save_state(struct adapter *sc)
7306 {
7307 	device_t dev;
7308 	struct pci_devinfo *dinfo;
7309 
7310 	dev = sc->dev;
7311 	dinfo = device_get_ivars(dev);
7312 
7313 	pci_cfg_save(dev, dinfo, 0);
7314 	return (0);
7315 }
7316 
7317 int
7318 t4_os_pci_restore_state(struct adapter *sc)
7319 {
7320 	device_t dev;
7321 	struct pci_devinfo *dinfo;
7322 
7323 	dev = sc->dev;
7324 	dinfo = device_get_ivars(dev);
7325 
7326 	pci_cfg_restore(dev, dinfo);
7327 	return (0);
7328 }
7329 
7330 void
7331 t4_os_portmod_changed(const struct adapter *sc, int idx)
7332 {
7333 	struct port_info *pi = sc->port[idx];
7334 	static const char *mod_str[] = {
7335 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7336 	};
7337 
7338 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7339 		if_printf(pi->ifp, "transceiver unplugged.\n");
7340 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7341 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7342 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7343 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7344 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7345 		if_printf(pi->ifp, "%s transceiver inserted.\n",
7346 		    mod_str[pi->mod_type]);
7347 	} else {
7348 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7349 		    pi->mod_type);
7350 	}
7351 }
7352 
7353 void
7354 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7355 {
7356 	struct port_info *pi = sc->port[idx];
7357 	struct ifnet *ifp = pi->ifp;
7358 
7359 	if (link_stat) {
7360 		pi->linkdnrc = -1;
7361 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7362 		if_link_state_change(ifp, LINK_STATE_UP);
7363 	} else {
7364 		if (reason >= 0)
7365 			pi->linkdnrc = reason;
7366 		if_link_state_change(ifp, LINK_STATE_DOWN);
7367 	}
7368 }
7369 
7370 void
7371 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7372 {
7373 	struct adapter *sc;
7374 
7375 	sx_slock(&t4_list_lock);
7376 	SLIST_FOREACH(sc, &t4_list, link) {
7377 		/*
7378 		 * func should not make any assumptions about what state sc is
7379 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7380 		 */
7381 		func(sc, arg);
7382 	}
7383 	sx_sunlock(&t4_list_lock);
7384 }
7385 
7386 static int
7387 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7388 {
	return (0);
7390 }
7391 
7392 static int
7393 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7394 {
	return (0);
7396 }
7397 
7398 static int
7399 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7400     struct thread *td)
7401 {
7402 	int rc;
7403 	struct adapter *sc = dev->si_drv1;
7404 
7405 	rc = priv_check(td, PRIV_DRIVER);
7406 	if (rc != 0)
7407 		return (rc);
7408 
7409 	switch (cmd) {
7410 	case CHELSIO_T4_GETREG: {
7411 		struct t4_reg *edata = (struct t4_reg *)data;
7412 
7413 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7414 			return (EFAULT);
7415 
7416 		if (edata->size == 4)
7417 			edata->val = t4_read_reg(sc, edata->addr);
7418 		else if (edata->size == 8)
7419 			edata->val = t4_read_reg64(sc, edata->addr);
7420 		else
7421 			return (EINVAL);
7422 
7423 		break;
7424 	}
7425 	case CHELSIO_T4_SETREG: {
7426 		struct t4_reg *edata = (struct t4_reg *)data;
7427 
7428 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7429 			return (EFAULT);
7430 
7431 		if (edata->size == 4) {
7432 			if (edata->val & 0xffffffff00000000)
7433 				return (EINVAL);
7434 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7435 		} else if (edata->size == 8)
7436 			t4_write_reg64(sc, edata->addr, edata->val);
7437 		else
7438 			return (EINVAL);
7439 		break;
7440 	}
7441 	case CHELSIO_T4_REGDUMP: {
7442 		struct t4_regdump *regs = (struct t4_regdump *)data;
7443 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7444 		uint8_t *buf;
7445 
7446 		if (regs->len < reglen) {
7447 			regs->len = reglen; /* hint to the caller */
7448 			return (ENOBUFS);
7449 		}
7450 
7451 		regs->len = reglen;
7452 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7453 		t4_get_regs(sc, regs, buf);
7454 		rc = copyout(buf, regs->data, reglen);
7455 		free(buf, M_CXGBE);
7456 		break;
7457 	}
7458 	case CHELSIO_T4_GET_FILTER_MODE:
7459 		rc = get_filter_mode(sc, (uint32_t *)data);
7460 		break;
7461 	case CHELSIO_T4_SET_FILTER_MODE:
7462 		rc = set_filter_mode(sc, *(uint32_t *)data);
7463 		break;
7464 	case CHELSIO_T4_GET_FILTER:
7465 		rc = get_filter(sc, (struct t4_filter *)data);
7466 		break;
7467 	case CHELSIO_T4_SET_FILTER:
7468 		rc = set_filter(sc, (struct t4_filter *)data);
7469 		break;
7470 	case CHELSIO_T4_DEL_FILTER:
7471 		rc = del_filter(sc, (struct t4_filter *)data);
7472 		break;
7473 	case CHELSIO_T4_GET_SGE_CONTEXT:
7474 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7475 		break;
7476 	case CHELSIO_T4_LOAD_FW:
7477 		rc = load_fw(sc, (struct t4_data *)data);
7478 		break;
7479 	case CHELSIO_T4_GET_MEM:
7480 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7481 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); /* win 2 */
7482 	case CHELSIO_T4_GET_I2C:
7483 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7484 		break;
7485 	case CHELSIO_T4_CLEAR_STATS: {
7486 		int i;
7487 		u_int port_id = *(uint32_t *)data;
7488 		struct port_info *pi;
7489 
7490 		if (port_id >= sc->params.nports)
7491 			return (EINVAL);
7492 
7493 		/* MAC stats */
7494 		t4_clr_port_stats(sc, port_id);
7495 
7496 		pi = sc->port[port_id];
7497 		if (pi->flags & PORT_INIT_DONE) {
7498 			struct sge_rxq *rxq;
7499 			struct sge_txq *txq;
7500 			struct sge_wrq *wrq;
7501 
7502 			for_each_rxq(pi, i, rxq) {
7503 #if defined(INET) || defined(INET6)
7504 				rxq->lro.lro_queued = 0;
7505 				rxq->lro.lro_flushed = 0;
7506 #endif
7507 				rxq->rxcsum = 0;
7508 				rxq->vlan_extraction = 0;
7509 			}
7510 
7511 			for_each_txq(pi, i, txq) {
7512 				txq->txcsum = 0;
7513 				txq->tso_wrs = 0;
7514 				txq->vlan_insertion = 0;
7515 				txq->imm_wrs = 0;
7516 				txq->sgl_wrs = 0;
7517 				txq->txpkt_wrs = 0;
7518 				txq->txpkts_wrs = 0;
7519 				txq->txpkts_pkts = 0;
7520 				txq->br->br_drops = 0;
7521 				txq->no_dmamap = 0;
7522 				txq->no_desc = 0;
7523 			}
7524 
7525 #ifdef TCP_OFFLOAD
7526 			/* nothing to clear for each ofld_rxq */
7527 
7528 			for_each_ofld_txq(pi, i, wrq) {
7529 				wrq->tx_wrs = 0;
7530 				wrq->no_desc = 0;
7531 			}
7532 #endif
7533 			wrq = &sc->sge.ctrlq[pi->port_id];
7534 			wrq->tx_wrs = 0;
7535 			wrq->no_desc = 0;
7536 		}
7537 		break;
7538 	}
7539 	case CHELSIO_T4_GET_TRACER:
7540 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7541 		break;
7542 	case CHELSIO_T4_SET_TRACER:
7543 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7544 		break;
7545 	default:
7546 		rc = EINVAL;
7547 	}
7548 
7549 	return (rc);
7550 }
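
/*
 * The handler above is reached from userland through the adapter's nexus
 * character device (cxgbetool is the usual consumer).  A minimal sketch,
 * assuming the node is /dev/t4nex0 and reading the 32-bit register at an
 * arbitrary 4-byte-aligned offset:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include "t4_ioctl.h"
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */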
7551 
7552 #ifdef TCP_OFFLOAD
7553 static int
7554 toe_capability(struct port_info *pi, int enable)
7555 {
7556 	int rc;
7557 	struct adapter *sc = pi->adapter;
7558 
7559 	ASSERT_SYNCHRONIZED_OP(sc);
7560 
7561 	if (!is_offload(sc))
7562 		return (ENODEV);
7563 
7564 	if (enable) {
7565 		if (!(sc->flags & FULL_INIT_DONE)) {
7566 			rc = cxgbe_init_synchronized(pi);
7567 			if (rc)
7568 				return (rc);
7569 		}
7570 
7571 		if (isset(&sc->offload_map, pi->port_id))
7572 			return (0);
7573 
7574 		if (!(sc->flags & TOM_INIT_DONE)) {
7575 			rc = t4_activate_uld(sc, ULD_TOM);
7576 			if (rc == EAGAIN) {
7577 				log(LOG_WARNING,
7578 				    "You must kldload t4_tom.ko before trying "
7579 				    "to enable TOE on a cxgbe interface.\n");
7580 			}
7581 			if (rc != 0)
7582 				return (rc);
7583 			KASSERT(sc->tom_softc != NULL,
7584 			    ("%s: TOM activated but softc NULL", __func__));
7585 			KASSERT(sc->flags & TOM_INIT_DONE,
7586 			    ("%s: TOM activated but flag not set", __func__));
7587 		}
7588 
7589 		setbit(&sc->offload_map, pi->port_id);
7590 	} else {
7591 		if (!isset(&sc->offload_map, pi->port_id))
7592 			return (0);
7593 
7594 		KASSERT(sc->flags & TOM_INIT_DONE,
7595 		    ("%s: TOM never initialized?", __func__));
7596 		clrbit(&sc->offload_map, pi->port_id);
7597 	}
7598 
7599 	return (0);
7600 }
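
/*
 * toe_capability() backs the IFCAP_TOE capability on a port's ifnet, which
 * an administrator toggles through ifconfig(8)'s TOE capability option on
 * the cxgbe interface; t4_tom.ko must be loaded for enabling to succeed,
 * as the log message above notes.
 */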
7601 
7602 /*
7603  * Add an upper layer driver to the global list.
7604  */
7605 int
7606 t4_register_uld(struct uld_info *ui)
7607 {
7608 	int rc = 0;
7609 	struct uld_info *u;
7610 
7611 	sx_xlock(&t4_uld_list_lock);
7612 	SLIST_FOREACH(u, &t4_uld_list, link) {
7613 		if (u->uld_id == ui->uld_id) {
7614 			rc = EEXIST;
7615 			goto done;
7616 		}
7617 	}
7618 
7619 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7620 	ui->refcount = 0;
7621 done:
7622 	sx_xunlock(&t4_uld_list_lock);
7623 	return (rc);
7624 }
7625 
7626 int
7627 t4_unregister_uld(struct uld_info *ui)
7628 {
7629 	int rc = EINVAL;
7630 	struct uld_info *u;
7631 
7632 	sx_xlock(&t4_uld_list_lock);
7633 
7634 	SLIST_FOREACH(u, &t4_uld_list, link) {
7635 		if (u == ui) {
7636 			if (ui->refcount > 0) {
7637 				rc = EBUSY;
7638 				goto done;
7639 			}
7640 
7641 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7642 			rc = 0;
7643 			goto done;
7644 		}
7645 	}
7646 done:
7647 	sx_xunlock(&t4_uld_list_lock);
7648 	return (rc);
7649 }
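
/*
 * A ULD (e.g. the TOE module, t4_tom) plugs in through this interface.
 * A minimal sketch with hypothetical callback names; refcount is managed
 * by t4_activate_uld/t4_deactivate_uld, not by the ULD:
 *
 *	static int my_activate(struct adapter *sc) { return (0); }
 *	static int my_deactivate(struct adapter *sc) { return (0); }
 *
 *	static struct uld_info my_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = my_activate,
 *		.deactivate = my_deactivate,
 *	};
 *
 *	int error = t4_register_uld(&my_uld_info);
 */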
7650 
7651 int
7652 t4_activate_uld(struct adapter *sc, int id)
7653 {
7654 	int rc = EAGAIN;	/* default: no such ULD registered */
7655 	struct uld_info *ui;
7656 
7657 	ASSERT_SYNCHRONIZED_OP(sc);
7658 
7659 	sx_slock(&t4_uld_list_lock);
7660 
7661 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7662 		if (ui->uld_id == id) {
7663 			rc = ui->activate(sc);
7664 			if (rc == 0)
7665 				ui->refcount++;
7666 			goto done;
7667 		}
7668 	}
7669 done:
7670 	sx_sunlock(&t4_uld_list_lock);
7671 
7672 	return (rc);
7673 }
7674 
7675 int
7676 t4_deactivate_uld(struct adapter *sc, int id)
7677 {
7678 	int rc = EINVAL;
7679 	struct uld_info *ui;
7680 
7681 	ASSERT_SYNCHRONIZED_OP(sc);
7682 
7683 	sx_slock(&t4_uld_list_lock);
7684 
7685 	SLIST_FOREACH(ui, &t4_uld_list, link) {
7686 		if (ui->uld_id == id) {
7687 			rc = ui->deactivate(sc);
7688 			if (rc == 0)
7689 				ui->refcount--;
7690 			goto done;
7691 		}
7692 	}
7693 done:
7694 	sx_sunlock(&t4_uld_list_lock);
7695 
7696 	return (rc);
7697 }
7698 #endif
7699 
7700 /*
7701  * Come up with reasonable defaults for some of the tunables, provided they're
7702  * not set by the user (in which case we'll use the values as is).
7703  */
7704 static void
7705 tweak_tunables(void)
7706 {
7707 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7708 
7709 	if (t4_ntxq10g < 1)
7710 		t4_ntxq10g = min(nc, NTXQ_10G);
7711 
7712 	if (t4_ntxq1g < 1)
7713 		t4_ntxq1g = min(nc, NTXQ_1G);
7714 
7715 	if (t4_nrxq10g < 1)
7716 		t4_nrxq10g = min(nc, NRXQ_10G);
7717 
7718 	if (t4_nrxq1g < 1)
7719 		t4_nrxq1g = min(nc, NRXQ_1G);
7720 
7721 #ifdef TCP_OFFLOAD
7722 	if (t4_nofldtxq10g < 1)
7723 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7724 
7725 	if (t4_nofldtxq1g < 1)
7726 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7727 
7728 	if (t4_nofldrxq10g < 1)
7729 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7730 
7731 	if (t4_nofldrxq1g < 1)
7732 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7733 
7734 	if (t4_toecaps_allowed == -1)
7735 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7736 #else
7737 	if (t4_toecaps_allowed == -1)
7738 		t4_toecaps_allowed = 0;
7739 #endif
7740 
7741 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7742 		t4_tmr_idx_10g = TMR_IDX_10G;
7743 
7744 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7745 		t4_pktc_idx_10g = PKTC_IDX_10G;
7746 
7747 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7748 		t4_tmr_idx_1g = TMR_IDX_1G;
7749 
7750 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7751 		t4_pktc_idx_1g = PKTC_IDX_1G;
7752 
7753 	if (t4_qsize_txq < 128)
7754 		t4_qsize_txq = 128;
7755 
7756 	if (t4_qsize_rxq < 128)
7757 		t4_qsize_rxq = 128;
7758 	while (t4_qsize_rxq & 7)	/* round up to a multiple of 8 */
7759 		t4_qsize_rxq++;
7760 
7761 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7762 }
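
/*
 * All of the knobs above are loader tunables; values set in
 * /boot/loader.conf are used verbatim and only unset ones get the defaults
 * computed here.  For illustration (names per cxgbe(4)):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 */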
7763 
7764 static int
7765 mod_event(module_t mod, int cmd, void *arg)
7766 {
7767 	int rc = 0;
7768 	static int loaded = 0;
7769 
7770 	switch (cmd) {
7771 	case MOD_LOAD:
7772 		if (atomic_fetchadd_int(&loaded, 1))
7773 			break;	/* t4nex and t5nex share this handler */
7774 		t4_sge_modload();
7775 		sx_init(&t4_list_lock, "T4/T5 adapters");
7776 		SLIST_INIT(&t4_list);
7777 #ifdef TCP_OFFLOAD
7778 		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
7779 		SLIST_INIT(&t4_uld_list);
7780 #endif
7781 		t4_tracer_modload();
7782 		tweak_tunables();
7783 		break;
7784 
7785 	case MOD_UNLOAD:
7786 		if (atomic_fetchadd_int(&loaded, -1) > 1)
7787 			break;	/* the other nexus module is still loaded */
7788 		t4_tracer_modunload();
7789 #ifdef TCP_OFFLOAD
7790 		sx_slock(&t4_uld_list_lock);
7791 		if (!SLIST_EMPTY(&t4_uld_list)) {
7792 			rc = EBUSY;
7793 			sx_sunlock(&t4_uld_list_lock);
7794 			break;
7795 		}
7796 		sx_sunlock(&t4_uld_list_lock);
7797 		sx_destroy(&t4_uld_list_lock);
7798 #endif
7799 		sx_slock(&t4_list_lock);
7800 		if (!SLIST_EMPTY(&t4_list)) {
7801 			rc = EBUSY;
7802 			sx_sunlock(&t4_list_lock);
7803 			break;
7804 		}
7805 		sx_sunlock(&t4_list_lock);
7806 		sx_destroy(&t4_list_lock);
7807 		break;
7808 	}
7809 
7810 	return (rc);
7811 }
7812 
7813 static devclass_t t4_devclass, t5_devclass;
7814 static devclass_t cxgbe_devclass, cxl_devclass;
7815 
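/*
 * Module glue: the t4nex and t5nex nexus drivers attach to the PCI bus and
 * share mod_event() for one-time setup/teardown, while the cxgbe and cxl
 * port drivers attach as children of their respective nexus devices.
 */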
7816 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7817 MODULE_VERSION(t4nex, 1);
7818 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7819 
7820 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7821 MODULE_VERSION(t5nex, 1);
7822 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7823 
7824 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7825 MODULE_VERSION(cxgbe, 1);
7826 
7827 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7828 MODULE_VERSION(cxl, 1);
7829