xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 39ee7a7a6bdd1557b1c3532abf60d139798ac88b)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_rss.h"
34 
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/priv.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/malloc.h>
42 #include <sys/queue.h>
43 #include <sys/taskqueue.h>
44 #include <sys/pciio.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pci_private.h>
48 #include <sys/firmware.h>
49 #include <sys/sbuf.h>
50 #include <sys/smp.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <net/ethernet.h>
55 #include <net/if.h>
56 #include <net/if_types.h>
57 #include <net/if_dl.h>
58 #include <net/if_vlan_var.h>
59 #ifdef RSS
60 #include <net/rss_config.h>
61 #endif
62 #if defined(__i386__) || defined(__amd64__)
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65 #endif
66 
67 #include "common/common.h"
68 #include "common/t4_msg.h"
69 #include "common/t4_regs.h"
70 #include "common/t4_regs_values.h"
71 #include "t4_ioctl.h"
72 #include "t4_l2t.h"
73 #include "t4_mp_ring.h"
74 
75 /* T4 bus driver interface */
76 static int t4_probe(device_t);
77 static int t4_attach(device_t);
78 static int t4_detach(device_t);
79 static device_method_t t4_methods[] = {
80 	DEVMETHOD(device_probe,		t4_probe),
81 	DEVMETHOD(device_attach,	t4_attach),
82 	DEVMETHOD(device_detach,	t4_detach),
83 
84 	DEVMETHOD_END
85 };
86 static driver_t t4_driver = {
87 	"t4nex",
88 	t4_methods,
89 	sizeof(struct adapter)
90 };
91 
92 
93 /* T4 port (cxgbe) interface */
94 static int cxgbe_probe(device_t);
95 static int cxgbe_attach(device_t);
96 static int cxgbe_detach(device_t);
97 static device_method_t cxgbe_methods[] = {
98 	DEVMETHOD(device_probe,		cxgbe_probe),
99 	DEVMETHOD(device_attach,	cxgbe_attach),
100 	DEVMETHOD(device_detach,	cxgbe_detach),
101 	DEVMETHOD_END
102 };
103 static driver_t cxgbe_driver = {
104 	"cxgbe",
105 	cxgbe_methods,
106 	sizeof(struct port_info)
107 };
108 
109 static d_ioctl_t t4_ioctl;
110 static d_open_t t4_open;
111 static d_close_t t4_close;
112 
113 static struct cdevsw t4_cdevsw = {
114        .d_version = D_VERSION,
115        .d_flags = 0,
116        .d_open = t4_open,
117        .d_close = t4_close,
118        .d_ioctl = t4_ioctl,
119        .d_name = "t4nex",
120 };
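/*
 * Note: the nexus character device created from this cdevsw (/dev/t4nex<unit>)
 * is what the userland cxgbetool utility opens; register i/o, card memory
 * reads, and the other operations below are all reached via t4_ioctl().
 */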
121 
122 /* T5 bus driver interface */
123 static int t5_probe(device_t);
124 static device_method_t t5_methods[] = {
125 	DEVMETHOD(device_probe,		t5_probe),
126 	DEVMETHOD(device_attach,	t4_attach),
127 	DEVMETHOD(device_detach,	t4_detach),
128 
129 	DEVMETHOD_END
130 };
131 static driver_t t5_driver = {
132 	"t5nex",
133 	t5_methods,
134 	sizeof(struct adapter)
135 };
136 
137 
138 /* T5 port (cxl) interface */
139 static driver_t cxl_driver = {
140 	"cxl",
141 	cxgbe_methods,
142 	sizeof(struct port_info)
143 };
144 
145 static struct cdevsw t5_cdevsw = {
146        .d_version = D_VERSION,
147        .d_flags = 0,
148        .d_open = t4_open,
149        .d_close = t4_close,
150        .d_ioctl = t4_ioctl,
151        .d_name = "t5nex",
152 };
153 
154 /* ifnet + media interface */
155 static void cxgbe_init(void *);
156 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
157 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
158 static void cxgbe_qflush(struct ifnet *);
159 static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
160 static int cxgbe_media_change(struct ifnet *);
161 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
162 
163 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
164 
165 /*
166  * The correct lock order, when multiple locks must be acquired, is
167  * t4_list_lock, then ADAPTER_LOCK, then t4_uld_list_lock.
168  */
169 static struct sx t4_list_lock;
170 SLIST_HEAD(, adapter) t4_list;
171 #ifdef TCP_OFFLOAD
172 static struct sx t4_uld_list_lock;
173 SLIST_HEAD(, uld_info) t4_uld_list;
174 #endif
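/*
 * A minimal sketch of the documented order, for code that needs both the
 * adapter list lock and a specific adapter's lock:
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(sc, &t4_list, link) {
 *		ADAPTER_LOCK(sc);
 *		...
 *		ADAPTER_UNLOCK(sc);
 *	}
 *	sx_sunlock(&t4_list_lock);
 */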
175 
176 /*
177  * Tunables.  See tweak_tunables() too.
178  *
179  * Each tunable is set to a default value here if it's known at compile-time.
180  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
181  * provide a reasonable default when the driver is loaded.
182  *
183  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
184  * T5 are under hw.cxl.
185  */
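/*
 * For example, a hypothetical /boot/loader.conf fragment (values are
 * illustrative only) that overrides a few of the defaults below before the
 * module is loaded:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="4"
 *	hw.cxgbe.fw_install="2"
 *	hw.cxl.write_combine="1"
 */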
186 
187 /*
188  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
189  */
190 #define NTXQ_10G 16
191 static int t4_ntxq10g = -1;
192 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
193 
194 #define NRXQ_10G 8
195 static int t4_nrxq10g = -1;
196 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
197 
198 #define NTXQ_1G 4
199 static int t4_ntxq1g = -1;
200 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
201 
202 #define NRXQ_1G 2
203 static int t4_nrxq1g = -1;
204 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
205 
206 static int t4_rsrv_noflowq = 0;
207 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
208 
209 #ifdef TCP_OFFLOAD
210 #define NOFLDTXQ_10G 8
211 static int t4_nofldtxq10g = -1;
212 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
213 
214 #define NOFLDRXQ_10G 2
215 static int t4_nofldrxq10g = -1;
216 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
217 
218 #define NOFLDTXQ_1G 2
219 static int t4_nofldtxq1g = -1;
220 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
221 
222 #define NOFLDRXQ_1G 1
223 static int t4_nofldrxq1g = -1;
224 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
225 #endif
226 
227 #ifdef DEV_NETMAP
228 #define NNMTXQ_10G 2
229 static int t4_nnmtxq10g = -1;
230 TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
231 
232 #define NNMRXQ_10G 2
233 static int t4_nnmrxq10g = -1;
234 TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
235 
236 #define NNMTXQ_1G 1
237 static int t4_nnmtxq1g = -1;
238 TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
239 
240 #define NNMRXQ_1G 1
241 static int t4_nnmrxq1g = -1;
242 TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
243 #endif
244 
245 /*
246  * Holdoff parameters for 10G and 1G ports.
247  */
248 #define TMR_IDX_10G 1
249 static int t4_tmr_idx_10g = TMR_IDX_10G;
250 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
251 
252 #define PKTC_IDX_10G (-1)
253 static int t4_pktc_idx_10g = PKTC_IDX_10G;
254 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
255 
256 #define TMR_IDX_1G 1
257 static int t4_tmr_idx_1g = TMR_IDX_1G;
258 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
259 
260 #define PKTC_IDX_1G (-1)
261 static int t4_pktc_idx_1g = PKTC_IDX_1G;
262 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
263 
264 /*
265  * Size (# of entries) of each tx and rx queue.
266  */
267 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
268 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
269 
270 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
271 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
272 
273 /*
274  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
275  */
276 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
277 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
278 
279 /*
280  * Configuration file.
281  */
282 #define DEFAULT_CF	"default"
283 #define FLASH_CF	"flash"
284 #define UWIRE_CF	"uwire"
285 #define FPGA_CF		"fpga"
286 static char t4_cfg_file[32] = DEFAULT_CF;
287 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
288 
289 /*
290  * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
291  * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
292  * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
293  *            mark or when signalled to do so, 0 to never emit PAUSE.
294  */
295 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
296 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
297 
298 /*
299  * Firmware auto-install by the driver during attach (0, 1, 2 = prohibited,
300  * allowed, encouraged respectively).
301  */
302 static unsigned int t4_fw_install = 1;
303 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
304 
305 /*
306  * ASIC features that will be used.  Disable the ones you don't want so that the
307  * chip resources aren't wasted on features that will not be used.
308  */
309 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
310 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
311 
312 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
313 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
314 
315 static int t4_toecaps_allowed = -1;
316 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
317 
318 static int t4_rdmacaps_allowed = 0;
319 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
320 
321 static int t4_iscsicaps_allowed = 0;
322 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
323 
324 static int t4_fcoecaps_allowed = 0;
325 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
326 
327 static int t5_write_combine = 0;
328 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
329 
330 struct intrs_and_queues {
331 	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
332 	uint16_t nirq;		/* Total # of vectors */
333 	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
334 	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
335 	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
336 	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
337 	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
338 	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
339 	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
340 #ifdef TCP_OFFLOAD
341 	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
342 	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
343 	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
344 	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
345 #endif
346 #ifdef DEV_NETMAP
347 	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
348 	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
349 	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
350 	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
351 #endif
352 };
353 
354 struct filter_entry {
355         uint32_t valid:1;	/* filter allocated and valid */
356         uint32_t locked:1;	/* filter is administratively locked */
357         uint32_t pending:1;	/* filter action is pending firmware reply */
358 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
359 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
360 
361         struct t4_filter_specification fs;
362 };
363 
364 static int map_bars_0_and_4(struct adapter *);
365 static int map_bar_2(struct adapter *);
366 static void setup_memwin(struct adapter *);
367 static int validate_mem_range(struct adapter *, uint32_t, int);
368 static int fwmtype_to_hwmtype(int);
369 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
370     uint32_t *);
371 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
372 static uint32_t position_memwin(struct adapter *, int, uint32_t);
373 static int cfg_itype_and_nqueues(struct adapter *, int, int,
374     struct intrs_and_queues *);
375 static int prep_firmware(struct adapter *);
376 static int partition_resources(struct adapter *, const struct firmware *,
377     const char *);
378 static int get_params__pre_init(struct adapter *);
379 static int get_params__post_init(struct adapter *);
380 static int set_params__post_init(struct adapter *);
381 static void t4_set_desc(struct adapter *);
382 static void build_medialist(struct port_info *, struct ifmedia *);
383 static int cxgbe_init_synchronized(struct port_info *);
384 static int cxgbe_uninit_synchronized(struct port_info *);
385 static int setup_intr_handlers(struct adapter *);
386 static void quiesce_txq(struct adapter *, struct sge_txq *);
387 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
388 static void quiesce_iq(struct adapter *, struct sge_iq *);
389 static void quiesce_fl(struct adapter *, struct sge_fl *);
390 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
391     driver_intr_t *, void *, char *);
392 static int t4_free_irq(struct adapter *, struct irq *);
393 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
394     unsigned int);
395 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
396 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
397 static void cxgbe_tick(void *);
398 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
399 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
400     struct mbuf *);
401 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
402 static int fw_msg_not_handled(struct adapter *, const __be64 *);
403 static int t4_sysctls(struct adapter *);
404 static int cxgbe_sysctls(struct port_info *);
405 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
406 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
407 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
408 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
409 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
410 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
411 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
412 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
413 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
414 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
415 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
416 #ifdef SBUF_DRAIN
417 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
418 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
419 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
420 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
421 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
422 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
423 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
424 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
425 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
426 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
427 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
428 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
429 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
430 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
431 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
432 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
433 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
434 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
435 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
436 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
437 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
438 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
439 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
440 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
441 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
442 #endif
443 static uint32_t fconf_to_mode(uint32_t);
444 static uint32_t mode_to_fconf(uint32_t);
445 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
446 static int get_filter_mode(struct adapter *, uint32_t *);
447 static int set_filter_mode(struct adapter *, uint32_t);
448 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
449 static int get_filter(struct adapter *, struct t4_filter *);
450 static int set_filter(struct adapter *, struct t4_filter *);
451 static int del_filter(struct adapter *, struct t4_filter *);
452 static void clear_filter(struct filter_entry *);
453 static int set_filter_wr(struct adapter *, int);
454 static int del_filter_wr(struct adapter *, int);
455 static int get_sge_context(struct adapter *, struct t4_sge_context *);
456 static int load_fw(struct adapter *, struct t4_data *);
457 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
458 static int read_i2c(struct adapter *, struct t4_i2c_data *);
459 static int set_sched_class(struct adapter *, struct t4_sched_params *);
460 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
461 #ifdef TCP_OFFLOAD
462 static int toe_capability(struct port_info *, int);
463 #endif
464 static int mod_event(module_t, int, void *);
465 
466 struct {
467 	uint16_t device;
468 	char *desc;
469 } t4_pciids[] = {
470 	{0xa000, "Chelsio Terminator 4 FPGA"},
471 	{0x4400, "Chelsio T440-dbg"},
472 	{0x4401, "Chelsio T420-CR"},
473 	{0x4402, "Chelsio T422-CR"},
474 	{0x4403, "Chelsio T440-CR"},
475 	{0x4404, "Chelsio T420-BCH"},
476 	{0x4405, "Chelsio T440-BCH"},
477 	{0x4406, "Chelsio T440-CH"},
478 	{0x4407, "Chelsio T420-SO"},
479 	{0x4408, "Chelsio T420-CX"},
480 	{0x4409, "Chelsio T420-BT"},
481 	{0x440a, "Chelsio T404-BT"},
482 	{0x440e, "Chelsio T440-LP-CR"},
483 }, t5_pciids[] = {
484 	{0xb000, "Chelsio Terminator 5 FPGA"},
485 	{0x5400, "Chelsio T580-dbg"},
486 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
487 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
488 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
489 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
490 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
491 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
492 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
493 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
494 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
495 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
496 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
497 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
498 	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
499 #ifdef notyet
500 	{0x5404,  "Chelsio T520-BCH"},
501 	{0x5405,  "Chelsio T540-BCH"},
502 	{0x5406,  "Chelsio T540-CH"},
503 	{0x5408,  "Chelsio T520-CX"},
504 	{0x540b,  "Chelsio B520-SR"},
505 	{0x540c,  "Chelsio B504-BT"},
506 	{0x540f,  "Chelsio Amsterdam"},
507 	{0x5413,  "Chelsio T580-CHR"},
508 #endif
509 };
510 
511 #ifdef TCP_OFFLOAD
512 /*
513  * service_iq() has an iq and needs the fl.  The offset of the fl from the iq
514  * must be exactly the same for both rxq and ofld_rxq.
515  */
516 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
517 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
518 #endif
519 
520 /* No easy way to include t4_msg.h before adapter.h so we check this way */
521 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
522 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
523 
524 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
525 
526 static int
527 t4_probe(device_t dev)
528 {
529 	int i;
530 	uint16_t v = pci_get_vendor(dev);
531 	uint16_t d = pci_get_device(dev);
532 	uint8_t f = pci_get_function(dev);
533 
534 	if (v != PCI_VENDOR_ID_CHELSIO)
535 		return (ENXIO);
536 
537 	/* Attach only to PF0 of the FPGA */
538 	if (d == 0xa000 && f != 0)
539 		return (ENXIO);
540 
541 	for (i = 0; i < nitems(t4_pciids); i++) {
542 		if (d == t4_pciids[i].device) {
543 			device_set_desc(dev, t4_pciids[i].desc);
544 			return (BUS_PROBE_DEFAULT);
545 		}
546 	}
547 
548 	return (ENXIO);
549 }
550 
551 static int
552 t5_probe(device_t dev)
553 {
554 	int i;
555 	uint16_t v = pci_get_vendor(dev);
556 	uint16_t d = pci_get_device(dev);
557 	uint8_t f = pci_get_function(dev);
558 
559 	if (v != PCI_VENDOR_ID_CHELSIO)
560 		return (ENXIO);
561 
562 	/* Attach only to PF0 of the FPGA */
563 	if (d == 0xb000 && f != 0)
564 		return (ENXIO);
565 
566 	for (i = 0; i < nitems(t5_pciids); i++) {
567 		if (d == t5_pciids[i].device) {
568 			device_set_desc(dev, t5_pciids[i].desc);
569 			return (BUS_PROBE_DEFAULT);
570 		}
571 	}
572 
573 	return (ENXIO);
574 }
575 
576 static int
577 t4_attach(device_t dev)
578 {
579 	struct adapter *sc;
580 	int rc = 0, i, n10g, n1g, rqidx, tqidx;
581 	struct intrs_and_queues iaq;
582 	struct sge *s;
583 #ifdef TCP_OFFLOAD
584 	int ofld_rqidx, ofld_tqidx;
585 #endif
586 #ifdef DEV_NETMAP
587 	int nm_rqidx, nm_tqidx;
588 #endif
589 
590 	sc = device_get_softc(dev);
591 	sc->dev = dev;
592 	TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);
593 
594 	pci_enable_busmaster(dev);
595 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
596 		uint32_t v;
597 
598 		pci_set_max_read_req(dev, 4096);
599 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
600 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
601 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
602 
603 		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
604 	}
605 
606 	sc->traceq = -1;
607 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
608 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
609 	    device_get_nameunit(dev));
610 
611 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
612 	    device_get_nameunit(dev));
613 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
614 	sx_xlock(&t4_list_lock);
615 	SLIST_INSERT_HEAD(&t4_list, sc, link);
616 	sx_xunlock(&t4_list_lock);
617 
618 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
619 	TAILQ_INIT(&sc->sfl);
620 	callout_init(&sc->sfl_callout, 1);
621 
622 	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
623 
624 	rc = map_bars_0_and_4(sc);
625 	if (rc != 0)
626 		goto done; /* error message displayed already */
627 
628 	/*
629 	 * This is the real PF# to which we're attaching.  Works from within PCI
630 	 * passthrough environments too, where pci_get_function() could return a
631 	 * different PF# depending on the passthrough configuration.  We need to
632 	 * use the real PF# in all our communication with the firmware.
633 	 */
634 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
635 	sc->mbox = sc->pf;
636 
637 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
638 	sc->an_handler = an_not_handled;
639 	for (i = 0; i < nitems(sc->cpl_handler); i++)
640 		sc->cpl_handler[i] = cpl_not_handled;
641 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
642 		sc->fw_msg_handler[i] = fw_msg_not_handled;
643 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
644 	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
645 	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
646 	t4_init_sge_cpl_handlers(sc);
647 
648 	/* Prepare the adapter for operation */
649 	rc = -t4_prep_adapter(sc);
650 	if (rc != 0) {
651 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
652 		goto done;
653 	}
654 
655 	/*
656 	 * Do this really early, with the memory windows set up even before the
657 	 * character device.  The userland tool's register i/o and mem read
658 	 * will work even in "recovery mode".
659 	 */
660 	setup_memwin(sc);
661 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
662 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
663 	    device_get_nameunit(dev));
664 	if (sc->cdev == NULL)
665 		device_printf(dev, "failed to create nexus char device.\n");
666 	else
667 		sc->cdev->si_drv1 = sc;
668 
669 	/* Go no further if recovery mode has been requested. */
670 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
671 		device_printf(dev, "recovery mode.\n");
672 		goto done;
673 	}
674 
675 #if defined(__i386__)
676 	if ((cpu_feature & CPUID_CX8) == 0) {
677 		device_printf(dev, "64 bit atomics not available.\n");
678 		rc = ENOTSUP;
679 		goto done;
680 	}
681 #endif
682 
683 	/* Prepare the firmware for operation */
684 	rc = prep_firmware(sc);
685 	if (rc != 0)
686 		goto done; /* error message displayed already */
687 
688 	rc = get_params__post_init(sc);
689 	if (rc != 0)
690 		goto done; /* error message displayed already */
691 
692 	rc = set_params__post_init(sc);
693 	if (rc != 0)
694 		goto done; /* error message displayed already */
695 
696 	rc = map_bar_2(sc);
697 	if (rc != 0)
698 		goto done; /* error message displayed already */
699 
700 	rc = t4_create_dma_tag(sc);
701 	if (rc != 0)
702 		goto done; /* error message displayed already */
703 
704 	/*
705 	 * First pass over all the ports - allocate VIs and initialize some
706 	 * basic parameters like mac address, port type, etc.  We also figure
707 	 * out whether a port is 10G or 1G and use that information when
708 	 * calculating how many interrupts to attempt to allocate.
709 	 */
710 	n10g = n1g = 0;
711 	for_each_port(sc, i) {
712 		struct port_info *pi;
713 
714 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
715 		sc->port[i] = pi;
716 
717 		/* These must be set before t4_port_init */
718 		pi->adapter = sc;
719 		pi->port_id = i;
720 
721 		/* Allocate the vi and initialize parameters like mac addr */
722 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
723 		if (rc != 0) {
724 			device_printf(dev, "unable to initialize port %d: %d\n",
725 			    i, rc);
726 			free(pi, M_CXGBE);
727 			sc->port[i] = NULL;
728 			goto done;
729 		}
730 
731 		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
732 		pi->link_cfg.requested_fc |= t4_pause_settings;
733 		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
734 		pi->link_cfg.fc |= t4_pause_settings;
735 
736 		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
737 		if (rc != 0) {
738 			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
739 			free(pi, M_CXGBE);
740 			sc->port[i] = NULL;
741 			goto done;
742 		}
743 
744 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
745 		    device_get_nameunit(dev), i);
746 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
747 		sc->chan_map[pi->tx_chan] = i;
748 
749 		if (is_10G_port(pi) || is_40G_port(pi)) {
750 			n10g++;
751 			pi->tmr_idx = t4_tmr_idx_10g;
752 			pi->pktc_idx = t4_pktc_idx_10g;
753 		} else {
754 			n1g++;
755 			pi->tmr_idx = t4_tmr_idx_1g;
756 			pi->pktc_idx = t4_pktc_idx_1g;
757 		}
758 
759 		pi->xact_addr_filt = -1;
760 		pi->linkdnrc = -1;
761 
762 		pi->qsize_rxq = t4_qsize_rxq;
763 		pi->qsize_txq = t4_qsize_txq;
764 
765 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
766 		if (pi->dev == NULL) {
767 			device_printf(dev,
768 			    "failed to add device for port %d.\n", i);
769 			rc = ENXIO;
770 			goto done;
771 		}
772 		device_set_softc(pi->dev, pi);
773 	}
774 
775 	/*
776 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
777 	 */
778 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
779 	if (rc != 0)
780 		goto done; /* error message displayed already */
781 
782 	sc->intr_type = iaq.intr_type;
783 	sc->intr_count = iaq.nirq;
784 
785 	s = &sc->sge;
786 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
787 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
788 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
789 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
790 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
791 #ifdef TCP_OFFLOAD
792 	if (is_offload(sc)) {
793 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
794 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
795 		s->neq += s->nofldtxq + s->nofldrxq;
796 		s->niq += s->nofldrxq;
797 
798 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
799 		    M_CXGBE, M_ZERO | M_WAITOK);
800 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
801 		    M_CXGBE, M_ZERO | M_WAITOK);
802 	}
803 #endif
804 #ifdef DEV_NETMAP
805 	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
806 	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
807 	s->neq += s->nnmtxq + s->nnmrxq;
808 	s->niq += s->nnmrxq;
809 
810 	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
811 	    M_CXGBE, M_ZERO | M_WAITOK);
812 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
813 	    M_CXGBE, M_ZERO | M_WAITOK);
814 #endif
815 
816 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
817 	    M_ZERO | M_WAITOK);
818 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
819 	    M_ZERO | M_WAITOK);
820 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
821 	    M_ZERO | M_WAITOK);
822 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
823 	    M_ZERO | M_WAITOK);
824 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
825 	    M_ZERO | M_WAITOK);
826 
827 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
828 	    M_ZERO | M_WAITOK);
829 
830 	t4_init_l2t(sc, M_WAITOK);
831 
832 	/*
833 	 * Second pass over the ports.  This time we know the number of rx and
834 	 * tx queues that each port should get.
835 	 */
836 	rqidx = tqidx = 0;
837 #ifdef TCP_OFFLOAD
838 	ofld_rqidx = ofld_tqidx = 0;
839 #endif
840 #ifdef DEV_NETMAP
841 	nm_rqidx = nm_tqidx = 0;
842 #endif
843 	for_each_port(sc, i) {
844 		struct port_info *pi = sc->port[i];
845 
846 		if (pi == NULL)
847 			continue;
848 
849 		pi->first_rxq = rqidx;
850 		pi->first_txq = tqidx;
851 		if (is_10G_port(pi) || is_40G_port(pi)) {
852 			pi->flags |= iaq.intr_flags_10g;
853 			pi->nrxq = iaq.nrxq10g;
854 			pi->ntxq = iaq.ntxq10g;
855 		} else {
856 			pi->flags |= iaq.intr_flags_1g;
857 			pi->nrxq = iaq.nrxq1g;
858 			pi->ntxq = iaq.ntxq1g;
859 		}
860 
861 		if (pi->ntxq > 1)
862 			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
863 		else
864 			pi->rsrv_noflowq = 0;
865 
866 		rqidx += pi->nrxq;
867 		tqidx += pi->ntxq;
868 #ifdef TCP_OFFLOAD
869 		if (is_offload(sc)) {
870 			pi->first_ofld_rxq = ofld_rqidx;
871 			pi->first_ofld_txq = ofld_tqidx;
872 			if (is_10G_port(pi) || is_40G_port(pi)) {
873 				pi->nofldrxq = iaq.nofldrxq10g;
874 				pi->nofldtxq = iaq.nofldtxq10g;
875 			} else {
876 				pi->nofldrxq = iaq.nofldrxq1g;
877 				pi->nofldtxq = iaq.nofldtxq1g;
878 			}
879 			ofld_rqidx += pi->nofldrxq;
880 			ofld_tqidx += pi->nofldtxq;
881 		}
882 #endif
883 #ifdef DEV_NETMAP
884 		pi->first_nm_rxq = nm_rqidx;
885 		pi->first_nm_txq = nm_tqidx;
886 		if (is_10G_port(pi) || is_40G_port(pi)) {
887 			pi->nnmrxq = iaq.nnmrxq10g;
888 			pi->nnmtxq = iaq.nnmtxq10g;
889 		} else {
890 			pi->nnmrxq = iaq.nnmrxq1g;
891 			pi->nnmtxq = iaq.nnmtxq1g;
892 		}
893 		nm_rqidx += pi->nnmrxq;
894 		nm_tqidx += pi->nnmtxq;
895 #endif
896 	}
897 
898 	rc = setup_intr_handlers(sc);
899 	if (rc != 0) {
900 		device_printf(dev,
901 		    "failed to setup interrupt handlers: %d\n", rc);
902 		goto done;
903 	}
904 
905 	rc = bus_generic_attach(dev);
906 	if (rc != 0) {
907 		device_printf(dev,
908 		    "failed to attach all child ports: %d\n", rc);
909 		goto done;
910 	}
911 
912 	device_printf(dev,
913 	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
914 	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
915 	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
916 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
917 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
918 
919 	t4_set_desc(sc);
920 
921 done:
922 	if (rc != 0 && sc->cdev) {
923 		/* cdev was created and so cxgbetool works; recover that way. */
924 		device_printf(dev,
925 		    "error during attach, adapter is now in recovery mode.\n");
926 		rc = 0;
927 	}
928 
929 	if (rc != 0)
930 		t4_detach(dev);
931 	else
932 		t4_sysctls(sc);
933 
934 	return (rc);
935 }
936 
937 /*
938  * Idempotent
939  */
940 static int
941 t4_detach(device_t dev)
942 {
943 	struct adapter *sc;
944 	struct port_info *pi;
945 	int i, rc;
946 
947 	sc = device_get_softc(dev);
948 
949 	if (sc->flags & FULL_INIT_DONE)
950 		t4_intr_disable(sc);
951 
952 	if (sc->cdev) {
953 		destroy_dev(sc->cdev);
954 		sc->cdev = NULL;
955 	}
956 
957 	rc = bus_generic_detach(dev);
958 	if (rc) {
959 		device_printf(dev,
960 		    "failed to detach child devices: %d\n", rc);
961 		return (rc);
962 	}
963 
964 	for (i = 0; i < sc->intr_count; i++)
965 		t4_free_irq(sc, &sc->irq[i]);
966 
967 	for (i = 0; i < MAX_NPORTS; i++) {
968 		pi = sc->port[i];
969 		if (pi) {
970 			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
971 			if (pi->dev)
972 				device_delete_child(dev, pi->dev);
973 
974 			mtx_destroy(&pi->pi_lock);
975 			free(pi, M_CXGBE);
976 		}
977 	}
978 
979 	if (sc->flags & FULL_INIT_DONE)
980 		adapter_full_uninit(sc);
981 
982 	if (sc->flags & FW_OK)
983 		t4_fw_bye(sc, sc->mbox);
984 
985 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
986 		pci_release_msi(dev);
987 
988 	if (sc->regs_res)
989 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
990 		    sc->regs_res);
991 
992 	if (sc->udbs_res)
993 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
994 		    sc->udbs_res);
995 
996 	if (sc->msix_res)
997 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
998 		    sc->msix_res);
999 
1000 	if (sc->l2t)
1001 		t4_free_l2t(sc->l2t);
1002 
1003 #ifdef TCP_OFFLOAD
1004 	free(sc->sge.ofld_rxq, M_CXGBE);
1005 	free(sc->sge.ofld_txq, M_CXGBE);
1006 #endif
1007 #ifdef DEV_NETMAP
1008 	free(sc->sge.nm_rxq, M_CXGBE);
1009 	free(sc->sge.nm_txq, M_CXGBE);
1010 #endif
1011 	free(sc->irq, M_CXGBE);
1012 	free(sc->sge.rxq, M_CXGBE);
1013 	free(sc->sge.txq, M_CXGBE);
1014 	free(sc->sge.ctrlq, M_CXGBE);
1015 	free(sc->sge.iqmap, M_CXGBE);
1016 	free(sc->sge.eqmap, M_CXGBE);
1017 	free(sc->tids.ftid_tab, M_CXGBE);
1018 	t4_destroy_dma_tag(sc);
1019 	if (mtx_initialized(&sc->sc_lock)) {
1020 		sx_xlock(&t4_list_lock);
1021 		SLIST_REMOVE(&t4_list, sc, adapter, link);
1022 		sx_xunlock(&t4_list_lock);
1023 		mtx_destroy(&sc->sc_lock);
1024 	}
1025 
1026 	if (mtx_initialized(&sc->tids.ftid_lock))
1027 		mtx_destroy(&sc->tids.ftid_lock);
1028 	if (mtx_initialized(&sc->sfl_lock))
1029 		mtx_destroy(&sc->sfl_lock);
1030 	if (mtx_initialized(&sc->ifp_lock))
1031 		mtx_destroy(&sc->ifp_lock);
1032 	if (mtx_initialized(&sc->regwin_lock))
1033 		mtx_destroy(&sc->regwin_lock);
1034 
1035 	bzero(sc, sizeof(*sc));
1036 
1037 	return (0);
1038 }
1039 
1040 static int
1041 cxgbe_probe(device_t dev)
1042 {
1043 	char buf[128];
1044 	struct port_info *pi = device_get_softc(dev);
1045 
1046 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1047 	device_set_desc_copy(dev, buf);
1048 
1049 	return (BUS_PROBE_DEFAULT);
1050 }
1051 
1052 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1053     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1054     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1055 #define T4_CAP_ENABLE (T4_CAP)
1056 
1057 static int
1058 cxgbe_attach(device_t dev)
1059 {
1060 	struct port_info *pi = device_get_softc(dev);
1061 	struct ifnet *ifp;
1062 	char *s;
1063 	int n, o;
1064 
1065 	/* Allocate an ifnet and set it up */
1066 	ifp = if_alloc(IFT_ETHER);
1067 	if (ifp == NULL) {
1068 		device_printf(dev, "Cannot allocate ifnet\n");
1069 		return (ENOMEM);
1070 	}
1071 	pi->ifp = ifp;
1072 	ifp->if_softc = pi;
1073 
1074 	callout_init(&pi->tick, 1);
1075 
1076 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1077 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1078 
1079 	ifp->if_init = cxgbe_init;
1080 	ifp->if_ioctl = cxgbe_ioctl;
1081 	ifp->if_transmit = cxgbe_transmit;
1082 	ifp->if_qflush = cxgbe_qflush;
1083 	ifp->if_get_counter = cxgbe_get_counter;
1084 
1085 	ifp->if_capabilities = T4_CAP;
1086 #ifdef TCP_OFFLOAD
1087 	if (is_offload(pi->adapter))
1088 		ifp->if_capabilities |= IFCAP_TOE;
1089 #endif
1090 	ifp->if_capenable = T4_CAP_ENABLE;
1091 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1092 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1093 
1094 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1095 	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1096 	ifp->if_hw_tsomaxsegsize = 65536;
1097 
1098 	/* Initialize ifmedia for this port */
1099 	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1100 	    cxgbe_media_status);
1101 	build_medialist(pi, &pi->media);
1102 
1103 	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1104 	    EVENTHANDLER_PRI_ANY);
1105 
1106 	ether_ifattach(ifp, pi->hw_addr);
1107 
1108 	n = 128;
1109 	s = malloc(n, M_CXGBE, M_WAITOK);
1110 	o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
1111 	MPASS(n > o);
1112 #ifdef TCP_OFFLOAD
1113 	if (is_offload(pi->adapter)) {
1114 		o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
1115 		    pi->nofldtxq, pi->nofldrxq);
1116 		MPASS(n > o);
1117 	}
1118 #endif
1119 #ifdef DEV_NETMAP
1120 	o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
1121 	    pi->nnmrxq);
1122 	MPASS(n > o);
1123 #endif
1124 	device_printf(dev, "%s\n", s);
1125 	free(s, M_CXGBE);
1126 
1127 #ifdef DEV_NETMAP
1128 	/* nm_media handled here to keep implementation private to this file */
1129 	ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
1130 	    cxgbe_media_status);
1131 	build_medialist(pi, &pi->nm_media);
1132 	create_netmap_ifnet(pi);	/* logs errors if something fails */
1133 #endif
1134 	cxgbe_sysctls(pi);
1135 
1136 	return (0);
1137 }
1138 
1139 static int
1140 cxgbe_detach(device_t dev)
1141 {
1142 	struct port_info *pi = device_get_softc(dev);
1143 	struct adapter *sc = pi->adapter;
1144 	struct ifnet *ifp = pi->ifp;
1145 
1146 	/* Tell if_ioctl and if_init that the port is going away */
1147 	ADAPTER_LOCK(sc);
1148 	SET_DOOMED(pi);
1149 	wakeup(&sc->flags);
1150 	while (IS_BUSY(sc))
1151 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1152 	SET_BUSY(sc);
1153 #ifdef INVARIANTS
1154 	sc->last_op = "t4detach";
1155 	sc->last_op_thr = curthread;
1156 	sc->last_op_flags = 0;
1157 #endif
1158 	ADAPTER_UNLOCK(sc);
1159 
1160 	if (pi->flags & HAS_TRACEQ) {
1161 		sc->traceq = -1;	/* cloner should not create ifnet */
1162 		t4_tracer_port_detach(sc);
1163 	}
1164 
1165 	if (pi->vlan_c)
1166 		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1167 
1168 	PORT_LOCK(pi);
1169 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1170 	callout_stop(&pi->tick);
1171 	PORT_UNLOCK(pi);
1172 	callout_drain(&pi->tick);
1173 
1174 	/* Let detach proceed even if these fail. */
1175 	cxgbe_uninit_synchronized(pi);
1176 	port_full_uninit(pi);
1177 
1178 	ifmedia_removeall(&pi->media);
1179 	ether_ifdetach(pi->ifp);
1180 	if_free(pi->ifp);
1181 
1182 #ifdef DEV_NETMAP
1183 	/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
1184 	destroy_netmap_ifnet(pi);
1185 #endif
1186 
1187 	ADAPTER_LOCK(sc);
1188 	CLR_BUSY(sc);
1189 	wakeup(&sc->flags);
1190 	ADAPTER_UNLOCK(sc);
1191 
1192 	return (0);
1193 }
1194 
1195 static void
1196 cxgbe_init(void *arg)
1197 {
1198 	struct port_info *pi = arg;
1199 	struct adapter *sc = pi->adapter;
1200 
1201 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1202 		return;
1203 	cxgbe_init_synchronized(pi);
1204 	end_synchronized_op(sc, 0);
1205 }
1206 
1207 static int
1208 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1209 {
1210 	int rc = 0, mtu, flags, can_sleep;
1211 	struct port_info *pi = ifp->if_softc;
1212 	struct adapter *sc = pi->adapter;
1213 	struct ifreq *ifr = (struct ifreq *)data;
1214 	uint32_t mask;
1215 
1216 	switch (cmd) {
1217 	case SIOCSIFMTU:
1218 		mtu = ifr->ifr_mtu;
1219 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1220 			return (EINVAL);
1221 
1222 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1223 		if (rc)
1224 			return (rc);
1225 		ifp->if_mtu = mtu;
1226 		if (pi->flags & PORT_INIT_DONE) {
1227 			t4_update_fl_bufsize(ifp);
1228 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1229 				rc = update_mac_settings(ifp, XGMAC_MTU);
1230 		}
1231 		end_synchronized_op(sc, 0);
1232 		break;
1233 
1234 	case SIOCSIFFLAGS:
1235 		can_sleep = 0;
1236 redo_sifflags:
1237 		rc = begin_synchronized_op(sc, pi,
1238 		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1239 		if (rc)
1240 			return (rc);
1241 
1242 		if (ifp->if_flags & IFF_UP) {
1243 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1244 				flags = pi->if_flags;
1245 				if ((ifp->if_flags ^ flags) &
1246 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1247 					if (can_sleep == 1) {
1248 						end_synchronized_op(sc, 0);
1249 						can_sleep = 0;
1250 						goto redo_sifflags;
1251 					}
1252 					rc = update_mac_settings(ifp,
1253 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1254 				}
1255 			} else {
1256 				if (can_sleep == 0) {
1257 					end_synchronized_op(sc, LOCK_HELD);
1258 					can_sleep = 1;
1259 					goto redo_sifflags;
1260 				}
1261 				rc = cxgbe_init_synchronized(pi);
1262 			}
1263 			pi->if_flags = ifp->if_flags;
1264 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1265 			if (can_sleep == 0) {
1266 				end_synchronized_op(sc, LOCK_HELD);
1267 				can_sleep = 1;
1268 				goto redo_sifflags;
1269 			}
1270 			rc = cxgbe_uninit_synchronized(pi);
1271 		}
1272 		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1273 		break;
1274 
1275 	case SIOCADDMULTI:
1276 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1277 		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1278 		if (rc)
1279 			return (rc);
1280 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1281 			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1282 		end_synchronized_op(sc, LOCK_HELD);
1283 		break;
1284 
1285 	case SIOCSIFCAP:
1286 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1287 		if (rc)
1288 			return (rc);
1289 
1290 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1291 		if (mask & IFCAP_TXCSUM) {
1292 			ifp->if_capenable ^= IFCAP_TXCSUM;
1293 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1294 
1295 			if (IFCAP_TSO4 & ifp->if_capenable &&
1296 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1297 				ifp->if_capenable &= ~IFCAP_TSO4;
1298 				if_printf(ifp,
1299 				    "tso4 disabled due to -txcsum.\n");
1300 			}
1301 		}
1302 		if (mask & IFCAP_TXCSUM_IPV6) {
1303 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1304 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1305 
1306 			if (IFCAP_TSO6 & ifp->if_capenable &&
1307 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1308 				ifp->if_capenable &= ~IFCAP_TSO6;
1309 				if_printf(ifp,
1310 				    "tso6 disabled due to -txcsum6.\n");
1311 			}
1312 		}
1313 		if (mask & IFCAP_RXCSUM)
1314 			ifp->if_capenable ^= IFCAP_RXCSUM;
1315 		if (mask & IFCAP_RXCSUM_IPV6)
1316 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1317 
1318 		/*
1319 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1320 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1321 		 * sending a TSO request our way, so it's sufficient to toggle
1322 		 * IFCAP_TSOx only.
1323 		 */
1324 		if (mask & IFCAP_TSO4) {
1325 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1326 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1327 				if_printf(ifp, "enable txcsum first.\n");
1328 				rc = EAGAIN;
1329 				goto fail;
1330 			}
1331 			ifp->if_capenable ^= IFCAP_TSO4;
1332 		}
1333 		if (mask & IFCAP_TSO6) {
1334 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1335 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1336 				if_printf(ifp, "enable txcsum6 first.\n");
1337 				rc = EAGAIN;
1338 				goto fail;
1339 			}
1340 			ifp->if_capenable ^= IFCAP_TSO6;
1341 		}
1342 		if (mask & IFCAP_LRO) {
1343 #if defined(INET) || defined(INET6)
1344 			int i;
1345 			struct sge_rxq *rxq;
1346 
1347 			ifp->if_capenable ^= IFCAP_LRO;
1348 			for_each_rxq(pi, i, rxq) {
1349 				if (ifp->if_capenable & IFCAP_LRO)
1350 					rxq->iq.flags |= IQ_LRO_ENABLED;
1351 				else
1352 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1353 			}
1354 #endif
1355 		}
1356 #ifdef TCP_OFFLOAD
1357 		if (mask & IFCAP_TOE) {
1358 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1359 
1360 			rc = toe_capability(pi, enable);
1361 			if (rc != 0)
1362 				goto fail;
1363 
1364 			ifp->if_capenable ^= mask;
1365 		}
1366 #endif
1367 		if (mask & IFCAP_VLAN_HWTAGGING) {
1368 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1369 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1370 				rc = update_mac_settings(ifp, XGMAC_VLANEX);
1371 		}
1372 		if (mask & IFCAP_VLAN_MTU) {
1373 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1374 
1375 			/* Need to find out how to disable auto-mtu-inflation */
1376 		}
1377 		if (mask & IFCAP_VLAN_HWTSO)
1378 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1379 		if (mask & IFCAP_VLAN_HWCSUM)
1380 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1381 
1382 #ifdef VLAN_CAPABILITIES
1383 		VLAN_CAPABILITIES(ifp);
1384 #endif
1385 fail:
1386 		end_synchronized_op(sc, 0);
1387 		break;
1388 
1389 	case SIOCSIFMEDIA:
1390 	case SIOCGIFMEDIA:
1391 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1392 		break;
1393 
1394 	case SIOCGI2C: {
1395 		struct ifi2creq i2c;
1396 
1397 		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1398 		if (rc != 0)
1399 			break;
1400 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1401 			rc = EPERM;
1402 			break;
1403 		}
1404 		if (i2c.len > sizeof(i2c.data)) {
1405 			rc = EINVAL;
1406 			break;
1407 		}
1408 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
1409 		if (rc)
1410 			return (rc);
1411 		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
1412 		    i2c.offset, i2c.len, &i2c.data[0]);
1413 		end_synchronized_op(sc, 0);
1414 		if (rc == 0)
1415 			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1416 		break;
1417 	}
1418 
1419 	default:
1420 		rc = ether_ioctl(ifp, cmd, data);
1421 	}
1422 
1423 	return (rc);
1424 }
1425 
1426 static int
1427 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1428 {
1429 	struct port_info *pi = ifp->if_softc;
1430 	struct adapter *sc = pi->adapter;
1431 	struct sge_txq *txq;
1432 	void *items[1];
1433 	int rc;
1434 
1435 	M_ASSERTPKTHDR(m);
1436 	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
1437 
1438 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1439 		m_freem(m);
1440 		return (ENETDOWN);
1441 	}
1442 
1443 	rc = parse_pkt(&m);
1444 	if (__predict_false(rc != 0)) {
1445 		MPASS(m == NULL);			/* was freed already */
1446 		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
1447 		return (rc);
1448 	}
1449 
1450 	/* Select a txq. */
1451 	txq = &sc->sge.txq[pi->first_txq];
1452 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1453 		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
1454 		    pi->rsrv_noflowq);
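	/*
	 * Worked example with hypothetical values: if ntxq = 4 and
	 * rsrv_noflowq = 1 then hashed mbufs are spread over txq 1-3
	 * (flowid % 3, plus 1) while txq 0 is left for traffic without a
	 * flowid, which stays on first_txq above.
	 */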
1455 
1456 	items[0] = m;
1457 	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1458 	if (__predict_false(rc != 0))
1459 		m_freem(m);
1460 
1461 	return (rc);
1462 }
1463 
1464 static void
1465 cxgbe_qflush(struct ifnet *ifp)
1466 {
1467 	struct port_info *pi = ifp->if_softc;
1468 	struct sge_txq *txq;
1469 	int i;
1470 
1471 	/* queues do not exist if !PORT_INIT_DONE. */
1472 	if (pi->flags & PORT_INIT_DONE) {
1473 		for_each_txq(pi, i, txq) {
1474 			TXQ_LOCK(txq);
1475 			txq->eq.flags &= ~EQ_ENABLED;
1476 			TXQ_UNLOCK(txq);
1477 			while (!mp_ring_is_idle(txq->r)) {
1478 				mp_ring_check_drainage(txq->r, 0);
1479 				pause("qflush", 1);
1480 			}
1481 		}
1482 	}
1483 	if_qflush(ifp);
1484 }
1485 
1486 static uint64_t
1487 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1488 {
1489 	struct port_info *pi = ifp->if_softc;
1490 	struct adapter *sc = pi->adapter;
1491 	struct port_stats *s = &pi->stats;
1492 
1493 	cxgbe_refresh_stats(sc, pi);
1494 
1495 	switch (c) {
1496 	case IFCOUNTER_IPACKETS:
1497 		return (s->rx_frames - s->rx_pause);
1498 
1499 	case IFCOUNTER_IERRORS:
1500 		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
1501 		    s->rx_fcs_err + s->rx_len_err);
1502 
1503 	case IFCOUNTER_OPACKETS:
1504 		return (s->tx_frames - s->tx_pause);
1505 
1506 	case IFCOUNTER_OERRORS:
1507 		return (s->tx_error_frames);
1508 
1509 	case IFCOUNTER_IBYTES:
1510 		return (s->rx_octets - s->rx_pause * 64);
1511 
1512 	case IFCOUNTER_OBYTES:
1513 		return (s->tx_octets - s->tx_pause * 64);
1514 
1515 	case IFCOUNTER_IMCASTS:
1516 		return (s->rx_mcast_frames - s->rx_pause);
1517 
1518 	case IFCOUNTER_OMCASTS:
1519 		return (s->tx_mcast_frames - s->tx_pause);
1520 
1521 	case IFCOUNTER_IQDROPS:
1522 		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
1523 		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
1524 		    s->rx_trunc3 + pi->tnl_cong_drops);
1525 
1526 	case IFCOUNTER_OQDROPS: {
1527 		uint64_t drops;
1528 
1529 		drops = s->tx_drop;
1530 		if (pi->flags & PORT_INIT_DONE) {
1531 			int i;
1532 			struct sge_txq *txq;
1533 
1534 			for_each_txq(pi, i, txq)
1535 				drops += counter_u64_fetch(txq->r->drops);
1536 		}
1537 
1538 		return (drops);
1539 
1540 	}
1541 
1542 	default:
1543 		return (if_get_counter_default(ifp, c));
1544 	}
1545 }
1546 
1547 static int
1548 cxgbe_media_change(struct ifnet *ifp)
1549 {
1550 	struct port_info *pi = ifp->if_softc;
1551 
1552 	device_printf(pi->dev, "%s unimplemented.\n", __func__);
1553 
1554 	return (EOPNOTSUPP);
1555 }
1556 
1557 static void
1558 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1559 {
1560 	struct port_info *pi = ifp->if_softc;
1561 	struct ifmedia *media = NULL;
1562 	struct ifmedia_entry *cur;
1563 	int speed = pi->link_cfg.speed;
1564 
1565 	if (ifp == pi->ifp)
1566 		media = &pi->media;
1567 #ifdef DEV_NETMAP
1568 	else if (ifp == pi->nm_ifp)
1569 		media = &pi->nm_media;
1570 #endif
1571 	MPASS(media != NULL);
1572 
1573 	cur = media->ifm_cur;
1574 
1575 	ifmr->ifm_status = IFM_AVALID;
1576 	if (!pi->link_cfg.link_ok)
1577 		return;
1578 
1579 	ifmr->ifm_status |= IFM_ACTIVE;
1580 
1581 	/* active and current will differ iff current media is autoselect. */
1582 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1583 		return;
1584 
1585 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1586 	if (speed == SPEED_10000)
1587 		ifmr->ifm_active |= IFM_10G_T;
1588 	else if (speed == SPEED_1000)
1589 		ifmr->ifm_active |= IFM_1000_T;
1590 	else if (speed == SPEED_100)
1591 		ifmr->ifm_active |= IFM_100_TX;
1592 	else if (speed == SPEED_10)
1593 		ifmr->ifm_active |= IFM_10_T;
1594 	else
1595 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1596 			    speed));
1597 }
1598 
1599 void
1600 t4_fatal_err(struct adapter *sc)
1601 {
1602 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1603 	t4_intr_disable(sc);
1604 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1605 	    device_get_nameunit(sc->dev));
1606 }
1607 
1608 static int
1609 map_bars_0_and_4(struct adapter *sc)
1610 {
1611 	sc->regs_rid = PCIR_BAR(0);
1612 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1613 	    &sc->regs_rid, RF_ACTIVE);
1614 	if (sc->regs_res == NULL) {
1615 		device_printf(sc->dev, "cannot map registers.\n");
1616 		return (ENXIO);
1617 	}
1618 	sc->bt = rman_get_bustag(sc->regs_res);
1619 	sc->bh = rman_get_bushandle(sc->regs_res);
1620 	sc->mmio_len = rman_get_size(sc->regs_res);
1621 	setbit(&sc->doorbells, DOORBELL_KDB);
1622 
1623 	sc->msix_rid = PCIR_BAR(4);
1624 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1625 	    &sc->msix_rid, RF_ACTIVE);
1626 	if (sc->msix_res == NULL) {
1627 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1628 		return (ENXIO);
1629 	}
1630 
1631 	return (0);
1632 }
1633 
1634 static int
1635 map_bar_2(struct adapter *sc)
1636 {
1637 
1638 	/*
1639 	 * T4: only the iWARP driver uses the userspace doorbells.  There is no need
1640 	 * to map it if RDMA is disabled.
1641 	 */
1642 	if (is_t4(sc) && sc->rdmacaps == 0)
1643 		return (0);
1644 
1645 	sc->udbs_rid = PCIR_BAR(2);
1646 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1647 	    &sc->udbs_rid, RF_ACTIVE);
1648 	if (sc->udbs_res == NULL) {
1649 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
1650 		return (ENXIO);
1651 	}
1652 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
1653 
1654 	if (is_t5(sc)) {
1655 		setbit(&sc->doorbells, DOORBELL_UDB);
1656 #if defined(__i386__) || defined(__amd64__)
1657 		if (t5_write_combine) {
1658 			int rc;
1659 
1660 			/*
1661 			 * Enable write combining on BAR2.  This is the
1662 			 * userspace doorbell BAR and is split into 128B
1663 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
1664 			 * with an egress queue.  The first 64B has the doorbell
1665 			 * and the second 64B can be used to submit a tx work
1666 			 * request with an implicit doorbell.
1667 			 */
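			/*
			 * Sketch of one 128B doorbell region, per the
			 * description above:
			 *
			 *	+0	doorbell (64B)
			 *	+64	tx WR with implicit doorbell (64B)
			 */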
1668 
1669 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1670 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1671 			if (rc == 0) {
1672 				clrbit(&sc->doorbells, DOORBELL_UDB);
1673 				setbit(&sc->doorbells, DOORBELL_WCWR);
1674 				setbit(&sc->doorbells, DOORBELL_UDBWC);
1675 			} else {
1676 				device_printf(sc->dev,
1677 				    "couldn't enable write combining: %d\n",
1678 				    rc);
1679 			}
1680 
1681 			t4_write_reg(sc, A_SGE_STAT_CFG,
1682 			    V_STATSOURCE_T5(7) | V_STATMODE(0));
1683 		}
1684 #endif
1685 	}
1686 
1687 	return (0);
1688 }
1689 
1690 static const struct memwin t4_memwin[] = {
1691 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1692 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1693 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1694 };
1695 
1696 static const struct memwin t5_memwin[] = {
1697 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1698 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1699 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1700 };
1701 
1702 static void
1703 setup_memwin(struct adapter *sc)
1704 {
1705 	const struct memwin *mw;
1706 	int i, n;
1707 	uint32_t bar0;
1708 
1709 	if (is_t4(sc)) {
1710 		/*
1711 		 * Read low 32b of bar0 indirectly via the hardware backdoor
1712 		 * mechanism.  Works from within PCI passthrough environments
1713 		 * too, where rman_get_start() can return a different value.  We
1714 		 * need to program the T4 memory window decoders with the actual
1715 		 * addresses that will be coming across the PCIe link.
1716 		 */
1717 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1718 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1719 
1720 		mw = &t4_memwin[0];
1721 		n = nitems(t4_memwin);
1722 	} else {
1723 		/* T5 uses the relative offset inside the PCIe BAR */
1724 		bar0 = 0;
1725 
1726 		mw = &t5_memwin[0];
1727 		n = nitems(t5_memwin);
1728 	}
1729 
1730 	for (i = 0; i < n; i++, mw++) {
1731 		t4_write_reg(sc,
1732 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1733 		    (mw->base + bar0) | V_BIR(0) |
1734 		    V_WINDOW(ilog2(mw->aperture) - 10));
1735 	}
1736 
1737 	/* flush */
1738 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1739 }
1740 
1741 /*
1742  * Verify that the memory range specified by the addr/len pair is valid and lies
1743  * entirely within a single region (EDCx or MCx).
1744  */
1745 static int
1746 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1747 {
1748 	uint32_t em, addr_len, maddr, mlen;
1749 
1750 	/* Memory can only be accessed in naturally aligned 4 byte units */
1751 	if (addr & 3 || len & 3 || len == 0)
1752 		return (EINVAL);
1753 
1754 	/* Enabled memories */
1755 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1756 	if (em & F_EDRAM0_ENABLE) {
1757 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1758 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1759 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1760 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1761 		    addr + len <= maddr + mlen)
1762 			return (0);
1763 	}
1764 	if (em & F_EDRAM1_ENABLE) {
1765 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1766 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1767 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1768 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1769 		    addr + len <= maddr + mlen)
1770 			return (0);
1771 	}
1772 	if (em & F_EXT_MEM_ENABLE) {
1773 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1774 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1775 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1776 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1777 		    addr + len <= maddr + mlen)
1778 			return (0);
1779 	}
1780 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1781 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1782 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1783 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1784 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1785 		    addr + len <= maddr + mlen)
1786 			return (0);
1787 	}
1788 
1789 	return (EFAULT);
1790 }
1791 
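/*
 * Translate a firmware memory type (FW_MEMTYPE_*) to the driver's MEM_*
 * equivalent; panics on a memory type it doesn't know about.
 */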
1792 static int
1793 fwmtype_to_hwmtype(int mtype)
1794 {
1795 
1796 	switch (mtype) {
1797 	case FW_MEMTYPE_EDC0:
1798 		return (MEM_EDC0);
1799 	case FW_MEMTYPE_EDC1:
1800 		return (MEM_EDC1);
1801 	case FW_MEMTYPE_EXTMEM:
1802 		return (MEM_MC0);
1803 	case FW_MEMTYPE_EXTMEM1:
1804 		return (MEM_MC1);
1805 	default:
1806 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1807 	}
1808 }
1809 
1810 /*
1811  * Verify that the memory range specified by the memtype/offset/len pair is
1812  * valid and lies entirely within the memtype specified.  The global address of
1813  * the start of the range is returned in addr.
1814  */
1815 static int
1816 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1817     uint32_t *addr)
1818 {
1819 	uint32_t em, addr_len, maddr, mlen;
1820 
1821 	/* Memory can only be accessed in naturally aligned 4 byte units */
1822 	if (off & 3 || len & 3 || len == 0)
1823 		return (EINVAL);
1824 
1825 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1826 	switch (fwmtype_to_hwmtype(mtype)) {
1827 	case MEM_EDC0:
1828 		if (!(em & F_EDRAM0_ENABLE))
1829 			return (EINVAL);
1830 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1831 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1832 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1833 		break;
1834 	case MEM_EDC1:
1835 		if (!(em & F_EDRAM1_ENABLE))
1836 			return (EINVAL);
1837 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1838 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1839 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1840 		break;
1841 	case MEM_MC:
1842 		if (!(em & F_EXT_MEM_ENABLE))
1843 			return (EINVAL);
1844 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1845 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1846 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1847 		break;
1848 	case MEM_MC1:
1849 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1850 			return (EINVAL);
1851 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1852 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1853 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1854 		break;
1855 	default:
1856 		return (EINVAL);
1857 	}
1858 
1859 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1860 		*addr = maddr + off;	/* global address */
1861 		return (0);
1862 	}
1863 
1864 	return (EFAULT);
1865 }
1866 
1867 static void
1868 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1869 {
1870 	const struct memwin *mw;
1871 
1872 	if (is_t4(sc)) {
1873 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1874 		    ("%s: incorrect memwin# (%d)", __func__, win));
1875 		mw = &t4_memwin[win];
1876 	} else {
1877 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1878 		    ("%s: incorrect memwin# (%d)", __func__, win));
1879 		mw = &t5_memwin[win];
1880 	}
1881 
1882 	if (base != NULL)
1883 		*base = mw->base;
1884 	if (aperture != NULL)
1885 		*aperture = mw->aperture;
1886 }
1887 
1888 /*
1889  * Positions the memory window such that it can be used to access the specified
1890  * address in the chip's address space.  The return value is the offset of addr
1891  * from the start of the window.
1892  */
1893 static uint32_t
1894 position_memwin(struct adapter *sc, int n, uint32_t addr)
1895 {
1896 	uint32_t start, pf;
1897 	uint32_t reg;
1898 
1899 	KASSERT(n >= 0 && n <= 3,
1900 	    ("%s: invalid window %d.", __func__, n));
1901 	KASSERT((addr & 3) == 0,
1902 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1903 
1904 	if (is_t4(sc)) {
1905 		pf = 0;
1906 		start = addr & ~0xf;	/* start must be 16B aligned */
1907 	} else {
1908 		pf = V_PFNUM(sc->pf);
1909 		start = addr & ~0x7f;	/* start must be 128B aligned */
1910 	}
1911 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1912 
1913 	t4_write_reg(sc, reg, start | pf);
1914 	t4_read_reg(sc, reg);
1915 
1916 	return (addr - start);
1917 }
1918 
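/*
 * Select an interrupt type (MSI-X, MSI, or INTx) and figure out how many
 * vectors and rx queues of each kind can be supported with it.  The
 * configurations are tried in decreasing order of desirability; see the
 * comments on the fallbacks within for the exact trade-offs.
 */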
1919 static int
1920 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1921     struct intrs_and_queues *iaq)
1922 {
1923 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1924 	int nofldrxq10g = 0, nofldrxq1g = 0;
1925 	int nnmrxq10g = 0, nnmrxq1g = 0;
1926 
1927 	bzero(iaq, sizeof(*iaq));
1928 
1929 	iaq->ntxq10g = t4_ntxq10g;
1930 	iaq->ntxq1g = t4_ntxq1g;
1931 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1932 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1933 	iaq->rsrv_noflowq = t4_rsrv_noflowq;
1934 #ifdef TCP_OFFLOAD
1935 	if (is_offload(sc)) {
1936 		iaq->nofldtxq10g = t4_nofldtxq10g;
1937 		iaq->nofldtxq1g = t4_nofldtxq1g;
1938 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1939 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1940 	}
1941 #endif
1942 #ifdef DEV_NETMAP
1943 	iaq->nnmtxq10g = t4_nnmtxq10g;
1944 	iaq->nnmtxq1g = t4_nnmtxq1g;
1945 	iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
1946 	iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
1947 #endif
1948 
1949 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1950 
1951 		if ((itype & t4_intr_types) == 0)
1952 			continue;	/* not allowed */
1953 
1954 		if (itype == INTR_MSIX)
1955 			navail = pci_msix_count(sc->dev);
1956 		else if (itype == INTR_MSI)
1957 			navail = pci_msi_count(sc->dev);
1958 		else
1959 			navail = 1;
1960 restart:
1961 		if (navail == 0)
1962 			continue;
1963 
1964 		iaq->intr_type = itype;
1965 		iaq->intr_flags_10g = 0;
1966 		iaq->intr_flags_1g = 0;
1967 
1968 		/*
1969 		 * Best option: an interrupt vector for errors, one for the
1970 		 * firmware event queue, and one for every rxq (NIC, TOE, and
1971 		 * netmap).
1972 		 */
1973 		iaq->nirq = T4_EXTRA_INTR;
1974 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
1975 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
1976 		if (iaq->nirq <= navail &&
1977 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
1978 			iaq->intr_flags_10g = INTR_ALL;
1979 			iaq->intr_flags_1g = INTR_ALL;
1980 			goto allocate;
1981 		}
1982 
1983 		/*
1984 		 * Second best option: a vector for errors, one for the firmware
1985 		 * event queue, and vectors for either all the NIC rx queues or
1986 		 * all the TOE rx queues.  The queues that don't get vectors
1987 		 * will forward their interrupts to those that do.
1988 		 *
1989 		 * Note: netmap rx queues cannot be created early and so they
1990 		 * can't be set up to receive forwarded interrupts for others.
1991 		 */
1992 		iaq->nirq = T4_EXTRA_INTR;
1993 		if (nrxq10g >= nofldrxq10g) {
1994 			iaq->intr_flags_10g = INTR_RXQ;
1995 			iaq->nirq += n10g * nrxq10g;
1996 #ifdef DEV_NETMAP
1997 			iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
1998 #endif
1999 		} else {
2000 			iaq->intr_flags_10g = INTR_OFLD_RXQ;
2001 			iaq->nirq += n10g * nofldrxq10g;
2002 #ifdef DEV_NETMAP
2003 			iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
2004 #endif
2005 		}
2006 		if (nrxq1g >= nofldrxq1g) {
2007 			iaq->intr_flags_1g = INTR_RXQ;
2008 			iaq->nirq += n1g * nrxq1g;
2009 #ifdef DEV_NETMAP
2010 			iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
2011 #endif
2012 		} else {
2013 			iaq->intr_flags_1g = INTR_OFLD_RXQ;
2014 			iaq->nirq += n1g * nofldrxq1g;
2015 #ifdef DEV_NETMAP
2016 			iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
2017 #endif
2018 		}
2019 		if (iaq->nirq <= navail &&
2020 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
2021 			goto allocate;
2022 
2023 		/*
2024 		 * Next best option: an interrupt vector for errors, one for the
2025 		 * firmware event queue, and at least one per port.  At this
2026 		 * point we know we'll have to downsize nrxq and/or nofldrxq
2027 		 * and/or nnmrxq to fit what's available to us.
2028 		 */
2029 		iaq->nirq = T4_EXTRA_INTR;
2030 		iaq->nirq += n10g + n1g;
2031 		if (iaq->nirq <= navail) {
2032 			int leftover = navail - iaq->nirq;
2033 
2034 			if (n10g > 0) {
2035 				int target = max(nrxq10g, nofldrxq10g);
2036 
2037 				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2038 				    INTR_RXQ : INTR_OFLD_RXQ;
2039 
2040 				n = 1;
2041 				while (n < target && leftover >= n10g) {
2042 					leftover -= n10g;
2043 					iaq->nirq += n10g;
2044 					n++;
2045 				}
2046 				iaq->nrxq10g = min(n, nrxq10g);
2047 #ifdef TCP_OFFLOAD
2048 				iaq->nofldrxq10g = min(n, nofldrxq10g);
2049 #endif
2050 #ifdef DEV_NETMAP
2051 				iaq->nnmrxq10g = min(n, nnmrxq10g);
2052 #endif
2053 			}
2054 
2055 			if (n1g > 0) {
2056 				int target = max(nrxq1g, nofldrxq1g);
2057 
2058 				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2059 				    INTR_RXQ : INTR_OFLD_RXQ;
2060 
2061 				n = 1;
2062 				while (n < target && leftover >= n1g) {
2063 					leftover -= n1g;
2064 					iaq->nirq += n1g;
2065 					n++;
2066 				}
2067 				iaq->nrxq1g = min(n, nrxq1g);
2068 #ifdef TCP_OFFLOAD
2069 				iaq->nofldrxq1g = min(n, nofldrxq1g);
2070 #endif
2071 #ifdef DEV_NETMAP
2072 				iaq->nnmrxq1g = min(n, nnmrxq1g);
2073 #endif
2074 			}
2075 
2076 			if (itype != INTR_MSI || powerof2(iaq->nirq))
2077 				goto allocate;
2078 		}
2079 
2080 		/*
2081 		 * Least desirable option: one interrupt vector for everything.
2082 		 */
2083 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2084 		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2085 #ifdef TCP_OFFLOAD
2086 		if (is_offload(sc))
2087 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2088 #endif
2089 #ifdef DEV_NETMAP
2090 		iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
2091 #endif
2092 
2093 allocate:
2094 		navail = iaq->nirq;
2095 		rc = 0;
2096 		if (itype == INTR_MSIX)
2097 			rc = pci_alloc_msix(sc->dev, &navail);
2098 		else if (itype == INTR_MSI)
2099 			rc = pci_alloc_msi(sc->dev, &navail);
2100 
2101 		if (rc == 0) {
2102 			if (navail == iaq->nirq)
2103 				return (0);
2104 
2105 			/*
2106 			 * Didn't get the number requested.  Use whatever number
2107 			 * the kernel is willing to allocate (it's in navail).
2108 			 */
2109 			device_printf(sc->dev, "fewer vectors than requested, "
2110 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2111 			    itype, iaq->nirq, navail);
2112 			pci_release_msi(sc->dev);
2113 			goto restart;
2114 		}
2115 
2116 		device_printf(sc->dev,
2117 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, "
2118 		    "rcvd=%d\n", itype, rc, iaq->nirq, navail);
2119 	}
2120 
2121 	device_printf(sc->dev,
2122 	    "failed to find a usable interrupt type.  "
2123 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
2124 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2125 
2126 	return (ENXIO);
2127 }
2128 
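/*
 * FW_VERSION(T4), for example, packs T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD
 * into the single 32-bit word used by the fw_ver field of struct fw_hdr;
 * FW_INTFVER picks up the per-interface version constants from the
 * firmware headers the driver was compiled against.
 */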
2129 #define FW_VERSION(chip) ( \
2130     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2131     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2132     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2133     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2134 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2135 
2136 struct fw_info {
2137 	uint8_t chip;
2138 	char *kld_name;
2139 	char *fw_mod_name;
2140 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
2141 } fw_info[] = {
2142 	{
2143 		.chip = CHELSIO_T4,
2144 		.kld_name = "t4fw_cfg",
2145 		.fw_mod_name = "t4fw",
2146 		.fw_hdr = {
2147 			.chip = FW_HDR_CHIP_T4,
2148 			.fw_ver = htobe32_const(FW_VERSION(T4)),
2149 			.intfver_nic = FW_INTFVER(T4, NIC),
2150 			.intfver_vnic = FW_INTFVER(T4, VNIC),
2151 			.intfver_ofld = FW_INTFVER(T4, OFLD),
2152 			.intfver_ri = FW_INTFVER(T4, RI),
2153 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2154 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
2155 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2156 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
2157 		},
2158 	}, {
2159 		.chip = CHELSIO_T5,
2160 		.kld_name = "t5fw_cfg",
2161 		.fw_mod_name = "t5fw",
2162 		.fw_hdr = {
2163 			.chip = FW_HDR_CHIP_T5,
2164 			.fw_ver = htobe32_const(FW_VERSION(T5)),
2165 			.intfver_nic = FW_INTFVER(T5, NIC),
2166 			.intfver_vnic = FW_INTFVER(T5, VNIC),
2167 			.intfver_ofld = FW_INTFVER(T5, OFLD),
2168 			.intfver_ri = FW_INTFVER(T5, RI),
2169 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2170 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
2171 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2172 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
2173 		},
2174 	}
2175 };
2176 
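/* Returns the fw_info entry for the given chip, or NULL if there is none. */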
2177 static struct fw_info *
2178 find_fw_info(int chip)
2179 {
2180 	int i;
2181 
2182 	for (i = 0; i < nitems(fw_info); i++) {
2183 		if (fw_info[i].chip == chip)
2184 			return (&fw_info[i]);
2185 	}
2186 	return (NULL);
2187 }
2188 
2189 /*
2190  * Is the given firmware API compatible with the one the driver was compiled
2191  * with?
2192  */
2193 static int
2194 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2195 {
2196 
2197 	/* short circuit if it's the exact same firmware version */
2198 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2199 		return (1);
2200 
2201 	/*
2202 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
2203 	 * features that are supported in the driver.
2204 	 */
2205 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2206 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2207 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2208 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2209 		return (1);
2210 #undef SAME_INTF
2211 
2212 	return (0);
2213 }
2214 
2215 /*
2216  * The firmware in the KLD is usable, but should it be installed?  This routine
2217  * explains itself in detail if it indicates the KLD firmware should be
2218  * installed.
2219  */
2220 static int
2221 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2222 {
2223 	const char *reason;
2224 
2225 	if (!card_fw_usable) {
2226 		reason = "incompatible or unusable";
2227 		goto install;
2228 	}
2229 
2230 	if (k > c) {
2231 		reason = "older than the version bundled with this driver";
2232 		goto install;
2233 	}
2234 
2235 	if (t4_fw_install == 2 && k != c) {
2236 		reason = "different than the version bundled with this driver";
2237 		goto install;
2238 	}
2239 
2240 	return (0);
2241 
2242 install:
2243 	if (t4_fw_install == 0) {
2244 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2245 		    "but the driver is prohibited from installing a different "
2246 		    "firmware on the card.\n",
2247 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2248 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2249 
2250 		return (0);
2251 	}
2252 
2253 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2254 	    "installing firmware %u.%u.%u.%u on card.\n",
2255 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2256 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2257 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2258 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2259 
2260 	return (1);
2261 }
2262 /*
2263  * Establish contact with the firmware and determine if we are the master driver
2264  * or not, and whether we are responsible for chip initialization.
2265  */
2266 static int
2267 prep_firmware(struct adapter *sc)
2268 {
2269 	const struct firmware *fw = NULL, *default_cfg;
2270 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2271 	enum dev_state state;
2272 	struct fw_info *fw_info;
2273 	struct fw_hdr *card_fw;		/* fw on the card */
2274 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2275 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2276 					   against */
2277 
2278 	/* Contact firmware. */
2279 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2280 	if (rc < 0 || state == DEV_STATE_ERR) {
2281 		rc = -rc;
2282 		device_printf(sc->dev,
2283 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2284 		return (rc);
2285 	}
2286 	pf = rc;
2287 	if (pf == sc->mbox)
2288 		sc->flags |= MASTER_PF;
2289 	else if (state == DEV_STATE_UNINIT) {
2290 		/*
2291 		 * We didn't get to be the master so we definitely won't be
2292 		 * configuring the chip.  It's a bug if someone else hasn't
2293 		 * configured it already.
2294 		 */
2295 		device_printf(sc->dev, "couldn't be master (%d), "
2296 		    "device not already initialized either (%d).\n", rc, state);
2297 		return (EDOOFUS);
2298 	}
2299 
2300 	/* This is the firmware whose headers the driver was compiled against */
2301 	fw_info = find_fw_info(chip_id(sc));
2302 	if (fw_info == NULL) {
2303 		device_printf(sc->dev,
2304 		    "unable to look up firmware information for chip %d.\n",
2305 		    chip_id(sc));
2306 		return (EINVAL);
2307 	}
2308 	drv_fw = &fw_info->fw_hdr;
2309 
2310 	/*
2311 	 * The firmware KLD contains many modules.  The KLD name is also the
2312 	 * name of the module that contains the default config file.
2313 	 */
2314 	default_cfg = firmware_get(fw_info->kld_name);
2315 
2316 	/* Read the header of the firmware on the card */
2317 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2318 	rc = -t4_read_flash(sc, FLASH_FW_START,
2319 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2320 	if (rc == 0)
2321 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2322 	else {
2323 		device_printf(sc->dev,
2324 		    "Unable to read card's firmware header: %d\n", rc);
2325 		card_fw_usable = 0;
2326 	}
2327 
2328 	/* This is the firmware in the KLD */
2329 	fw = firmware_get(fw_info->fw_mod_name);
2330 	if (fw != NULL) {
2331 		kld_fw = (const void *)fw->data;
2332 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2333 	} else {
2334 		kld_fw = NULL;
2335 		kld_fw_usable = 0;
2336 	}
2337 
2338 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2339 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2340 		/*
2341 		 * Common case: the firmware on the card is an exact match and
2342 		 * the KLD is an exact match too, or the KLD is
2343 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2344 		 * here -- use cxgbetool loadfw if you want to reinstall the
2345 		 * same firmware as the one on the card.
2346 		 */
2347 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2348 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2349 	    be32toh(card_fw->fw_ver))) {
2350 
2351 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2352 		if (rc != 0) {
2353 			device_printf(sc->dev,
2354 			    "failed to install firmware: %d\n", rc);
2355 			goto done;
2356 		}
2357 
2358 		/* Installed successfully, update the cached header too. */
2359 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2360 		card_fw_usable = 1;
2361 		need_fw_reset = 0;	/* already reset as part of load_fw */
2362 	}
2363 
2364 	if (!card_fw_usable) {
2365 		uint32_t d, c, k;
2366 
2367 		d = ntohl(drv_fw->fw_ver);
2368 		c = ntohl(card_fw->fw_ver);
2369 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2370 
2371 		device_printf(sc->dev, "Cannot find a usable firmware: "
2372 		    "fw_install %d, chip state %d, "
2373 		    "driver compiled with %d.%d.%d.%d, "
2374 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2375 		    t4_fw_install, state,
2376 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2377 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2378 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2379 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2380 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2381 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2382 		rc = EINVAL;
2383 		goto done;
2384 	}
2385 
2386 	/* We're using whatever's on the card and it's known to be good. */
2387 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2388 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2389 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2390 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2391 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2392 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2393 	t4_get_tp_version(sc, &sc->params.tp_vers);
2394 
2395 	/* Reset device */
2396 	if (need_fw_reset &&
2397 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2398 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2399 		if (rc != ETIMEDOUT && rc != EIO)
2400 			t4_fw_bye(sc, sc->mbox);
2401 		goto done;
2402 	}
2403 	sc->flags |= FW_OK;
2404 
2405 	rc = get_params__pre_init(sc);
2406 	if (rc != 0)
2407 		goto done; /* error message displayed already */
2408 
2409 	/* Partition adapter resources as specified in the config file. */
2410 	if (state == DEV_STATE_UNINIT) {
2411 
2412 		KASSERT(sc->flags & MASTER_PF,
2413 		    ("%s: trying to change chip settings when not master.",
2414 		    __func__));
2415 
2416 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2417 		if (rc != 0)
2418 			goto done;	/* error message displayed already */
2419 
2420 		t4_tweak_chip_settings(sc);
2421 
2422 		/* get basic stuff going */
2423 		rc = -t4_fw_initialize(sc, sc->mbox);
2424 		if (rc != 0) {
2425 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2426 			goto done;
2427 		}
2428 	} else {
2429 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2430 		sc->cfcsum = 0;
2431 	}
2432 
2433 done:
2434 	free(card_fw, M_CXGBE);
2435 	if (fw != NULL)
2436 		firmware_put(fw, FIRMWARE_UNLOAD);
2437 	if (default_cfg != NULL)
2438 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2439 
2440 	return (rc);
2441 }
2442 
2443 #define FW_PARAM_DEV(param) \
2444 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2445 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2446 #define FW_PARAM_PFVF(param) \
2447 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2448 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
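/*
 * Usage sketch: FW_PARAM_DEV(PORTVEC) builds the mnemonic/index word for
 * the device-wide PORTVEC parameter, ready to be handed to
 * t4_query_params() or t4_set_params().  See get_params__pre_init() for an
 * example.
 */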
2449 
2450 /*
2451  * Partition chip resources for use between various PFs, VFs, etc.
2452  */
2453 static int
2454 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2455     const char *name_prefix)
2456 {
2457 	const struct firmware *cfg = NULL;
2458 	int rc = 0;
2459 	struct fw_caps_config_cmd caps;
2460 	uint32_t mtype, moff, finicsum, cfcsum;
2461 
2462 	/*
2463 	 * Figure out what configuration file to use.  Pick the default config
2464 	 * file for the card if the user hasn't specified one explicitly.
2465 	 */
2466 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2467 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2468 		/* Card specific overrides go here. */
2469 		if (pci_get_device(sc->dev) == 0x440a)
2470 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2471 		if (is_fpga(sc))
2472 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2473 	}
2474 
2475 	/*
2476 	 * We need to load another module if the profile is anything except
2477 	 * "default" or "flash".
2478 	 */
2479 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2480 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2481 		char s[32];
2482 
2483 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2484 		cfg = firmware_get(s);
2485 		if (cfg == NULL) {
2486 			if (default_cfg != NULL) {
2487 				device_printf(sc->dev,
2488 				    "unable to load module \"%s\" for "
2489 				    "configuration profile \"%s\", will use "
2490 				    "the default config file instead.\n",
2491 				    s, sc->cfg_file);
2492 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2493 				    "%s", DEFAULT_CF);
2494 			} else {
2495 				device_printf(sc->dev,
2496 				    "unable to load module \"%s\" for "
2497 				    "configuration profile \"%s\", will use "
2498 				    "the config file on the card's flash "
2499 				    "instead.\n", s, sc->cfg_file);
2500 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2501 				    "%s", FLASH_CF);
2502 			}
2503 		}
2504 	}
2505 
2506 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2507 	    default_cfg == NULL) {
2508 		device_printf(sc->dev,
2509 		    "default config file not available, will use the config "
2510 		    "file on the card's flash instead.\n");
2511 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2512 	}
2513 
2514 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2515 		u_int cflen, i, n;
2516 		const uint32_t *cfdata;
2517 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2518 
2519 		KASSERT(cfg != NULL || default_cfg != NULL,
2520 		    ("%s: no config to upload", __func__));
2521 
2522 		/*
2523 		 * Ask the firmware where it wants us to upload the config file.
2524 		 */
2525 		param = FW_PARAM_DEV(CF);
2526 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2527 		if (rc != 0) {
2528 			/* No support for config file?  Shouldn't happen. */
2529 			device_printf(sc->dev,
2530 			    "failed to query config file location: %d.\n", rc);
2531 			goto done;
2532 		}
2533 		mtype = G_FW_PARAMS_PARAM_Y(val);
2534 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2535 
2536 		/*
2537 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2538 		 * useless stuffing/comments at the end of the config file so
2539 		 * it's ok to simply throw away the last remaining bytes when
2540 		 * the config file is not an exact multiple of 4.  This also
2541 		 * helps with the validate_mt_off_len check.
2542 		 */
2543 		if (cfg != NULL) {
2544 			cflen = cfg->datasize & ~3;
2545 			cfdata = cfg->data;
2546 		} else {
2547 			cflen = default_cfg->datasize & ~3;
2548 			cfdata = default_cfg->data;
2549 		}
2550 
2551 		if (cflen > FLASH_CFG_MAX_SIZE) {
2552 			device_printf(sc->dev,
2553 			    "config file too long (%d, max allowed is %d).  "
2554 			    "Will try to use the config on the card, if any.\n",
2555 			    cflen, FLASH_CFG_MAX_SIZE);
2556 			goto use_config_on_flash;
2557 		}
2558 
2559 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2560 		if (rc != 0) {
2561 			device_printf(sc->dev,
2562 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2563 			    "Will try to use the config on the card, if any.\n",
2564 			    __func__, mtype, moff, cflen, rc);
2565 			goto use_config_on_flash;
2566 		}
2567 
2568 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2569 		while (cflen) {
2570 			off = position_memwin(sc, 2, addr);
2571 			n = min(cflen, mw_aperture - off);
2572 			for (i = 0; i < n; i += 4)
2573 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2574 			cflen -= n;
2575 			addr += n;
2576 		}
2577 	} else {
2578 use_config_on_flash:
2579 		mtype = FW_MEMTYPE_FLASH;
2580 		moff = t4_flash_cfg_addr(sc);
2581 	}
2582 
2583 	bzero(&caps, sizeof(caps));
2584 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2585 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2586 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2587 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2588 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2589 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2590 	if (rc != 0) {
2591 		device_printf(sc->dev,
2592 		    "failed to pre-process config file: %d "
2593 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2594 		goto done;
2595 	}
2596 
2597 	finicsum = be32toh(caps.finicsum);
2598 	cfcsum = be32toh(caps.cfcsum);
2599 	if (finicsum != cfcsum) {
2600 		device_printf(sc->dev,
2601 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2602 		    finicsum, cfcsum);
2603 	}
2604 	sc->cfcsum = cfcsum;
2605 
2606 #define LIMIT_CAPS(x) do { \
2607 	caps.x &= htobe16(t4_##x##_allowed); \
2608 } while (0)
2609 
2610 	/*
2611 	 * Let the firmware know what features will (not) be used so it can tune
2612 	 * things accordingly.
2613 	 */
2614 	LIMIT_CAPS(linkcaps);
2615 	LIMIT_CAPS(niccaps);
2616 	LIMIT_CAPS(toecaps);
2617 	LIMIT_CAPS(rdmacaps);
2618 	LIMIT_CAPS(iscsicaps);
2619 	LIMIT_CAPS(fcoecaps);
2620 #undef LIMIT_CAPS
2621 
2622 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2623 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2624 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2625 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2626 	if (rc != 0) {
2627 		device_printf(sc->dev,
2628 		    "failed to process config file: %d.\n", rc);
2629 	}
2630 done:
2631 	if (cfg != NULL)
2632 		firmware_put(cfg, FIRMWARE_UNLOAD);
2633 	return (rc);
2634 }
2635 
2636 /*
2637  * Retrieve parameters that are needed (or nice to have) very early.
2638  */
2639 static int
2640 get_params__pre_init(struct adapter *sc)
2641 {
2642 	int rc;
2643 	uint32_t param[2], val[2];
2644 	struct fw_devlog_cmd cmd;
2645 	struct devlog_params *dlog = &sc->params.devlog;
2646 
2647 	param[0] = FW_PARAM_DEV(PORTVEC);
2648 	param[1] = FW_PARAM_DEV(CCLK);
2649 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2650 	if (rc != 0) {
2651 		device_printf(sc->dev,
2652 		    "failed to query parameters (pre_init): %d.\n", rc);
2653 		return (rc);
2654 	}
2655 
2656 	sc->params.portvec = val[0];
2657 	sc->params.nports = bitcount32(val[0]);
2658 	sc->params.vpd.cclk = val[1];
2659 
2660 	/* Read device log parameters. */
2661 	bzero(&cmd, sizeof(cmd));
2662 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2663 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2664 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2665 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2666 	if (rc != 0) {
2667 		device_printf(sc->dev,
2668 		    "failed to get devlog parameters: %d.\n", rc);
2669 		bzero(dlog, sizeof (*dlog));
2670 		rc = 0;	/* devlog isn't critical for device operation */
2671 	} else {
2672 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2673 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2674 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2675 		dlog->size = be32toh(cmd.memsize_devlog);
2676 	}
2677 
2678 	return (rc);
2679 }
2680 
2681 /*
2682  * Retrieve various parameters that are of interest to the driver.  The device
2683  * has been initialized by the firmware at this point.
2684  */
2685 static int
2686 get_params__post_init(struct adapter *sc)
2687 {
2688 	int rc;
2689 	uint32_t param[7], val[7];
2690 	struct fw_caps_config_cmd caps;
2691 
2692 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2693 	param[1] = FW_PARAM_PFVF(EQ_START);
2694 	param[2] = FW_PARAM_PFVF(FILTER_START);
2695 	param[3] = FW_PARAM_PFVF(FILTER_END);
2696 	param[4] = FW_PARAM_PFVF(L2T_START);
2697 	param[5] = FW_PARAM_PFVF(L2T_END);
2698 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2699 	if (rc != 0) {
2700 		device_printf(sc->dev,
2701 		    "failed to query parameters (post_init): %d.\n", rc);
2702 		return (rc);
2703 	}
2704 
2705 	sc->sge.iq_start = val[0];
2706 	sc->sge.eq_start = val[1];
2707 	sc->tids.ftid_base = val[2];
2708 	sc->tids.nftids = val[3] - val[2] + 1;
2709 	sc->params.ftid_min = val[2];
2710 	sc->params.ftid_max = val[3];
2711 	sc->vres.l2t.start = val[4];
2712 	sc->vres.l2t.size = val[5] - val[4] + 1;
2713 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2714 	    ("%s: L2 table size (%u) larger than expected (%u)",
2715 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2716 
2717 	/* get capabilities */
2718 	bzero(&caps, sizeof(caps));
2719 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2720 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2721 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2722 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2723 	if (rc != 0) {
2724 		device_printf(sc->dev,
2725 		    "failed to get card capabilities: %d.\n", rc);
2726 		return (rc);
2727 	}
2728 
2729 #define READ_CAPS(x) do { \
2730 	sc->x = be16toh(caps.x); \
2731 } while (0)
2732 	READ_CAPS(linkcaps);
2733 	READ_CAPS(niccaps);
2734 	READ_CAPS(toecaps);
2735 	READ_CAPS(rdmacaps);
2736 	READ_CAPS(iscsicaps);
2737 	READ_CAPS(fcoecaps);
2738 
2739 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2740 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2741 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2742 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2743 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2744 		if (rc != 0) {
2745 			device_printf(sc->dev,
2746 			    "failed to query NIC parameters: %d.\n", rc);
2747 			return (rc);
2748 		}
2749 		sc->tids.etid_base = val[0];
2750 		sc->params.etid_min = val[0];
2751 		sc->tids.netids = val[1] - val[0] + 1;
2752 		sc->params.netids = sc->tids.netids;
2753 		sc->params.eo_wr_cred = val[2];
2754 		sc->params.ethoffload = 1;
2755 	}
2756 
2757 	if (sc->toecaps) {
2758 		/* query offload-related parameters */
2759 		param[0] = FW_PARAM_DEV(NTID);
2760 		param[1] = FW_PARAM_PFVF(SERVER_START);
2761 		param[2] = FW_PARAM_PFVF(SERVER_END);
2762 		param[3] = FW_PARAM_PFVF(TDDP_START);
2763 		param[4] = FW_PARAM_PFVF(TDDP_END);
2764 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2765 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2766 		if (rc != 0) {
2767 			device_printf(sc->dev,
2768 			    "failed to query TOE parameters: %d.\n", rc);
2769 			return (rc);
2770 		}
2771 		sc->tids.ntids = val[0];
2772 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2773 		sc->tids.stid_base = val[1];
2774 		sc->tids.nstids = val[2] - val[1] + 1;
2775 		sc->vres.ddp.start = val[3];
2776 		sc->vres.ddp.size = val[4] - val[3] + 1;
2777 		sc->params.ofldq_wr_cred = val[5];
2778 		sc->params.offload = 1;
2779 	}
2780 	if (sc->rdmacaps) {
2781 		param[0] = FW_PARAM_PFVF(STAG_START);
2782 		param[1] = FW_PARAM_PFVF(STAG_END);
2783 		param[2] = FW_PARAM_PFVF(RQ_START);
2784 		param[3] = FW_PARAM_PFVF(RQ_END);
2785 		param[4] = FW_PARAM_PFVF(PBL_START);
2786 		param[5] = FW_PARAM_PFVF(PBL_END);
2787 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2788 		if (rc != 0) {
2789 			device_printf(sc->dev,
2790 			    "failed to query RDMA parameters(1): %d.\n", rc);
2791 			return (rc);
2792 		}
2793 		sc->vres.stag.start = val[0];
2794 		sc->vres.stag.size = val[1] - val[0] + 1;
2795 		sc->vres.rq.start = val[2];
2796 		sc->vres.rq.size = val[3] - val[2] + 1;
2797 		sc->vres.pbl.start = val[4];
2798 		sc->vres.pbl.size = val[5] - val[4] + 1;
2799 
2800 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2801 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2802 		param[2] = FW_PARAM_PFVF(CQ_START);
2803 		param[3] = FW_PARAM_PFVF(CQ_END);
2804 		param[4] = FW_PARAM_PFVF(OCQ_START);
2805 		param[5] = FW_PARAM_PFVF(OCQ_END);
2806 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2807 		if (rc != 0) {
2808 			device_printf(sc->dev,
2809 			    "failed to query RDMA parameters(2): %d.\n", rc);
2810 			return (rc);
2811 		}
2812 		sc->vres.qp.start = val[0];
2813 		sc->vres.qp.size = val[1] - val[0] + 1;
2814 		sc->vres.cq.start = val[2];
2815 		sc->vres.cq.size = val[3] - val[2] + 1;
2816 		sc->vres.ocq.start = val[4];
2817 		sc->vres.ocq.size = val[5] - val[4] + 1;
2818 	}
2819 	if (sc->iscsicaps) {
2820 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2821 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2822 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2823 		if (rc != 0) {
2824 			device_printf(sc->dev,
2825 			    "failed to query iSCSI parameters: %d.\n", rc);
2826 			return (rc);
2827 		}
2828 		sc->vres.iscsi.start = val[0];
2829 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2830 	}
2831 
2832 	/*
2833 	 * We've got the params we wanted to query via the firmware.  Now grab
2834 	 * some others directly from the chip.
2835 	 */
2836 	rc = t4_read_chip_settings(sc);
2837 
2838 	return (rc);
2839 }
2840 
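/*
 * Set firmware parameters the driver wants a certain way now that the
 * device has been initialized.  Nothing here is critical, so failures are
 * tolerated.
 */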
2841 static int
2842 set_params__post_init(struct adapter *sc)
2843 {
2844 	uint32_t param, val;
2845 
2846 	/* ask for encapsulated CPLs */
2847 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2848 	val = 1;
2849 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2850 
2851 	return (0);
2852 }
2853 
2854 #undef FW_PARAM_PFVF
2855 #undef FW_PARAM_DEV
2856 
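/*
 * Build a human-readable description of the adapter from its VPD (id,
 * serial number, part number, etc.) and attach it to the device.
 */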
2857 static void
2858 t4_set_desc(struct adapter *sc)
2859 {
2860 	char buf[128];
2861 	struct adapter_params *p = &sc->params;
2862 
2863 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2864 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2865 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2866 
2867 	device_set_desc_copy(sc->dev, buf);
2868 }
2869 
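/*
 * Build the list of media supported by the port from its port and
 * transceiver module types, and select the appropriate entry as active.
 */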
2870 static void
2871 build_medialist(struct port_info *pi, struct ifmedia *media)
2872 {
2873 	int m;
2874 
2875 	PORT_LOCK(pi);
2876 
2877 	ifmedia_removeall(media);
2878 
2879 	m = IFM_ETHER | IFM_FDX;
2880 
2881 	switch (pi->port_type) {
2882 	case FW_PORT_TYPE_BT_XFI:
2883 	case FW_PORT_TYPE_BT_XAUI:
2884 		ifmedia_add(media, m | IFM_10G_T, 0, NULL);
2885 		/* fall through */
2886 
2887 	case FW_PORT_TYPE_BT_SGMII:
2888 		ifmedia_add(media, m | IFM_1000_T, 0, NULL);
2889 		ifmedia_add(media, m | IFM_100_TX, 0, NULL);
2890 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
2891 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2892 		break;
2893 
2894 	case FW_PORT_TYPE_CX4:
2895 		ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
2896 		ifmedia_set(media, m | IFM_10G_CX4);
2897 		break;
2898 
2899 	case FW_PORT_TYPE_QSFP_10G:
2900 	case FW_PORT_TYPE_SFP:
2901 	case FW_PORT_TYPE_FIBER_XFI:
2902 	case FW_PORT_TYPE_FIBER_XAUI:
2903 		switch (pi->mod_type) {
2904 
2905 		case FW_PORT_MOD_TYPE_LR:
2906 			ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
2907 			ifmedia_set(media, m | IFM_10G_LR);
2908 			break;
2909 
2910 		case FW_PORT_MOD_TYPE_SR:
2911 			ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
2912 			ifmedia_set(media, m | IFM_10G_SR);
2913 			break;
2914 
2915 		case FW_PORT_MOD_TYPE_LRM:
2916 			ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
2917 			ifmedia_set(media, m | IFM_10G_LRM);
2918 			break;
2919 
2920 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2921 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2922 			ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
2923 			ifmedia_set(media, m | IFM_10G_TWINAX);
2924 			break;
2925 
2926 		case FW_PORT_MOD_TYPE_NONE:
2927 			m &= ~IFM_FDX;
2928 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
2929 			ifmedia_set(media, m | IFM_NONE);
2930 			break;
2931 
2932 		case FW_PORT_MOD_TYPE_NA:
2933 		case FW_PORT_MOD_TYPE_ER:
2934 		default:
2935 			device_printf(pi->dev,
2936 			    "unknown port_type (%d), mod_type (%d)\n",
2937 			    pi->port_type, pi->mod_type);
2938 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2939 			ifmedia_set(media, m | IFM_UNKNOWN);
2940 			break;
2941 		}
2942 		break;
2943 
2944 	case FW_PORT_TYPE_QSFP:
2945 		switch (pi->mod_type) {
2946 
2947 		case FW_PORT_MOD_TYPE_LR:
2948 			ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
2949 			ifmedia_set(media, m | IFM_40G_LR4);
2950 			break;
2951 
2952 		case FW_PORT_MOD_TYPE_SR:
2953 			ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
2954 			ifmedia_set(media, m | IFM_40G_SR4);
2955 			break;
2956 
2957 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2958 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2959 			ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
2960 			ifmedia_set(media, m | IFM_40G_CR4);
2961 			break;
2962 
2963 		case FW_PORT_MOD_TYPE_NONE:
2964 			m &= ~IFM_FDX;
2965 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
2966 			ifmedia_set(media, m | IFM_NONE);
2967 			break;
2968 
2969 		default:
2970 			device_printf(pi->dev,
2971 			    "unknown port_type (%d), mod_type (%d)\n",
2972 			    pi->port_type, pi->mod_type);
2973 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2974 			ifmedia_set(media, m | IFM_UNKNOWN);
2975 			break;
2976 		}
2977 		break;
2978 
2979 	default:
2980 		device_printf(pi->dev,
2981 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2982 		    pi->mod_type);
2983 		ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2984 		ifmedia_set(media, m | IFM_UNKNOWN);
2985 		break;
2986 	}
2987 
2988 	PORT_UNLOCK(pi);
2989 }
2990 
2991 #define FW_MAC_EXACT_CHUNK	7
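/*
 * update_mac_settings() programs multicast addresses in chunks of at most
 * this many exact-match entries per t4_alloc_mac_filt() call.
 */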
2992 
2993 /*
2994  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2995  * indicates which parameters should be programmed (the rest are left alone).
2996  */
2997 int
2998 update_mac_settings(struct ifnet *ifp, int flags)
2999 {
3000 	int rc = 0;
3001 	struct port_info *pi = ifp->if_softc;
3002 	struct adapter *sc = pi->adapter;
3003 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3004 	uint16_t viid = 0xffff;
3005 	int16_t *xact_addr_filt = NULL;
3006 
3007 	ASSERT_SYNCHRONIZED_OP(sc);
3008 	KASSERT(flags, ("%s: not told what to update.", __func__));
3009 
3010 	if (ifp == pi->ifp) {
3011 		viid = pi->viid;
3012 		xact_addr_filt = &pi->xact_addr_filt;
3013 	}
3014 #ifdef DEV_NETMAP
3015 	else if (ifp == pi->nm_ifp) {
3016 		viid = pi->nm_viid;
3017 		xact_addr_filt = &pi->nm_xact_addr_filt;
3018 	}
3019 #endif
3020 	if (flags & XGMAC_MTU)
3021 		mtu = ifp->if_mtu;
3022 
3023 	if (flags & XGMAC_PROMISC)
3024 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3025 
3026 	if (flags & XGMAC_ALLMULTI)
3027 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3028 
3029 	if (flags & XGMAC_VLANEX)
3030 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3031 
3032 	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3033 		rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
3034 		    1, vlanex, false);
3035 		if (rc) {
3036 			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3037 			    rc);
3038 			return (rc);
3039 		}
3040 	}
3041 
3042 	if (flags & XGMAC_UCADDR) {
3043 		uint8_t ucaddr[ETHER_ADDR_LEN];
3044 
3045 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3046 		rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
3047 		    true, true);
3048 		if (rc < 0) {
3049 			rc = -rc;
3050 			if_printf(ifp, "change_mac failed: %d\n", rc);
3051 			return (rc);
3052 		} else {
3053 			*xact_addr_filt = rc;
3054 			rc = 0;
3055 		}
3056 	}
3057 
3058 	if (flags & XGMAC_MCADDRS) {
3059 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3060 		int del = 1;
3061 		uint64_t hash = 0;
3062 		struct ifmultiaddr *ifma;
3063 		int i = 0, j;
3064 
3065 		if_maddr_rlock(ifp);
3066 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3067 			if (ifma->ifma_addr->sa_family != AF_LINK)
3068 				continue;
3069 			mcaddr[i] =
3070 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3071 			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3072 			i++;
3073 
3074 			if (i == FW_MAC_EXACT_CHUNK) {
3075 				rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
3076 				    i, mcaddr, NULL, &hash, 0);
3077 				if (rc < 0) {
3078 					rc = -rc;
3079 					for (j = 0; j < i; j++) {
3080 						if_printf(ifp,
3081 						    "failed to add mc address"
3082 						    " %02x:%02x:%02x:"
3083 						    "%02x:%02x:%02x rc=%d\n",
3084 						    mcaddr[j][0], mcaddr[j][1],
3085 						    mcaddr[j][2], mcaddr[j][3],
3086 						    mcaddr[j][4], mcaddr[j][5],
3087 						    rc);
3088 					}
3089 					goto mcfail;
3090 				}
3091 				del = 0;
3092 				i = 0;
3093 			}
3094 		}
3095 		if (i > 0) {
3096 			rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
3097 			    mcaddr, NULL, &hash, 0);
3098 			if (rc < 0) {
3099 				rc = -rc;
3100 				for (j = 0; j < i; j++) {
3101 					if_printf(ifp,
3102 					    "failed to add mc address"
3103 					    " %02x:%02x:%02x:"
3104 					    "%02x:%02x:%02x rc=%d\n",
3105 					    mcaddr[j][0], mcaddr[j][1],
3106 					    mcaddr[j][2], mcaddr[j][3],
3107 					    mcaddr[j][4], mcaddr[j][5],
3108 					    rc);
3109 				}
3110 				goto mcfail;
3111 			}
3112 		}
3113 
3114 		rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
3115 		if (rc != 0)
3116 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
3117 mcfail:
3118 		if_maddr_runlock(ifp);
3119 	}
3120 
3121 	return (rc);
3122 }
3123 
3124 /*
3125  * {begin|end}_synchronized_op must be called from the same thread.
3126  */
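/*
 * A sketch of typical usage (the wmesg string is arbitrary):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4exmp");
 *	if (rc != 0)
 *		return (rc);
 *	... the adapter/port state won't change underneath us here ...
 *	end_synchronized_op(sc, 0);
 *
 * A begin with HOLD_LOCK must be paired with an end that's passed
 * LOCK_HELD.
 */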
3127 int
3128 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
3129     char *wmesg)
3130 {
3131 	int rc, pri;
3132 
3133 #ifdef WITNESS
3134 	/* the caller thinks it's ok to sleep, but is it really? */
3135 	if (flags & SLEEP_OK)
3136 		pause("t4slptst", 1);
3137 #endif
3138 
3139 	if (flags & INTR_OK)
3140 		pri = PCATCH;
3141 	else
3142 		pri = 0;
3143 
3144 	ADAPTER_LOCK(sc);
3145 	for (;;) {
3146 
3147 		if (pi && IS_DOOMED(pi)) {
3148 			rc = ENXIO;
3149 			goto done;
3150 		}
3151 
3152 		if (!IS_BUSY(sc)) {
3153 			rc = 0;
3154 			break;
3155 		}
3156 
3157 		if (!(flags & SLEEP_OK)) {
3158 			rc = EBUSY;
3159 			goto done;
3160 		}
3161 
3162 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3163 			rc = EINTR;
3164 			goto done;
3165 		}
3166 	}
3167 
3168 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3169 	SET_BUSY(sc);
3170 #ifdef INVARIANTS
3171 	sc->last_op = wmesg;
3172 	sc->last_op_thr = curthread;
3173 	sc->last_op_flags = flags;
3174 #endif
3175 
3176 done:
3177 	if (!(flags & HOLD_LOCK) || rc)
3178 		ADAPTER_UNLOCK(sc);
3179 
3180 	return (rc);
3181 }
3182 
3183 /*
3184  * {begin|end}_synchronized_op must be called from the same thread.
3185  */
3186 void
3187 end_synchronized_op(struct adapter *sc, int flags)
3188 {
3189 
3190 	if (flags & LOCK_HELD)
3191 		ADAPTER_LOCK_ASSERT_OWNED(sc);
3192 	else
3193 		ADAPTER_LOCK(sc);
3194 
3195 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3196 	CLR_BUSY(sc);
3197 	wakeup(&sc->flags);
3198 	ADAPTER_UNLOCK(sc);
3199 }
3200 
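/*
 * Bring the port fully up: program the MAC, enable the VI and the tx
 * queues, and mark the ifnet running.  Runs with a synchronized operation
 * in progress (see ASSERT_SYNCHRONIZED_OP).
 */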
3201 static int
3202 cxgbe_init_synchronized(struct port_info *pi)
3203 {
3204 	struct adapter *sc = pi->adapter;
3205 	struct ifnet *ifp = pi->ifp;
3206 	int rc = 0, i;
3207 	struct sge_txq *txq;
3208 
3209 	ASSERT_SYNCHRONIZED_OP(sc);
3210 
3211 	if (isset(&sc->open_device_map, pi->port_id)) {
3212 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
3213 		    ("mismatch between open_device_map and if_drv_flags"));
3214 		return (0);	/* already running */
3215 	}
3216 
3217 	if (!(sc->flags & FULL_INIT_DONE) &&
3218 	    ((rc = adapter_full_init(sc)) != 0))
3219 		return (rc);	/* error message displayed already */
3220 
3221 	if (!(pi->flags & PORT_INIT_DONE) &&
3222 	    ((rc = port_full_init(pi)) != 0))
3223 		return (rc); /* error message displayed already */
3224 
3225 	rc = update_mac_settings(ifp, XGMAC_ALL);
3226 	if (rc)
3227 		goto done;	/* error message displayed already */
3228 
3229 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
3230 	if (rc != 0) {
3231 		if_printf(ifp, "enable_vi failed: %d\n", rc);
3232 		goto done;
3233 	}
3234 
3235 	/*
3236 	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
3237 	 * if this changes.
3238 	 */
3239 
3240 	for_each_txq(pi, i, txq) {
3241 		TXQ_LOCK(txq);
3242 		txq->eq.flags |= EQ_ENABLED;
3243 		TXQ_UNLOCK(txq);
3244 	}
3245 
3246 	/*
3247 	 * The first iq of the first port to come up is used for tracing.
3248 	 */
3249 	if (sc->traceq < 0) {
3250 		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
3251 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
3252 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
3253 		    V_QUEUENUMBER(sc->traceq));
3254 		pi->flags |= HAS_TRACEQ;
3255 	}
3256 
3257 	/* all ok */
3258 	setbit(&sc->open_device_map, pi->port_id);
3259 	PORT_LOCK(pi);
3260 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3261 	PORT_UNLOCK(pi);
3262 
3263 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
3264 done:
3265 	if (rc != 0)
3266 		cxgbe_uninit_synchronized(pi);
3267 
3268 	return (rc);
3269 }
3270 
3271 /*
3272  * Idempotent.
3273  */
3274 static int
3275 cxgbe_uninit_synchronized(struct port_info *pi)
3276 {
3277 	struct adapter *sc = pi->adapter;
3278 	struct ifnet *ifp = pi->ifp;
3279 	int rc, i;
3280 	struct sge_txq *txq;
3281 
3282 	ASSERT_SYNCHRONIZED_OP(sc);
3283 
3284 	if (!(pi->flags & PORT_INIT_DONE)) {
3285 		KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
3286 		    ("uninited port is running"));
3287 		return (0);
3288 	}
3289 
3290 	/*
3291 	 * Disable the VI so that all its data in either direction is discarded
3292 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
3293 	 * tick) intact as the TP can deliver negative advice or data that it's
3294 	 * holding in its RAM (for an offloaded connection) even after the VI is
3295 	 * disabled.
3296 	 */
3297 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
3298 	if (rc) {
3299 		if_printf(ifp, "disable_vi failed: %d\n", rc);
3300 		return (rc);
3301 	}
3302 
3303 	for_each_txq(pi, i, txq) {
3304 		TXQ_LOCK(txq);
3305 		txq->eq.flags &= ~EQ_ENABLED;
3306 		TXQ_UNLOCK(txq);
3307 	}
3308 
3309 	clrbit(&sc->open_device_map, pi->port_id);
3310 	PORT_LOCK(pi);
3311 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3312 	PORT_UNLOCK(pi);
3313 
3314 	pi->link_cfg.link_ok = 0;
3315 	pi->link_cfg.speed = 0;
3316 	pi->linkdnrc = -1;
3317 	t4_os_link_changed(sc, pi->port_id, 0, -1);
3318 
3319 	return (0);
3320 }
3321 
3322 /*
3323  * It is ok for this function to fail midway and return right away.  t4_detach
3324  * will walk the entire sc->irq list and clean up whatever is valid.
3325  */
3326 static int
3327 setup_intr_handlers(struct adapter *sc)
3328 {
3329 	int rc, rid, p, q;
3330 	char s[8];
3331 	struct irq *irq;
3332 	struct port_info *pi;
3333 	struct sge_rxq *rxq;
3334 #ifdef TCP_OFFLOAD
3335 	struct sge_ofld_rxq *ofld_rxq;
3336 #endif
3337 #ifdef DEV_NETMAP
3338 	struct sge_nm_rxq *nm_rxq;
3339 #endif
3340 
3341 	/*
3342 	 * Set up interrupts.
3343 	 */
3344 	irq = &sc->irq[0];
3345 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3346 	if (sc->intr_count == 1)
3347 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
3348 
3349 	/* Multiple interrupts. */
3350 	KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3351 	    ("%s: too few intr.", __func__));
3352 
3353 	/* The first one is always error intr */
3354 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3355 	if (rc != 0)
3356 		return (rc);
3357 	irq++;
3358 	rid++;
3359 
3360 	/* The second one is always the firmware event queue */
3361 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
3362 	if (rc != 0)
3363 		return (rc);
3364 	irq++;
3365 	rid++;
3366 
3367 	for_each_port(sc, p) {
3368 		pi = sc->port[p];
3369 
3370 		if (pi->flags & INTR_RXQ) {
3371 			for_each_rxq(pi, q, rxq) {
3372 				snprintf(s, sizeof(s), "%d.%d", p, q);
3373 				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3374 				    s);
3375 				if (rc != 0)
3376 					return (rc);
3377 				irq++;
3378 				rid++;
3379 			}
3380 		}
3381 #ifdef TCP_OFFLOAD
3382 		if (pi->flags & INTR_OFLD_RXQ) {
3383 			for_each_ofld_rxq(pi, q, ofld_rxq) {
3384 				snprintf(s, sizeof(s), "%d,%d", p, q);
3385 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3386 				    ofld_rxq, s);
3387 				if (rc != 0)
3388 					return (rc);
3389 				irq++;
3390 				rid++;
3391 			}
3392 		}
3393 #endif
3394 #ifdef DEV_NETMAP
3395 		if (pi->flags & INTR_NM_RXQ) {
3396 			for_each_nm_rxq(pi, q, nm_rxq) {
3397 				snprintf(s, sizeof(s), "%d-%d", p, q);
3398 				rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
3399 				    nm_rxq, s);
3400 				if (rc != 0)
3401 					return (rc);
3402 				irq++;
3403 				rid++;
3404 			}
3405 		}
3406 #endif
3407 	}
3408 	MPASS(irq == &sc->irq[sc->intr_count]);
3409 
3410 	return (0);
3411 }
3412 
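/*
 * One-time initialization of adapter-wide state: the queues that belong to
 * the adapter, the taskqueues, and interrupt enablement.  Undone by
 * adapter_full_uninit().
 */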
3413 int
3414 adapter_full_init(struct adapter *sc)
3415 {
3416 	int rc, i;
3417 
3418 	ASSERT_SYNCHRONIZED_OP(sc);
3419 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3420 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3421 	    ("%s: FULL_INIT_DONE already", __func__));
3422 
3423 	/*
3424 	 * Set up queues that belong to the adapter (not any particular port).
3425 	 */
3426 	rc = t4_setup_adapter_queues(sc);
3427 	if (rc != 0)
3428 		goto done;
3429 
3430 	for (i = 0; i < nitems(sc->tq); i++) {
3431 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3432 		    taskqueue_thread_enqueue, &sc->tq[i]);
3433 		if (sc->tq[i] == NULL) {
3434 			device_printf(sc->dev,
3435 			    "failed to allocate task queue %d\n", i);
3436 			rc = ENOMEM;
3437 			goto done;
3438 		}
3439 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3440 		    device_get_nameunit(sc->dev), i);
3441 	}
3442 
3443 	t4_intr_enable(sc);
3444 	sc->flags |= FULL_INIT_DONE;
3445 done:
3446 	if (rc != 0)
3447 		adapter_full_uninit(sc);
3448 
3449 	return (rc);
3450 }
3451 
3452 int
3453 adapter_full_uninit(struct adapter *sc)
3454 {
3455 	int i;
3456 
3457 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3458 
3459 	t4_teardown_adapter_queues(sc);
3460 
3461 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3462 		taskqueue_free(sc->tq[i]);
3463 		sc->tq[i] = NULL;
3464 	}
3465 
3466 	sc->flags &= ~FULL_INIT_DONE;
3467 
3468 	return (0);
3469 }
3470 
3471 #ifdef RSS
3472 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
3473     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
3474     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
3475     RSS_HASHTYPE_RSS_UDP_IPV6)
3476 
3477 /* Translates kernel hash types to hardware. */
3478 static int
3479 hashconfig_to_hashen(int hashconfig)
3480 {
3481 	int hashen = 0;
3482 
3483 	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
3484 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
3485 	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
3486 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
3487 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
3488 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3489 		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3490 	}
3491 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
3492 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3493 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3494 	}
3495 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
3496 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3497 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
3498 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3499 
3500 	return (hashen);
3501 }
3502 
3503 /* Translates hardware hash types to kernel. */
3504 static int
3505 hashen_to_hashconfig(int hashen)
3506 {
3507 	int hashconfig = 0;
3508 
3509 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
3510 		/*
3511 		 * If UDP hashing was enabled it must have been enabled for
3512 		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
3513 		 * enabling any 4-tuple hash is a nonsensical configuration.
3514 		 */
3515 		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3516 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
3517 
3518 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3519 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
3520 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3521 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
3522 	}
3523 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3524 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
3525 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3526 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
3527 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3528 		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
3529 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3530 		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
3531 
3532 	return (hashconfig);
3533 }
3534 #endif
3535 
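/*
 * One-time initialization of a port's queues and its RSS table/hash
 * configuration.  Undone by port_full_uninit().
 */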
3536 int
3537 port_full_init(struct port_info *pi)
3538 {
3539 	struct adapter *sc = pi->adapter;
3540 	struct ifnet *ifp = pi->ifp;
3541 	uint16_t *rss;
3542 	struct sge_rxq *rxq;
3543 	int rc, i, j, hashen;
3544 #ifdef RSS
3545 	int nbuckets = rss_getnumbuckets();
3546 	int hashconfig = rss_gethashconfig();
3547 	int extra;
3548 	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3549 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3550 #endif
3551 
3552 	ASSERT_SYNCHRONIZED_OP(sc);
3553 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3554 	    ("%s: PORT_INIT_DONE already", __func__));
3555 
3556 	sysctl_ctx_init(&pi->ctx);
3557 	pi->flags |= PORT_SYSCTL_CTX;
3558 
3559 	/*
3560 	 * Allocate tx/rx/fl queues for this port.
3561 	 */
3562 	rc = t4_setup_port_queues(pi);
3563 	if (rc != 0)
3564 		goto done;	/* error message displayed already */
3565 
3566 	/*
3567 	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
3568 	 */
3569 	if (pi->nrxq > pi->rss_size) {
3570 		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
3571 		    "some queues will never receive traffic.\n", pi->nrxq,
3572 		    pi->rss_size);
3573 	} else if (pi->rss_size % pi->nrxq) {
3574 		if_printf(ifp, "hw RSS table size (%d) is not a multiple of "
3575 		    "nrxq (%d); expect uneven traffic distribution.\n",
3576 		    pi->rss_size, pi->nrxq);
3577 	}
3578 #ifdef RSS
3579 	MPASS(RSS_KEYSIZE == 40);
3580 	if (pi->nrxq != nbuckets) {
3581 		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
3582 		    "performance will be impacted.\n", pi->nrxq, nbuckets);
3583 	}
3584 
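	/*
	 * The hardware expects the 40-byte RSS key with its 32-bit words in
	 * the reverse order of the kernel's copy and in big-endian form,
	 * hence the word reversal and htobe32() below.
	 */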
3585 	rss_getkey((void *)&raw_rss_key[0]);
3586 	for (i = 0; i < nitems(rss_key); i++) {
3587 		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
3588 	}
3589 	t4_write_rss_key(sc, (void *)&rss_key[0], -1);
3590 #endif
3591 	rss = malloc(pi->rss_size * sizeof(*rss), M_CXGBE, M_ZERO | M_WAITOK);
3592 	for (i = 0; i < pi->rss_size;) {
3593 #ifdef RSS
3594 		j = rss_get_indirection_to_bucket(i);
3595 		j %= pi->nrxq;
3596 		rxq = &sc->sge.rxq[pi->first_rxq + j];
3597 		rss[i++] = rxq->iq.abs_id;
3598 #else
3599 		for_each_rxq(pi, j, rxq) {
3600 			rss[i++] = rxq->iq.abs_id;
3601 			if (i == pi->rss_size)
3602 				break;
3603 		}
3604 #endif
3605 	}
3606 
3607 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3608 	    pi->rss_size);
3609 	if (rc != 0) {
3610 		if_printf(ifp, "rss_config failed: %d\n", rc);
3611 		goto done;
3612 	}
3613 
3614 #ifdef RSS
3615 	hashen = hashconfig_to_hashen(hashconfig);
3616 
3617 	/*
3618 	 * We may have had to enable some hashes even though the global config
3619 	 * wants them disabled.  This is a potential problem that must be
3620 	 * reported to the user.
3621 	 */
3622 	extra = hashen_to_hashconfig(hashen) ^ hashconfig;
3623 
3624 	/*
3625 	 * If we consider only the supported hash types, then the enabled hashes
3626 	 * are a superset of the requested hashes.  In other words, there cannot
3627 	 * be any supported hash that was requested but not enabled, but there
3628 	 * can be hashes that were not requested but had to be enabled.
3629 	 */
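	/*
	 * For example, a request for UDP/IPv4 hashing alone enables
	 * IP4FOURTUPEN, which also covers TCP/IPv4 4-tuple hashing, so
	 * TCP_IPV4 would show up in 'extra' for such a request.
	 */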
3630 	extra &= SUPPORTED_RSS_HASHTYPES;
3631 	MPASS((extra & hashconfig) == 0);
3632 
3633 	if (extra) {
3634 		if_printf(ifp,
3635 		    "global RSS config (0x%x) cannot be accomodated.\n",
3636 		    hashconfig);
3637 	}
3638 	if (extra & RSS_HASHTYPE_RSS_IPV4)
3639 		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
3640 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
3641 		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
3642 	if (extra & RSS_HASHTYPE_RSS_IPV6)
3643 		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
3644 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
3645 		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
3646 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
3647 		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
3648 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
3649 		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
3650 #else
3651 	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
3652 	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
3653 	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3654 	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
3655 #endif
3656 	rc = -t4_config_vi_rss(sc, sc->mbox, pi->viid, hashen, rss[0]);
3657 	if (rc != 0) {
3658 		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
3659 		goto done;
3660 	}
3661 
3662 	pi->rss = rss;
3663 	pi->flags |= PORT_INIT_DONE;
3664 done:
3665 	if (rc != 0)
3666 		port_full_uninit(pi);
3667 
3668 	return (rc);
3669 }
3670 
3671 /*
3672  * Idempotent.
3673  */
3674 int
3675 port_full_uninit(struct port_info *pi)
3676 {
3677 	struct adapter *sc = pi->adapter;
3678 	int i;
3679 	struct sge_rxq *rxq;
3680 	struct sge_txq *txq;
3681 #ifdef TCP_OFFLOAD
3682 	struct sge_ofld_rxq *ofld_rxq;
3683 	struct sge_wrq *ofld_txq;
3684 #endif
3685 
3686 	if (pi->flags & PORT_INIT_DONE) {
3687 
3688 		/* Need to quiesce queues.  */
3689 
3690 		quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
3691 
3692 		for_each_txq(pi, i, txq) {
3693 			quiesce_txq(sc, txq);
3694 		}
3695 
3696 #ifdef TCP_OFFLOAD
3697 		for_each_ofld_txq(pi, i, ofld_txq) {
3698 			quiesce_wrq(sc, ofld_txq);
3699 		}
3700 #endif
3701 
3702 		for_each_rxq(pi, i, rxq) {
3703 			quiesce_iq(sc, &rxq->iq);
3704 			quiesce_fl(sc, &rxq->fl);
3705 		}
3706 
3707 #ifdef TCP_OFFLOAD
3708 		for_each_ofld_rxq(pi, i, ofld_rxq) {
3709 			quiesce_iq(sc, &ofld_rxq->iq);
3710 			quiesce_fl(sc, &ofld_rxq->fl);
3711 		}
3712 #endif
3713 		free(pi->rss, M_CXGBE);
3714 	}
3715 
3716 	t4_teardown_port_queues(pi);
3717 	pi->flags &= ~PORT_INIT_DONE;
3718 
3719 	return (0);
3720 }
3721 
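/*
 * Wait until a tx queue is completely idle: the mp_ring must drain, the
 * hardware must consume everything that was posted, and the driver must
 * reclaim all of the descriptors.  The eq must already be disabled.
 */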
3722 static void
3723 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
3724 {
3725 	struct sge_eq *eq = &txq->eq;
3726 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
3727 
3728 	(void) sc;	/* unused */
3729 
3730 #ifdef INVARIANTS
3731 	TXQ_LOCK(txq);
3732 	MPASS((eq->flags & EQ_ENABLED) == 0);
3733 	TXQ_UNLOCK(txq);
3734 #endif
3735 
3736 	/* Wait for the mp_ring to empty. */
3737 	while (!mp_ring_is_idle(txq->r)) {
3738 		mp_ring_check_drainage(txq->r, 0);
3739 		pause("rquiesce", 1);
3740 	}
3741 
3742 	/* Then wait for the hardware to finish. */
3743 	while (spg->cidx != htobe16(eq->pidx))
3744 		pause("equiesce", 1);
3745 
3746 	/* Finally, wait for the driver to reclaim all descriptors. */
3747 	while (eq->cidx != eq->pidx)
3748 		pause("dquiesce", 1);
3749 }
3750 
3751 static void
3752 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
3753 {
3754 
3755 	/* XXXTX */
3756 }
3757 
3758 static void
3759 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3760 {
3761 	(void) sc;	/* unused */
3762 
3763 	/* Synchronize with the interrupt handler */
3764 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3765 		pause("iqfree", 1);
3766 }
3767 
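/*
 * Mark the freelist doomed so that the starvation callout leaves it alone,
 * then wait for any in-progress callout to finish.
 */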
3768 static void
3769 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3770 {
3771 	mtx_lock(&sc->sfl_lock);
3772 	FL_LOCK(fl);
3773 	fl->flags |= FL_DOOMED;
3774 	FL_UNLOCK(fl);
3775 	mtx_unlock(&sc->sfl_lock);
3776 
3777 	callout_drain(&sc->sfl_callout);
3778 	KASSERT((fl->flags & FL_STARVING) == 0,
3779 	    ("%s: still starving", __func__));
3780 }
3781 
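/*
 * Allocate the IRQ resource for the given rid and hook up its handler.  On
 * success the irq is filled in and should be released with t4_free_irq().
 */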
3782 static int
3783 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3784     driver_intr_t *handler, void *arg, char *name)
3785 {
3786 	int rc;
3787 
3788 	irq->rid = rid;
3789 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3790 	    RF_SHAREABLE | RF_ACTIVE);
3791 	if (irq->res == NULL) {
3792 		device_printf(sc->dev,
3793 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3794 		return (ENOMEM);
3795 	}
3796 
3797 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3798 	    NULL, handler, arg, &irq->tag);
3799 	if (rc != 0) {
3800 		device_printf(sc->dev,
3801 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3802 		    rid, name, rc);
3803 	} else if (name)
3804 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3805 
3806 	return (rc);
3807 }
3808 
3809 static int
3810 t4_free_irq(struct adapter *sc, struct irq *irq)
3811 {
3812 	if (irq->tag)
3813 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3814 	if (irq->res)
3815 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3816 
3817 	bzero(irq, sizeof(*irq));
3818 
3819 	return (0);
3820 }
3821 
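/*
 * Copy the 32-bit registers in [start, end] (both inclusive) into the dump
 * buffer at the offset that corresponds to the first register.
 */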
3822 static void
3823 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3824     unsigned int end)
3825 {
3826 	uint32_t *p = (uint32_t *)(buf + start);
3827 
3828 	for ( ; start <= end; start += sizeof(uint32_t))
3829 		*p++ = t4_read_reg(sc, start);
3830 }
3831 
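/*
 * Fill a register dump: select the range table for this chip generation
 * (T4 or T5) and snapshot each range of registers.
 */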
3832 static void
3833 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3834 {
3835 	int i, n;
3836 	const unsigned int *reg_ranges;
3837 	static const unsigned int t4_reg_ranges[] = {
3838 		0x1008, 0x1108,
3839 		0x1180, 0x11b4,
3840 		0x11fc, 0x123c,
3841 		0x1300, 0x173c,
3842 		0x1800, 0x18fc,
3843 		0x3000, 0x30d8,
3844 		0x30e0, 0x5924,
3845 		0x5960, 0x59d4,
3846 		0x5a00, 0x5af8,
3847 		0x6000, 0x6098,
3848 		0x6100, 0x6150,
3849 		0x6200, 0x6208,
3850 		0x6240, 0x6248,
3851 		0x6280, 0x6338,
3852 		0x6370, 0x638c,
3853 		0x6400, 0x643c,
3854 		0x6500, 0x6524,
3855 		0x6a00, 0x6a38,
3856 		0x6a60, 0x6a78,
3857 		0x6b00, 0x6b84,
3858 		0x6bf0, 0x6c84,
3859 		0x6cf0, 0x6d84,
3860 		0x6df0, 0x6e84,
3861 		0x6ef0, 0x6f84,
3862 		0x6ff0, 0x7084,
3863 		0x70f0, 0x7184,
3864 		0x71f0, 0x7284,
3865 		0x72f0, 0x7384,
3866 		0x73f0, 0x7450,
3867 		0x7500, 0x7530,
3868 		0x7600, 0x761c,
3869 		0x7680, 0x76cc,
3870 		0x7700, 0x7798,
3871 		0x77c0, 0x77fc,
3872 		0x7900, 0x79fc,
3873 		0x7b00, 0x7c38,
3874 		0x7d00, 0x7efc,
3875 		0x8dc0, 0x8e1c,
3876 		0x8e30, 0x8e78,
3877 		0x8ea0, 0x8f6c,
3878 		0x8fc0, 0x9074,
3879 		0x90fc, 0x90fc,
3880 		0x9400, 0x9458,
3881 		0x9600, 0x96bc,
3882 		0x9800, 0x9808,
3883 		0x9820, 0x983c,
3884 		0x9850, 0x9864,
3885 		0x9c00, 0x9c6c,
3886 		0x9c80, 0x9cec,
3887 		0x9d00, 0x9d6c,
3888 		0x9d80, 0x9dec,
3889 		0x9e00, 0x9e6c,
3890 		0x9e80, 0x9eec,
3891 		0x9f00, 0x9f6c,
3892 		0x9f80, 0x9fec,
3893 		0xd004, 0xd03c,
3894 		0xdfc0, 0xdfe0,
3895 		0xe000, 0xea7c,
3896 		0xf000, 0x11110,
3897 		0x11118, 0x11190,
3898 		0x19040, 0x1906c,
3899 		0x19078, 0x19080,
3900 		0x1908c, 0x19124,
3901 		0x19150, 0x191b0,
3902 		0x191d0, 0x191e8,
3903 		0x19238, 0x1924c,
3904 		0x193f8, 0x19474,
3905 		0x19490, 0x194f8,
3906 		0x19800, 0x19f30,
3907 		0x1a000, 0x1a06c,
3908 		0x1a0b0, 0x1a120,
3909 		0x1a128, 0x1a138,
3910 		0x1a190, 0x1a1c4,
3911 		0x1a1fc, 0x1a1fc,
3912 		0x1e040, 0x1e04c,
3913 		0x1e284, 0x1e28c,
3914 		0x1e2c0, 0x1e2c0,
3915 		0x1e2e0, 0x1e2e0,
3916 		0x1e300, 0x1e384,
3917 		0x1e3c0, 0x1e3c8,
3918 		0x1e440, 0x1e44c,
3919 		0x1e684, 0x1e68c,
3920 		0x1e6c0, 0x1e6c0,
3921 		0x1e6e0, 0x1e6e0,
3922 		0x1e700, 0x1e784,
3923 		0x1e7c0, 0x1e7c8,
3924 		0x1e840, 0x1e84c,
3925 		0x1ea84, 0x1ea8c,
3926 		0x1eac0, 0x1eac0,
3927 		0x1eae0, 0x1eae0,
3928 		0x1eb00, 0x1eb84,
3929 		0x1ebc0, 0x1ebc8,
3930 		0x1ec40, 0x1ec4c,
3931 		0x1ee84, 0x1ee8c,
3932 		0x1eec0, 0x1eec0,
3933 		0x1eee0, 0x1eee0,
3934 		0x1ef00, 0x1ef84,
3935 		0x1efc0, 0x1efc8,
3936 		0x1f040, 0x1f04c,
3937 		0x1f284, 0x1f28c,
3938 		0x1f2c0, 0x1f2c0,
3939 		0x1f2e0, 0x1f2e0,
3940 		0x1f300, 0x1f384,
3941 		0x1f3c0, 0x1f3c8,
3942 		0x1f440, 0x1f44c,
3943 		0x1f684, 0x1f68c,
3944 		0x1f6c0, 0x1f6c0,
3945 		0x1f6e0, 0x1f6e0,
3946 		0x1f700, 0x1f784,
3947 		0x1f7c0, 0x1f7c8,
3948 		0x1f840, 0x1f84c,
3949 		0x1fa84, 0x1fa8c,
3950 		0x1fac0, 0x1fac0,
3951 		0x1fae0, 0x1fae0,
3952 		0x1fb00, 0x1fb84,
3953 		0x1fbc0, 0x1fbc8,
3954 		0x1fc40, 0x1fc4c,
3955 		0x1fe84, 0x1fe8c,
3956 		0x1fec0, 0x1fec0,
3957 		0x1fee0, 0x1fee0,
3958 		0x1ff00, 0x1ff84,
3959 		0x1ffc0, 0x1ffc8,
3960 		0x20000, 0x2002c,
3961 		0x20100, 0x2013c,
3962 		0x20190, 0x201c8,
3963 		0x20200, 0x20318,
3964 		0x20400, 0x20528,
3965 		0x20540, 0x20614,
3966 		0x21000, 0x21040,
3967 		0x2104c, 0x21060,
3968 		0x210c0, 0x210ec,
3969 		0x21200, 0x21268,
3970 		0x21270, 0x21284,
3971 		0x212fc, 0x21388,
3972 		0x21400, 0x21404,
3973 		0x21500, 0x21518,
3974 		0x2152c, 0x2153c,
3975 		0x21550, 0x21554,
3976 		0x21600, 0x21600,
3977 		0x21608, 0x21628,
3978 		0x21630, 0x2163c,
3979 		0x21700, 0x2171c,
3980 		0x21780, 0x2178c,
3981 		0x21800, 0x21c38,
3982 		0x21c80, 0x21d7c,
3983 		0x21e00, 0x21e04,
3984 		0x22000, 0x2202c,
3985 		0x22100, 0x2213c,
3986 		0x22190, 0x221c8,
3987 		0x22200, 0x22318,
3988 		0x22400, 0x22528,
3989 		0x22540, 0x22614,
3990 		0x23000, 0x23040,
3991 		0x2304c, 0x23060,
3992 		0x230c0, 0x230ec,
3993 		0x23200, 0x23268,
3994 		0x23270, 0x23284,
3995 		0x232fc, 0x23388,
3996 		0x23400, 0x23404,
3997 		0x23500, 0x23518,
3998 		0x2352c, 0x2353c,
3999 		0x23550, 0x23554,
4000 		0x23600, 0x23600,
4001 		0x23608, 0x23628,
4002 		0x23630, 0x2363c,
4003 		0x23700, 0x2371c,
4004 		0x23780, 0x2378c,
4005 		0x23800, 0x23c38,
4006 		0x23c80, 0x23d7c,
4007 		0x23e00, 0x23e04,
4008 		0x24000, 0x2402c,
4009 		0x24100, 0x2413c,
4010 		0x24190, 0x241c8,
4011 		0x24200, 0x24318,
4012 		0x24400, 0x24528,
4013 		0x24540, 0x24614,
4014 		0x25000, 0x25040,
4015 		0x2504c, 0x25060,
4016 		0x250c0, 0x250ec,
4017 		0x25200, 0x25268,
4018 		0x25270, 0x25284,
4019 		0x252fc, 0x25388,
4020 		0x25400, 0x25404,
4021 		0x25500, 0x25518,
4022 		0x2552c, 0x2553c,
4023 		0x25550, 0x25554,
4024 		0x25600, 0x25600,
4025 		0x25608, 0x25628,
4026 		0x25630, 0x2563c,
4027 		0x25700, 0x2571c,
4028 		0x25780, 0x2578c,
4029 		0x25800, 0x25c38,
4030 		0x25c80, 0x25d7c,
4031 		0x25e00, 0x25e04,
4032 		0x26000, 0x2602c,
4033 		0x26100, 0x2613c,
4034 		0x26190, 0x261c8,
4035 		0x26200, 0x26318,
4036 		0x26400, 0x26528,
4037 		0x26540, 0x26614,
4038 		0x27000, 0x27040,
4039 		0x2704c, 0x27060,
4040 		0x270c0, 0x270ec,
4041 		0x27200, 0x27268,
4042 		0x27270, 0x27284,
4043 		0x272fc, 0x27388,
4044 		0x27400, 0x27404,
4045 		0x27500, 0x27518,
4046 		0x2752c, 0x2753c,
4047 		0x27550, 0x27554,
4048 		0x27600, 0x27600,
4049 		0x27608, 0x27628,
4050 		0x27630, 0x2763c,
4051 		0x27700, 0x2771c,
4052 		0x27780, 0x2778c,
4053 		0x27800, 0x27c38,
4054 		0x27c80, 0x27d7c,
4055 		0x27e00, 0x27e04
4056 	};
4057 	static const unsigned int t5_reg_ranges[] = {
4058 		0x1008, 0x1148,
4059 		0x1180, 0x11b4,
4060 		0x11fc, 0x123c,
4061 		0x1280, 0x173c,
4062 		0x1800, 0x18fc,
4063 		0x3000, 0x3028,
4064 		0x3060, 0x30d8,
4065 		0x30e0, 0x30fc,
4066 		0x3140, 0x357c,
4067 		0x35a8, 0x35cc,
4068 		0x35ec, 0x35ec,
4069 		0x3600, 0x5624,
4070 		0x56cc, 0x575c,
4071 		0x580c, 0x5814,
4072 		0x5890, 0x58bc,
4073 		0x5940, 0x59dc,
4074 		0x59fc, 0x5a18,
4075 		0x5a60, 0x5a9c,
4076 		0x5b94, 0x5bfc,
4077 		0x6000, 0x6040,
4078 		0x6058, 0x614c,
4079 		0x7700, 0x7798,
4080 		0x77c0, 0x78fc,
4081 		0x7b00, 0x7c54,
4082 		0x7d00, 0x7efc,
4083 		0x8dc0, 0x8de0,
4084 		0x8df8, 0x8e84,
4085 		0x8ea0, 0x8f84,
4086 		0x8fc0, 0x90f8,
4087 		0x9400, 0x9470,
4088 		0x9600, 0x96f4,
4089 		0x9800, 0x9808,
4090 		0x9820, 0x983c,
4091 		0x9850, 0x9864,
4092 		0x9c00, 0x9c6c,
4093 		0x9c80, 0x9cec,
4094 		0x9d00, 0x9d6c,
4095 		0x9d80, 0x9dec,
4096 		0x9e00, 0x9e6c,
4097 		0x9e80, 0x9eec,
4098 		0x9f00, 0x9f6c,
4099 		0x9f80, 0xa020,
4100 		0xd004, 0xd03c,
4101 		0xdfc0, 0xdfe0,
4102 		0xe000, 0x11088,
4103 		0x1109c, 0x11110,
4104 		0x11118, 0x1117c,
4105 		0x11190, 0x11204,
4106 		0x19040, 0x1906c,
4107 		0x19078, 0x19080,
4108 		0x1908c, 0x19124,
4109 		0x19150, 0x191b0,
4110 		0x191d0, 0x191e8,
4111 		0x19238, 0x19290,
4112 		0x193f8, 0x19474,
4113 		0x19490, 0x194cc,
4114 		0x194f0, 0x194f8,
4115 		0x19c00, 0x19c60,
4116 		0x19c94, 0x19e10,
4117 		0x19e50, 0x19f34,
4118 		0x19f40, 0x19f50,
4119 		0x19f90, 0x19fe4,
4120 		0x1a000, 0x1a06c,
4121 		0x1a0b0, 0x1a120,
4122 		0x1a128, 0x1a138,
4123 		0x1a190, 0x1a1c4,
4124 		0x1a1fc, 0x1a1fc,
4125 		0x1e008, 0x1e00c,
4126 		0x1e040, 0x1e04c,
4127 		0x1e284, 0x1e290,
4128 		0x1e2c0, 0x1e2c0,
4129 		0x1e2e0, 0x1e2e0,
4130 		0x1e300, 0x1e384,
4131 		0x1e3c0, 0x1e3c8,
4132 		0x1e408, 0x1e40c,
4133 		0x1e440, 0x1e44c,
4134 		0x1e684, 0x1e690,
4135 		0x1e6c0, 0x1e6c0,
4136 		0x1e6e0, 0x1e6e0,
4137 		0x1e700, 0x1e784,
4138 		0x1e7c0, 0x1e7c8,
4139 		0x1e808, 0x1e80c,
4140 		0x1e840, 0x1e84c,
4141 		0x1ea84, 0x1ea90,
4142 		0x1eac0, 0x1eac0,
4143 		0x1eae0, 0x1eae0,
4144 		0x1eb00, 0x1eb84,
4145 		0x1ebc0, 0x1ebc8,
4146 		0x1ec08, 0x1ec0c,
4147 		0x1ec40, 0x1ec4c,
4148 		0x1ee84, 0x1ee90,
4149 		0x1eec0, 0x1eec0,
4150 		0x1eee0, 0x1eee0,
4151 		0x1ef00, 0x1ef84,
4152 		0x1efc0, 0x1efc8,
4153 		0x1f008, 0x1f00c,
4154 		0x1f040, 0x1f04c,
4155 		0x1f284, 0x1f290,
4156 		0x1f2c0, 0x1f2c0,
4157 		0x1f2e0, 0x1f2e0,
4158 		0x1f300, 0x1f384,
4159 		0x1f3c0, 0x1f3c8,
4160 		0x1f408, 0x1f40c,
4161 		0x1f440, 0x1f44c,
4162 		0x1f684, 0x1f690,
4163 		0x1f6c0, 0x1f6c0,
4164 		0x1f6e0, 0x1f6e0,
4165 		0x1f700, 0x1f784,
4166 		0x1f7c0, 0x1f7c8,
4167 		0x1f808, 0x1f80c,
4168 		0x1f840, 0x1f84c,
4169 		0x1fa84, 0x1fa90,
4170 		0x1fac0, 0x1fac0,
4171 		0x1fae0, 0x1fae0,
4172 		0x1fb00, 0x1fb84,
4173 		0x1fbc0, 0x1fbc8,
4174 		0x1fc08, 0x1fc0c,
4175 		0x1fc40, 0x1fc4c,
4176 		0x1fe84, 0x1fe90,
4177 		0x1fec0, 0x1fec0,
4178 		0x1fee0, 0x1fee0,
4179 		0x1ff00, 0x1ff84,
4180 		0x1ffc0, 0x1ffc8,
4181 		0x30000, 0x30030,
4182 		0x30100, 0x30144,
4183 		0x30190, 0x301d0,
4184 		0x30200, 0x30318,
4185 		0x30400, 0x3052c,
4186 		0x30540, 0x3061c,
4187 		0x30800, 0x30834,
4188 		0x308c0, 0x30908,
4189 		0x30910, 0x309ac,
4190 		0x30a00, 0x30a2c,
4191 		0x30a44, 0x30a50,
4192 		0x30a74, 0x30c24,
4193 		0x30d00, 0x30d00,
4194 		0x30d08, 0x30d14,
4195 		0x30d1c, 0x30d20,
4196 		0x30d3c, 0x30d50,
4197 		0x31200, 0x3120c,
4198 		0x31220, 0x31220,
4199 		0x31240, 0x31240,
4200 		0x31600, 0x3160c,
4201 		0x31a00, 0x31a1c,
4202 		0x31e00, 0x31e20,
4203 		0x31e38, 0x31e3c,
4204 		0x31e80, 0x31e80,
4205 		0x31e88, 0x31ea8,
4206 		0x31eb0, 0x31eb4,
4207 		0x31ec8, 0x31ed4,
4208 		0x31fb8, 0x32004,
4209 		0x32200, 0x32200,
4210 		0x32208, 0x32240,
4211 		0x32248, 0x32280,
4212 		0x32288, 0x322c0,
4213 		0x322c8, 0x322fc,
4214 		0x32600, 0x32630,
4215 		0x32a00, 0x32abc,
4216 		0x32b00, 0x32b70,
4217 		0x33000, 0x33048,
4218 		0x33060, 0x3309c,
4219 		0x330f0, 0x33148,
4220 		0x33160, 0x3319c,
4221 		0x331f0, 0x332e4,
4222 		0x332f8, 0x333e4,
4223 		0x333f8, 0x33448,
4224 		0x33460, 0x3349c,
4225 		0x334f0, 0x33548,
4226 		0x33560, 0x3359c,
4227 		0x335f0, 0x336e4,
4228 		0x336f8, 0x337e4,
4229 		0x337f8, 0x337fc,
4230 		0x33814, 0x33814,
4231 		0x3382c, 0x3382c,
4232 		0x33880, 0x3388c,
4233 		0x338e8, 0x338ec,
4234 		0x33900, 0x33948,
4235 		0x33960, 0x3399c,
4236 		0x339f0, 0x33ae4,
4237 		0x33af8, 0x33b10,
4238 		0x33b28, 0x33b28,
4239 		0x33b3c, 0x33b50,
4240 		0x33bf0, 0x33c10,
4241 		0x33c28, 0x33c28,
4242 		0x33c3c, 0x33c50,
4243 		0x33cf0, 0x33cfc,
4244 		0x34000, 0x34030,
4245 		0x34100, 0x34144,
4246 		0x34190, 0x341d0,
4247 		0x34200, 0x34318,
4248 		0x34400, 0x3452c,
4249 		0x34540, 0x3461c,
4250 		0x34800, 0x34834,
4251 		0x348c0, 0x34908,
4252 		0x34910, 0x349ac,
4253 		0x34a00, 0x34a2c,
4254 		0x34a44, 0x34a50,
4255 		0x34a74, 0x34c24,
4256 		0x34d00, 0x34d00,
4257 		0x34d08, 0x34d14,
4258 		0x34d1c, 0x34d20,
4259 		0x34d3c, 0x34d50,
4260 		0x35200, 0x3520c,
4261 		0x35220, 0x35220,
4262 		0x35240, 0x35240,
4263 		0x35600, 0x3560c,
4264 		0x35a00, 0x35a1c,
4265 		0x35e00, 0x35e20,
4266 		0x35e38, 0x35e3c,
4267 		0x35e80, 0x35e80,
4268 		0x35e88, 0x35ea8,
4269 		0x35eb0, 0x35eb4,
4270 		0x35ec8, 0x35ed4,
4271 		0x35fb8, 0x36004,
4272 		0x36200, 0x36200,
4273 		0x36208, 0x36240,
4274 		0x36248, 0x36280,
4275 		0x36288, 0x362c0,
4276 		0x362c8, 0x362fc,
4277 		0x36600, 0x36630,
4278 		0x36a00, 0x36abc,
4279 		0x36b00, 0x36b70,
4280 		0x37000, 0x37048,
4281 		0x37060, 0x3709c,
4282 		0x370f0, 0x37148,
4283 		0x37160, 0x3719c,
4284 		0x371f0, 0x372e4,
4285 		0x372f8, 0x373e4,
4286 		0x373f8, 0x37448,
4287 		0x37460, 0x3749c,
4288 		0x374f0, 0x37548,
4289 		0x37560, 0x3759c,
4290 		0x375f0, 0x376e4,
4291 		0x376f8, 0x377e4,
4292 		0x377f8, 0x377fc,
4293 		0x37814, 0x37814,
4294 		0x3782c, 0x3782c,
4295 		0x37880, 0x3788c,
4296 		0x378e8, 0x378ec,
4297 		0x37900, 0x37948,
4298 		0x37960, 0x3799c,
4299 		0x379f0, 0x37ae4,
4300 		0x37af8, 0x37b10,
4301 		0x37b28, 0x37b28,
4302 		0x37b3c, 0x37b50,
4303 		0x37bf0, 0x37c10,
4304 		0x37c28, 0x37c28,
4305 		0x37c3c, 0x37c50,
4306 		0x37cf0, 0x37cfc,
4307 		0x38000, 0x38030,
4308 		0x38100, 0x38144,
4309 		0x38190, 0x381d0,
4310 		0x38200, 0x38318,
4311 		0x38400, 0x3852c,
4312 		0x38540, 0x3861c,
4313 		0x38800, 0x38834,
4314 		0x388c0, 0x38908,
4315 		0x38910, 0x389ac,
4316 		0x38a00, 0x38a2c,
4317 		0x38a44, 0x38a50,
4318 		0x38a74, 0x38c24,
4319 		0x38d00, 0x38d00,
4320 		0x38d08, 0x38d14,
4321 		0x38d1c, 0x38d20,
4322 		0x38d3c, 0x38d50,
4323 		0x39200, 0x3920c,
4324 		0x39220, 0x39220,
4325 		0x39240, 0x39240,
4326 		0x39600, 0x3960c,
4327 		0x39a00, 0x39a1c,
4328 		0x39e00, 0x39e20,
4329 		0x39e38, 0x39e3c,
4330 		0x39e80, 0x39e80,
4331 		0x39e88, 0x39ea8,
4332 		0x39eb0, 0x39eb4,
4333 		0x39ec8, 0x39ed4,
4334 		0x39fb8, 0x3a004,
4335 		0x3a200, 0x3a200,
4336 		0x3a208, 0x3a240,
4337 		0x3a248, 0x3a280,
4338 		0x3a288, 0x3a2c0,
4339 		0x3a2c8, 0x3a2fc,
4340 		0x3a600, 0x3a630,
4341 		0x3aa00, 0x3aabc,
4342 		0x3ab00, 0x3ab70,
4343 		0x3b000, 0x3b048,
4344 		0x3b060, 0x3b09c,
4345 		0x3b0f0, 0x3b148,
4346 		0x3b160, 0x3b19c,
4347 		0x3b1f0, 0x3b2e4,
4348 		0x3b2f8, 0x3b3e4,
4349 		0x3b3f8, 0x3b448,
4350 		0x3b460, 0x3b49c,
4351 		0x3b4f0, 0x3b548,
4352 		0x3b560, 0x3b59c,
4353 		0x3b5f0, 0x3b6e4,
4354 		0x3b6f8, 0x3b7e4,
4355 		0x3b7f8, 0x3b7fc,
4356 		0x3b814, 0x3b814,
4357 		0x3b82c, 0x3b82c,
4358 		0x3b880, 0x3b88c,
4359 		0x3b8e8, 0x3b8ec,
4360 		0x3b900, 0x3b948,
4361 		0x3b960, 0x3b99c,
4362 		0x3b9f0, 0x3bae4,
4363 		0x3baf8, 0x3bb10,
4364 		0x3bb28, 0x3bb28,
4365 		0x3bb3c, 0x3bb50,
4366 		0x3bbf0, 0x3bc10,
4367 		0x3bc28, 0x3bc28,
4368 		0x3bc3c, 0x3bc50,
4369 		0x3bcf0, 0x3bcfc,
4370 		0x3c000, 0x3c030,
4371 		0x3c100, 0x3c144,
4372 		0x3c190, 0x3c1d0,
4373 		0x3c200, 0x3c318,
4374 		0x3c400, 0x3c52c,
4375 		0x3c540, 0x3c61c,
4376 		0x3c800, 0x3c834,
4377 		0x3c8c0, 0x3c908,
4378 		0x3c910, 0x3c9ac,
4379 		0x3ca00, 0x3ca2c,
4380 		0x3ca44, 0x3ca50,
4381 		0x3ca74, 0x3cc24,
4382 		0x3cd00, 0x3cd00,
4383 		0x3cd08, 0x3cd14,
4384 		0x3cd1c, 0x3cd20,
4385 		0x3cd3c, 0x3cd50,
4386 		0x3d200, 0x3d20c,
4387 		0x3d220, 0x3d220,
4388 		0x3d240, 0x3d240,
4389 		0x3d600, 0x3d60c,
4390 		0x3da00, 0x3da1c,
4391 		0x3de00, 0x3de20,
4392 		0x3de38, 0x3de3c,
4393 		0x3de80, 0x3de80,
4394 		0x3de88, 0x3dea8,
4395 		0x3deb0, 0x3deb4,
4396 		0x3dec8, 0x3ded4,
4397 		0x3dfb8, 0x3e004,
4398 		0x3e200, 0x3e200,
4399 		0x3e208, 0x3e240,
4400 		0x3e248, 0x3e280,
4401 		0x3e288, 0x3e2c0,
4402 		0x3e2c8, 0x3e2fc,
4403 		0x3e600, 0x3e630,
4404 		0x3ea00, 0x3eabc,
4405 		0x3eb00, 0x3eb70,
4406 		0x3f000, 0x3f048,
4407 		0x3f060, 0x3f09c,
4408 		0x3f0f0, 0x3f148,
4409 		0x3f160, 0x3f19c,
4410 		0x3f1f0, 0x3f2e4,
4411 		0x3f2f8, 0x3f3e4,
4412 		0x3f3f8, 0x3f448,
4413 		0x3f460, 0x3f49c,
4414 		0x3f4f0, 0x3f548,
4415 		0x3f560, 0x3f59c,
4416 		0x3f5f0, 0x3f6e4,
4417 		0x3f6f8, 0x3f7e4,
4418 		0x3f7f8, 0x3f7fc,
4419 		0x3f814, 0x3f814,
4420 		0x3f82c, 0x3f82c,
4421 		0x3f880, 0x3f88c,
4422 		0x3f8e8, 0x3f8ec,
4423 		0x3f900, 0x3f948,
4424 		0x3f960, 0x3f99c,
4425 		0x3f9f0, 0x3fae4,
4426 		0x3faf8, 0x3fb10,
4427 		0x3fb28, 0x3fb28,
4428 		0x3fb3c, 0x3fb50,
4429 		0x3fbf0, 0x3fc10,
4430 		0x3fc28, 0x3fc28,
4431 		0x3fc3c, 0x3fc50,
4432 		0x3fcf0, 0x3fcfc,
4433 		0x40000, 0x4000c,
4434 		0x40040, 0x40068,
4435 		0x4007c, 0x40144,
4436 		0x40180, 0x4018c,
4437 		0x40200, 0x40298,
4438 		0x402ac, 0x4033c,
4439 		0x403f8, 0x403fc,
4440 		0x41304, 0x413c4,
4441 		0x41400, 0x4141c,
4442 		0x41480, 0x414d0,
4443 		0x44000, 0x44078,
4444 		0x440c0, 0x44278,
4445 		0x442c0, 0x44478,
4446 		0x444c0, 0x44678,
4447 		0x446c0, 0x44878,
4448 		0x448c0, 0x449fc,
4449 		0x45000, 0x45068,
4450 		0x45080, 0x45084,
4451 		0x450a0, 0x450b0,
4452 		0x45200, 0x45268,
4453 		0x45280, 0x45284,
4454 		0x452a0, 0x452b0,
4455 		0x460c0, 0x460e4,
4456 		0x47000, 0x4708c,
4457 		0x47200, 0x47250,
4458 		0x47400, 0x47420,
4459 		0x47600, 0x47618,
4460 		0x47800, 0x47814,
4461 		0x48000, 0x4800c,
4462 		0x48040, 0x48068,
4463 		0x4807c, 0x48144,
4464 		0x48180, 0x4818c,
4465 		0x48200, 0x48298,
4466 		0x482ac, 0x4833c,
4467 		0x483f8, 0x483fc,
4468 		0x49304, 0x493c4,
4469 		0x49400, 0x4941c,
4470 		0x49480, 0x494d0,
4471 		0x4c000, 0x4c078,
4472 		0x4c0c0, 0x4c278,
4473 		0x4c2c0, 0x4c478,
4474 		0x4c4c0, 0x4c678,
4475 		0x4c6c0, 0x4c878,
4476 		0x4c8c0, 0x4c9fc,
4477 		0x4d000, 0x4d068,
4478 		0x4d080, 0x4d084,
4479 		0x4d0a0, 0x4d0b0,
4480 		0x4d200, 0x4d268,
4481 		0x4d280, 0x4d284,
4482 		0x4d2a0, 0x4d2b0,
4483 		0x4e0c0, 0x4e0e4,
4484 		0x4f000, 0x4f08c,
4485 		0x4f200, 0x4f250,
4486 		0x4f400, 0x4f420,
4487 		0x4f600, 0x4f618,
4488 		0x4f800, 0x4f814,
4489 		0x50000, 0x500cc,
4490 		0x50400, 0x50400,
4491 		0x50800, 0x508cc,
4492 		0x50c00, 0x50c00,
4493 		0x51000, 0x5101c,
4494 		0x51300, 0x51308,
4495 	};
4496 
4497 	if (is_t4(sc)) {
4498 		reg_ranges = &t4_reg_ranges[0];
4499 		n = nitems(t4_reg_ranges);
4500 	} else {
4501 		reg_ranges = &t5_reg_ranges[0];
4502 		n = nitems(t5_reg_ranges);
4503 	}
4504 
4505 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4506 	for (i = 0; i < n; i += 2)
4507 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4508 }
4509 
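/*
 * Refresh the port's MAC statistics and the per-channel tunnel congestion
 * drop counts, rate-limited to once every 250ms.
 */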
4510 static void
4511 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4512 {
4513 	int i;
4514 	u_int v, tnl_cong_drops;
4515 	struct timeval tv;
4516 	const struct timeval interval = {0, 250000};	/* 250ms */
4517 
4518 	getmicrotime(&tv);
4519 	timevalsub(&tv, &interval);
4520 	if (timevalcmp(&tv, &pi->last_refreshed, <))
4521 		return;
4522 
4523 	tnl_cong_drops = 0;
4524 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4525 	for (i = 0; i < NCHAN; i++) {
4526 		if (pi->rx_chan_map & (1 << i)) {
4527 			mtx_lock(&sc->regwin_lock);
4528 			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4529 			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4530 			mtx_unlock(&sc->regwin_lock);
4531 			tnl_cong_drops += v;
4532 		}
4533 	}
4534 	pi->tnl_cong_drops = tnl_cong_drops;
4535 	getmicrotime(&pi->last_refreshed);
4536 }
4537 
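/*
 * Per-port callout that refreshes the port's statistics once a second for
 * as long as the interface is up and running.
 */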
4538 static void
4539 cxgbe_tick(void *arg)
4540 {
4541 	struct port_info *pi = arg;
4542 	struct adapter *sc = pi->adapter;
4543 	struct ifnet *ifp = pi->ifp;
4544 
4545 	PORT_LOCK(pi);
4546 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4547 		PORT_UNLOCK(pi);
4548 		return;	/* without scheduling another callout */
4549 	}
4550 
4551 	cxgbe_refresh_stats(sc, pi);
4552 
4553 	callout_schedule(&pi->tick, hz);
4554 	PORT_UNLOCK(pi);
4555 }
4556 
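/*
 * Event handler for VLAN configuration on one of our interfaces: point the
 * new VLAN ifnet's cookie back at its parent.
 */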
4557 static void
4558 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4559 {
4560 	struct ifnet *vlan;
4561 
4562 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4563 		return;
4564 
4565 	vlan = VLAN_DEVAT(ifp, vid);
4566 	VLAN_SETCOOKIE(vlan, ifp);
4567 }
4568 
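/*
 * Default handler for CPL opcodes without a registered handler: panic under
 * INVARIANTS, otherwise log the stray message and free the payload.
 */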
4569 static int
4570 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4571 {
4572 
4573 #ifdef INVARIANTS
4574 	panic("%s: opcode 0x%02x on iq %p with payload %p",
4575 	    __func__, rss->opcode, iq, m);
4576 #else
4577 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4578 	    __func__, rss->opcode, iq, m);
4579 	m_freem(m);
4580 #endif
4581 	return (EDOOFUS);
4582 }
4583 
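/*
 * Install a handler for the given CPL opcode, or restore the default with
 * h == NULL.  The store is atomic, so handlers may be swapped while traffic
 * is flowing.  A typical caller (handler name hypothetical) might do:
 *
 *	t4_register_cpl_handler(sc, CPL_RX_DATA, my_rx_data_handler);
 *	...
 *	t4_register_cpl_handler(sc, CPL_RX_DATA, NULL);
 */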
4584 int
4585 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4586 {
4587 	uintptr_t *loc, new;
4588 
4589 	if (opcode >= nitems(sc->cpl_handler))
4590 		return (EINVAL);
4591 
4592 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4593 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4594 	atomic_store_rel_ptr(loc, new);
4595 
4596 	return (0);
4597 }
4598 
4599 static int
4600 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4601 {
4602 
4603 #ifdef INVARIANTS
4604 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4605 #else
4606 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4607 	    __func__, iq, ctrl);
4608 #endif
4609 	return (EDOOFUS);
4610 }
4611 
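/*
 * Same idea for the adapter's single async notification handler.
 */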
4612 int
4613 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4614 {
4615 	uintptr_t *loc, new;
4616 
4617 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4618 	loc = (uintptr_t *) &sc->an_handler;
4619 	atomic_store_rel_ptr(loc, new);
4620 
4621 	return (0);
4622 }
4623 
4624 static int
4625 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4626 {
4627 	const struct cpl_fw6_msg *cpl =
4628 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4629 
4630 #ifdef INVARIANTS
4631 	panic("%s: fw_msg type %d", __func__, cpl->type);
4632 #else
4633 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4634 #endif
4635 	return (EDOOFUS);
4636 }
4637 
4638 int
4639 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4640 {
4641 	uintptr_t *loc, new;
4642 
4643 	if (type >= nitems(sc->fw_msg_handler))
4644 		return (EINVAL);
4645 
4646 	/*
4647 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4648 	 * handler dispatch table.  Reject any attempt to install a handler for
4649 	 * this subtype.
4650 	 */
4651 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4652 		return (EINVAL);
4653 
4654 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4655 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4656 	atomic_store_rel_ptr(loc, new);
4657 
4658 	return (0);
4659 }
4660 
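/*
 * Adapter-wide sysctls under dev.t4nex.X: device capabilities, run-time
 * tunables, and (when SBUF_DRAIN is available) diagnostic nodes.
 */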
4661 static int
4662 t4_sysctls(struct adapter *sc)
4663 {
4664 	struct sysctl_ctx_list *ctx;
4665 	struct sysctl_oid *oid;
4666 	struct sysctl_oid_list *children, *c0;
4667 	static char *caps[] = {
4668 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4669 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4670 		    "\6HASHFILTER\7ETHOFLD",
4671 		"\20\1TOE",				/* caps[2] toecaps */
4672 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4673 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4674 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4675 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4676 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4677 		    "\4PO_INITIAOR\5PO_TARGET"
4678 	};
4679 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4680 
4681 	ctx = device_get_sysctl_ctx(sc->dev);
4682 
4683 	/*
4684 	 * dev.t4nex.X.
4685 	 */
4686 	oid = device_get_sysctl_tree(sc->dev);
4687 	c0 = children = SYSCTL_CHILDREN(oid);
4688 
4689 	sc->sc_do_rxcopy = 1;
4690 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4691 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4692 
4693 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4694 	    sc->params.nports, "# of ports");
4695 
4696 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4697 	    NULL, chip_rev(sc), "chip hardware revision");
4698 
4699 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4700 	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
4701 
4702 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4703 	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
4704 
4705 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4706 	    sc->cfcsum, "config file checksum");
4707 
4708 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4709 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4710 	    sysctl_bitfield, "A", "available doorbells");
4711 
4712 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4713 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4714 	    sysctl_bitfield, "A", "available link capabilities");
4715 
4716 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4717 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4718 	    sysctl_bitfield, "A", "available NIC capabilities");
4719 
4720 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4721 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4722 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4723 
4724 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4725 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4726 	    sysctl_bitfield, "A", "available RDMA capabilities");
4727 
4728 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4729 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4730 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4731 
4732 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4733 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4734 	    sysctl_bitfield, "A", "available FCoE capabilities");
4735 
4736 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4737 	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4738 
4739 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4740 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4741 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4742 	    "interrupt holdoff timer values (us)");
4743 
4744 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4745 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4746 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4747 	    "interrupt holdoff packet counter values");
4748 
4749 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4750 	    NULL, sc->tids.nftids, "number of filters");
4751 
4752 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4753 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4754 	    "chip temperature (in Celsius)");
4755 
4756 	t4_sge_sysctls(sc, ctx, children);
4757 
4758 	sc->lro_timeout = 100;
4759 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4760 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4761 
4762 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW,
4763 	    &sc->debug_flags, 0, "flags to enable runtime debugging");
4764 
4765 #ifdef SBUF_DRAIN
4766 	/*
4767 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4768 	 */
4769 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4770 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4771 	    "logs and miscellaneous information");
4772 	children = SYSCTL_CHILDREN(oid);
4773 
4774 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4775 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4776 	    sysctl_cctrl, "A", "congestion control");
4777 
4778 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4779 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4780 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4781 
4782 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4783 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4784 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4785 
4786 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4787 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4788 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4789 
4790 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4791 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4792 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4793 
4794 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4795 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4796 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4797 
4798 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4799 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4800 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4801 
4802 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4803 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4804 	    sysctl_cim_la, "A", "CIM logic analyzer");
4805 
4806 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4807 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4808 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4809 
4810 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4811 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4812 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4813 
4814 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4815 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4816 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4817 
4818 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4819 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4820 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4821 
4822 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4823 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4824 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4825 
4826 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4827 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4828 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4829 
4830 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4831 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4832 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4833 
4834 	if (is_t5(sc)) {
4835 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4836 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4837 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4838 
4839 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4840 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4841 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4842 	}
4843 
4844 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4845 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4846 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4847 
4848 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4849 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4850 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4851 
4852 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4853 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4854 	    sysctl_cpl_stats, "A", "CPL statistics");
4855 
4856 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4857 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4858 	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
4859 
4860 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4861 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4862 	    sysctl_devlog, "A", "firmware's device log");
4863 
4864 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4865 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4866 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4867 
4868 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4869 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4870 	    sysctl_hw_sched, "A", "hardware scheduler");
4871 
4872 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4873 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4874 	    sysctl_l2t, "A", "hardware L2 table");
4875 
4876 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4877 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4878 	    sysctl_lb_stats, "A", "loopback statistics");
4879 
4880 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4881 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4882 	    sysctl_meminfo, "A", "memory regions");
4883 
4884 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4885 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4886 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4887 
4888 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4889 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4890 	    sysctl_path_mtus, "A", "path MTUs");
4891 
4892 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4893 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4894 	    sysctl_pm_stats, "A", "PM statistics");
4895 
4896 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4897 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4898 	    sysctl_rdma_stats, "A", "RDMA statistics");
4899 
4900 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4901 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4902 	    sysctl_tcp_stats, "A", "TCP statistics");
4903 
4904 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4905 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4906 	    sysctl_tids, "A", "TID information");
4907 
4908 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4909 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4910 	    sysctl_tp_err_stats, "A", "TP error statistics");
4911 
4912 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4913 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4914 	    sysctl_tp_la, "A", "TP logic analyzer");
4915 
4916 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4917 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4918 	    sysctl_tx_rate, "A", "Tx rate");
4919 
4920 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4921 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4922 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4923 
4924 	if (is_t5(sc)) {
4925 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4926 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4927 		    sysctl_wcwr_stats, "A", "write combined work requests");
4928 	}
4929 #endif
4930 
4931 #ifdef TCP_OFFLOAD
4932 	if (is_offload(sc)) {
4933 		/*
4934 		 * dev.t4nex.X.toe.
4935 		 */
4936 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4937 		    NULL, "TOE parameters");
4938 		children = SYSCTL_CHILDREN(oid);
4939 
4940 		sc->tt.sndbuf = 256 * 1024;
4941 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4942 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4943 
4944 		sc->tt.ddp = 0;
4945 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4946 		    &sc->tt.ddp, 0, "DDP allowed");
4947 
4948 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4949 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4950 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4951 
4952 		sc->tt.ddp_thres =
4953 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4954 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4955 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4956 
4957 		sc->tt.rx_coalesce = 1;
4958 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4959 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4960 
4961 		sc->tt.tx_align = 1;
4962 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
4963 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
4964 	}
4965 #endif
4966 
4967 
4968 	return (0);
4969 }
4970 
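/*
 * Per-port sysctls under dev.cxgbe.X, including the MPS register backed
 * counters under dev.cxgbe.X.stats.
 */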
4971 static int
4972 cxgbe_sysctls(struct port_info *pi)
4973 {
4974 	struct sysctl_ctx_list *ctx;
4975 	struct sysctl_oid *oid;
4976 	struct sysctl_oid_list *children;
4977 	struct adapter *sc = pi->adapter;
4978 
4979 	ctx = device_get_sysctl_ctx(pi->dev);
4980 
4981 	/*
4982 	 * dev.cxgbe.X.
4983 	 */
4984 	oid = device_get_sysctl_tree(pi->dev);
4985 	children = SYSCTL_CHILDREN(oid);
4986 
4987 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4988 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4989 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4990 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4991 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4992 		    "PHY temperature (in Celsius)");
4993 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4994 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4995 		    "PHY firmware version");
4996 	}
4997 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4998 	    &pi->nrxq, 0, "# of rx queues");
4999 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5000 	    &pi->ntxq, 0, "# of tx queues");
5001 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5002 	    &pi->first_rxq, 0, "index of first rx queue");
5003 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5004 	    &pi->first_txq, 0, "index of first tx queue");
5005 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
5006 	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
5007 	    "Reserve queue 0 for non-flowid packets");
5008 
5009 #ifdef TCP_OFFLOAD
5010 	if (is_offload(sc)) {
5011 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5012 		    &pi->nofldrxq, 0,
5013 		    "# of rx queues for offloaded TCP connections");
5014 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5015 		    &pi->nofldtxq, 0,
5016 		    "# of tx queues for offloaded TCP connections");
5017 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5018 		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
5019 		    "index of first TOE rx queue");
5020 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5021 		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
5022 		    "index of first TOE tx queue");
5023 	}
5024 #endif
5025 #ifdef DEV_NETMAP
5026 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5027 	    &pi->nnmrxq, 0, "# of rx queues for netmap");
5028 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5029 	    &pi->nnmtxq, 0, "# of tx queues for netmap");
5030 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5031 	    CTLFLAG_RD, &pi->first_nm_rxq, 0,
5032 	    "index of first netmap rx queue");
5033 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5034 	    CTLFLAG_RD, &pi->first_nm_txq, 0,
5035 	    "index of first netmap tx queue");
5036 #endif
5037 
5038 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5039 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
5040 	    "holdoff timer index");
5041 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5042 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
5043 	    "holdoff packet counter index");
5044 
5045 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5046 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
5047 	    "rx queue size");
5048 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5049 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
5050 	    "tx queue size");
5051 
5052 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5053 	    CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
5054 	    "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5055 
5056 	/*
5057 	 * dev.cxgbe.X.stats.
5058 	 */
5059 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5060 	    NULL, "port statistics");
5061 	children = SYSCTL_CHILDREN(oid);
5062 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5063 	    &pi->tx_parse_error, 0,
5064 	    "# of tx packets with invalid length or # of segments");
5065 
5066 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5067 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5068 	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5069 	    sysctl_handle_t4_reg64, "QU", desc)
5070 
5071 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5072 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5073 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5074 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5075 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5076 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5077 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5078 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5079 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5080 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5081 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5082 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5083 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5084 	    "# of tx frames in this range",
5085 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5086 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5087 	    "# of tx frames in this range",
5088 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5089 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5090 	    "# of tx frames in this range",
5091 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5092 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5093 	    "# of tx frames in this range",
5094 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5095 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5096 	    "# of tx frames in this range",
5097 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5098 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5099 	    "# of tx frames in this range",
5100 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5101 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5102 	    "# of tx frames in this range",
5103 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5104 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5105 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5106 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5107 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5108 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5109 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5110 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5111 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5112 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5113 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5114 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5115 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5116 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5117 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5118 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5119 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5120 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5121 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5122 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5123 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5124 
5125 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5126 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5127 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5128 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5129 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5130 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5131 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5132 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5133 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5134 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5135 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5136 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5137 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5138 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5139 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5140 	    "# of frames received with bad FCS",
5141 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5142 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5143 	    "# of frames received with length error",
5144 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5145 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5146 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5147 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5148 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5149 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5150 	    "# of rx frames in this range",
5151 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5152 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5153 	    "# of rx frames in this range",
5154 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5155 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5156 	    "# of rx frames in this range",
5157 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5158 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5159 	    "# of rx frames in this range",
5160 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5161 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5162 	    "# of rx frames in this range",
5163 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5164 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5165 	    "# of rx frames in this range",
5166 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5167 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5168 	    "# of rx frames in this range",
5169 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5170 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5171 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5172 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5173 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5174 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5175 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5176 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5177 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5178 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5179 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5180 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5181 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5182 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5183 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5184 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5185 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5186 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5187 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5188 
5189 #undef SYSCTL_ADD_T4_REG64
5190 
5191 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5192 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5193 	    &pi->stats.name, desc)
5194 
5195 	/* We get these from port_stats and they may be stale by up to 1s. */
5196 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5197 	    "# drops due to buffer-group 0 overflows");
5198 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5199 	    "# drops due to buffer-group 1 overflows");
5200 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5201 	    "# drops due to buffer-group 2 overflows");
5202 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5203 	    "# drops due to buffer-group 3 overflows");
5204 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5205 	    "# of buffer-group 0 truncated packets");
5206 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5207 	    "# of buffer-group 1 truncated packets");
5208 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5209 	    "# of buffer-group 2 truncated packets");
5210 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5211 	    "# of buffer-group 3 truncated packets");
5212 
5213 #undef SYSCTL_ADD_T4_PORTSTAT
5214 
5215 	return (0);
5216 }
5217 
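/*
 * Display an array of ints (arg1 is the array, arg2 its size in bytes) as a
 * space separated list.
 */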
5218 static int
5219 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5220 {
5221 	int rc, *i, space = 0;
5222 	struct sbuf sb;
5223 
5224 	sbuf_new_for_sysctl(&sb, NULL, 64, req);
5225 	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5226 		if (space)
5227 			sbuf_printf(&sb, " ");
5228 		sbuf_printf(&sb, "%d", *i);
5229 		space = 1;
5230 	}
5231 	rc = sbuf_finish(&sb);
5232 	sbuf_delete(&sb);
5233 	return (rc);
5234 }
5235 
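/*
 * Display arg2 as a bit field using the %b format string passed in arg1.
 */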
5236 static int
5237 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5238 {
5239 	int rc;
5240 	struct sbuf *sb;
5241 
5242 	rc = sysctl_wire_old_buffer(req, 0);
5243 	if (rc != 0)
5244 		return (rc);
5245 
5246 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5247 	if (sb == NULL)
5248 		return (ENOMEM);
5249 
5250 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5251 	rc = sbuf_finish(sb);
5252 	sbuf_delete(sb);
5253 
5254 	return (rc);
5255 }
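/*
 * Read a value from the 10GBASE-T PHY over MDIO: its temperature sensor
 * when arg2 is 0 and its firmware version when arg2 is 1 (see the callers
 * in cxgbe_sysctls).
 */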
5256 
5257 static int
5258 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5259 {
5260 	struct port_info *pi = arg1;
5261 	int op = arg2;
5262 	struct adapter *sc = pi->adapter;
5263 	u_int v;
5264 	int rc;
5265 
5266 	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
5267 	if (rc)
5268 		return (rc);
5269 	/* XXX: magic numbers */
5270 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5271 	    &v);
5272 	end_synchronized_op(sc, 0);
5273 	if (rc)
5274 		return (rc);
5275 	if (op == 0)
5276 		v /= 256;
5277 
5278 	rc = sysctl_handle_int(oidp, &v, 0, req);
5279 	return (rc);
5280 }
5281 
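/*
 * Set or clear the flag that reserves tx queue 0 for packets without a
 * flowid; reservation is only allowed when the port has multiple tx queues.
 */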
5282 static int
5283 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5284 {
5285 	struct port_info *pi = arg1;
5286 	int rc, val;
5287 
5288 	val = pi->rsrv_noflowq;
5289 	rc = sysctl_handle_int(oidp, &val, 0, req);
5290 	if (rc != 0 || req->newptr == NULL)
5291 		return (rc);
5292 
5293 	if ((val >= 1) && (pi->ntxq > 1))
5294 		pi->rsrv_noflowq = 1;
5295 	else
5296 		pi->rsrv_noflowq = 0;
5297 
5298 	return (rc);
5299 }
5300 
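/*
 * Change the interrupt holdoff timer index on all of the port's rx queues.
 * Unlike the packet counter index this can be changed while the queues are
 * active, using an atomic store where the platform provides one.
 */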
5301 static int
5302 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5303 {
5304 	struct port_info *pi = arg1;
5305 	struct adapter *sc = pi->adapter;
5306 	int idx, rc, i;
5307 	struct sge_rxq *rxq;
5308 #ifdef TCP_OFFLOAD
5309 	struct sge_ofld_rxq *ofld_rxq;
5310 #endif
5311 	uint8_t v;
5312 
5313 	idx = pi->tmr_idx;
5314 
5315 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5316 	if (rc != 0 || req->newptr == NULL)
5317 		return (rc);
5318 
5319 	if (idx < 0 || idx >= SGE_NTIMERS)
5320 		return (EINVAL);
5321 
5322 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5323 	    "t4tmr");
5324 	if (rc)
5325 		return (rc);
5326 
5327 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
5328 	for_each_rxq(pi, i, rxq) {
5329 #ifdef atomic_store_rel_8
5330 		atomic_store_rel_8(&rxq->iq.intr_params, v);
5331 #else
5332 		rxq->iq.intr_params = v;
5333 #endif
5334 	}
5335 #ifdef TCP_OFFLOAD
5336 	for_each_ofld_rxq(pi, i, ofld_rxq) {
5337 #ifdef atomic_store_rel_8
5338 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5339 #else
5340 		ofld_rxq->iq.intr_params = v;
5341 #endif
5342 	}
5343 #endif
5344 	pi->tmr_idx = idx;
5345 
5346 	end_synchronized_op(sc, LOCK_HELD);
5347 	return (0);
5348 }
5349 
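/*
 * The packet-count threshold index and the rx/tx queue sizes below all
 * follow the same pattern: the new value is validated, then applied only if
 * the port's queues haven't been created yet (EBUSY otherwise).
 */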
5350 static int
5351 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5352 {
5353 	struct port_info *pi = arg1;
5354 	struct adapter *sc = pi->adapter;
5355 	int idx, rc;
5356 
5357 	idx = pi->pktc_idx;
5358 
5359 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5360 	if (rc != 0 || req->newptr == NULL)
5361 		return (rc);
5362 
5363 	if (idx < -1 || idx >= SGE_NCOUNTERS)
5364 		return (EINVAL);
5365 
5366 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5367 	    "t4pktc");
5368 	if (rc)
5369 		return (rc);
5370 
5371 	if (pi->flags & PORT_INIT_DONE)
5372 		rc = EBUSY; /* cannot be changed once the queues are created */
5373 	else
5374 		pi->pktc_idx = idx;
5375 
5376 	end_synchronized_op(sc, LOCK_HELD);
5377 	return (rc);
5378 }
5379 
5380 static int
5381 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5382 {
5383 	struct port_info *pi = arg1;
5384 	struct adapter *sc = pi->adapter;
5385 	int qsize, rc;
5386 
5387 	qsize = pi->qsize_rxq;
5388 
5389 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5390 	if (rc != 0 || req->newptr == NULL)
5391 		return (rc);
5392 
5393 	if (qsize < 128 || (qsize & 7))
5394 		return (EINVAL);
5395 
5396 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5397 	    "t4rxqs");
5398 	if (rc)
5399 		return (rc);
5400 
5401 	if (pi->flags & PORT_INIT_DONE)
5402 		rc = EBUSY; /* cannot be changed once the queues are created */
5403 	else
5404 		pi->qsize_rxq = qsize;
5405 
5406 	end_synchronized_op(sc, LOCK_HELD);
5407 	return (rc);
5408 }
5409 
5410 static int
5411 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5412 {
5413 	struct port_info *pi = arg1;
5414 	struct adapter *sc = pi->adapter;
5415 	int qsize, rc;
5416 
5417 	qsize = pi->qsize_txq;
5418 
5419 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5420 	if (rc != 0 || req->newptr == NULL)
5421 		return (rc);
5422 
5423 	if (qsize < 128 || qsize > 65536)
5424 		return (EINVAL);
5425 
5426 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5427 	    "t4txqs");
5428 	if (rc)
5429 		return (rc);
5430 
5431 	if (pi->flags & PORT_INIT_DONE)
5432 		rc = EBUSY; /* cannot be changed once the queues are created */
5433 	else
5434 		pi->qsize_txq = qsize;
5435 
5436 	end_synchronized_op(sc, LOCK_HELD);
5437 	return (rc);
5438 }
5439 
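/*
 * Pause frame (flow control) settings, encoded as a 2-bit value with bit 0
 * for PAUSE_RX and bit 1 for PAUSE_TX.  Reads decode the active settings
 * with "%b"; writes take a single digit 0-3 and reapply the link
 * configuration with the new flow-control request, e.g. writing "3"
 * requests pause in both directions.
 */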
5440 static int
5441 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5442 {
5443 	struct port_info *pi = arg1;
5444 	struct adapter *sc = pi->adapter;
5445 	struct link_config *lc = &pi->link_cfg;
5446 	int rc;
5447 
5448 	if (req->newptr == NULL) {
5449 		struct sbuf *sb;
5450 		static const char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5451 
5452 		rc = sysctl_wire_old_buffer(req, 0);
5453 		if (rc != 0)
5454 			return (rc);
5455 
5456 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5457 		if (sb == NULL)
5458 			return (ENOMEM);
5459 
5460 		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5461 		rc = sbuf_finish(sb);
5462 		sbuf_delete(sb);
5463 	} else {
5464 		char s[2];
5465 		int n;
5466 
5467 		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5468 		s[1] = 0;
5469 
5470 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5471 		if (rc != 0)
5472 			return (rc);
5473 
5474 		if (s[1] != 0)
5475 			return (EINVAL);
5476 		if (s[0] < '0' || s[0] > '9')
5477 			return (EINVAL);	/* not a number */
5478 		n = s[0] - '0';
5479 		if (n & ~(PAUSE_TX | PAUSE_RX))
5480 			return (EINVAL);	/* some other bit is set too */
5481 
5482 		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE");
5483 		if (rc)
5484 			return (rc);
5485 		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5486 			int link_ok = lc->link_ok;
5487 
5488 			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5489 			lc->requested_fc |= n;
5490 			rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
5491 			lc->link_ok = link_ok;	/* restore */
5492 		}
5493 		end_synchronized_op(sc, 0);
5494 	}
5495 
5496 	return (rc);
5497 }
5498 
5499 static int
5500 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5501 {
5502 	struct adapter *sc = arg1;
5503 	int reg = arg2;
5504 	uint64_t val;
5505 
5506 	val = t4_read_reg64(sc, reg);
5507 
5508 	return (sysctl_handle_64(oidp, &val, 0, req));
5509 }
5510 
5511 static int
5512 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5513 {
5514 	struct adapter *sc = arg1;
5515 	int rc, t;
5516 	uint32_t param, val;
5517 
5518 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5519 	if (rc)
5520 		return (rc);
5521 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5522 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5523 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5524 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5525 	end_synchronized_op(sc, 0);
5526 	if (rc)
5527 		return (rc);
5528 
5529 	/* unknown is returned as 0 but we display -1 in that case */
5530 	t = val == 0 ? -1 : val;
5531 
5532 	rc = sysctl_handle_int(oidp, &t, 0, req);
5533 	return (rc);
5534 }
5535 
5536 #ifdef SBUF_DRAIN
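/*
 * Dumps the TP congestion control table: for each of the NCCTRL_WIN
 * windows, the 16 per-MTU increment values, followed by the window's a_wnd
 * parameter and its decrement factor (b_wnd, printed via dec_fac[]).
 */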
5537 static int
5538 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5539 {
5540 	struct adapter *sc = arg1;
5541 	struct sbuf *sb;
5542 	int rc, i;
5543 	uint16_t incr[NMTUS][NCCTRL_WIN];
5544 	static const char *dec_fac[] = {
5545 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5546 		"0.9375"
5547 	};
5548 
5549 	rc = sysctl_wire_old_buffer(req, 0);
5550 	if (rc != 0)
5551 		return (rc);
5552 
5553 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5554 	if (sb == NULL)
5555 		return (ENOMEM);
5556 
5557 	t4_read_cong_tbl(sc, incr);
5558 
5559 	for (i = 0; i < NCCTRL_WIN; ++i) {
5560 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5561 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
5562 		    incr[5][i], incr[6][i], incr[7][i]);
5563 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
5564 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
5565 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
5566 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
5567 	}
5568 
5569 	rc = sbuf_finish(sb);
5570 	sbuf_delete(sb);
5571 
5572 	return (rc);
5573 }
5574 
5575 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5576 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
5577 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
5578 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
5579 };
5580 
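/*
 * Dumps one CIM inbound or outbound queue.  arg2 indexes the combined
 * qname[] table above: values below CIM_NUM_IBQ select an IBQ, the rest an
 * OBQ (T5 has two extra OBQs, SGE0-RX and SGE1-RX).
 */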
5581 static int
5582 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5583 {
5584 	struct adapter *sc = arg1;
5585 	struct sbuf *sb;
5586 	int rc, i, n, qid = arg2;
5587 	uint32_t *buf, *p;
5588 	const char *qtype;
5589 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5590 
5591 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5592 	    ("%s: bad qid %d\n", __func__, qid));
5593 
5594 	if (qid < CIM_NUM_IBQ) {
5595 		/* inbound queue */
5596 		qtype = "IBQ";
5597 		n = 4 * CIM_IBQ_SIZE;
5598 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5599 		rc = t4_read_cim_ibq(sc, qid, buf, n);
5600 	} else {
5601 		/* outbound queue */
5602 		qtype = "OBQ";
5603 		qid -= CIM_NUM_IBQ;
5604 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5605 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5606 		rc = t4_read_cim_obq(sc, qid, buf, n);
5607 	}
5608 
5609 	if (rc < 0) {
5610 		rc = -rc;
5611 		goto done;
5612 	}
5613 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5614 
5615 	rc = sysctl_wire_old_buffer(req, 0);
5616 	if (rc != 0)
5617 		goto done;
5618 
5619 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5620 	if (sb == NULL) {
5621 		rc = ENOMEM;
5622 		goto done;
5623 	}
5624 
5625 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5626 	for (i = 0, p = buf; i < n; i += 16, p += 4)
5627 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5628 		    p[2], p[3]);
5629 
5630 	rc = sbuf_finish(sb);
5631 	sbuf_delete(sb);
5632 done:
5633 	free(buf, M_CXGBE);
5634 	return (rc);
5635 }
5636 
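/*
 * Dumps the CIM logic-analyzer capture.  Each 8-word entry is shown either
 * as three compact PC-only rows (when F_UPDBGLACAPTPCONLY is set in the LA
 * config) or as a single row with the LS0 status, address, and data.
 */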
5637 static int
5638 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5639 {
5640 	struct adapter *sc = arg1;
5641 	u_int cfg;
5642 	struct sbuf *sb;
5643 	uint32_t *buf, *p;
5644 	int rc;
5645 
5646 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5647 	if (rc != 0)
5648 		return (rc);
5649 
5650 	rc = sysctl_wire_old_buffer(req, 0);
5651 	if (rc != 0)
5652 		return (rc);
5653 
5654 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5655 	if (sb == NULL)
5656 		return (ENOMEM);
5657 
5658 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5659 	    M_ZERO | M_WAITOK);
5660 
5661 	rc = -t4_cim_read_la(sc, buf, NULL);
5662 	if (rc != 0)
5663 		goto done;
5664 
5665 	sbuf_printf(sb, "Status   Data      PC%s",
5666 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5667 	    "     LS0Stat  LS0Addr             LS0Data");
5668 
5669 	KASSERT((sc->params.cim_la_size & 7) == 0,
5670 	    ("%s: p will walk off the end of buf", __func__));
5671 
5672 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5673 		if (cfg & F_UPDBGLACAPTPCONLY) {
5674 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5675 			    p[6], p[7]);
5676 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5677 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5678 			    p[4] & 0xff, p[5] >> 8);
5679 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5680 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5681 			    p[1] & 0xf, p[2] >> 4);
5682 		} else {
5683 			sbuf_printf(sb,
5684 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5685 			    "%08x%08x%08x%08x",
5686 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5687 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5688 			    p[6], p[7]);
5689 		}
5690 	}
5691 
5692 	rc = sbuf_finish(sb);
5693 	sbuf_delete(sb);
5694 done:
5695 	free(buf, M_CXGBE);
5696 	return (rc);
5697 }
5698 
5699 static int
5700 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5701 {
5702 	struct adapter *sc = arg1;
5703 	u_int i;
5704 	struct sbuf *sb;
5705 	uint32_t *buf, *p;
5706 	int rc;
5707 
5708 	rc = sysctl_wire_old_buffer(req, 0);
5709 	if (rc != 0)
5710 		return (rc);
5711 
5712 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5713 	if (sb == NULL)
5714 		return (ENOMEM);
5715 
5716 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5717 	    M_ZERO | M_WAITOK);
5718 
5719 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5720 	p = buf;
5721 
5722 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5723 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5724 		    p[1], p[0]);
5725 	}
5726 
5727 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5728 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5729 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5730 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5731 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5732 		    (p[1] >> 2) | ((p[2] & 3) << 30),
5733 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5734 		    p[0] & 1);
5735 	}
5736 
5737 	rc = sbuf_finish(sb);
5738 	sbuf_delete(sb);
5739 	free(buf, M_CXGBE);
5740 	return (rc);
5741 }
5742 
5743 static int
5744 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5745 {
5746 	struct adapter *sc = arg1;
5747 	u_int i;
5748 	struct sbuf *sb;
5749 	uint32_t *buf, *p;
5750 	int rc;
5751 
5752 	rc = sysctl_wire_old_buffer(req, 0);
5753 	if (rc != 0)
5754 		return (rc);
5755 
5756 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5757 	if (sb == NULL)
5758 		return (ENOMEM);
5759 
5760 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5761 	    M_ZERO | M_WAITOK);
5762 
5763 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5764 	p = buf;
5765 
5766 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5767 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5768 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5769 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5770 		    p[4], p[3], p[2], p[1], p[0]);
5771 	}
5772 
5773 	sbuf_printf(sb, "\n\nCntl ID               Data");
5774 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5775 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5776 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5777 	}
5778 
5779 	rc = sbuf_finish(sb);
5780 	sbuf_delete(sb);
5781 	free(buf, M_CXGBE);
5782 	return (rc);
5783 }
5784 
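/*
 * Prints the CIM queue configuration: base, size, and threshold for each
 * IBQ plus read/write pointers and SOP/EOP counts, then the same (minus
 * thresholds) for each OBQ.  T5 uses the shadow register addresses.
 */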
5785 static int
5786 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5787 {
5788 	struct adapter *sc = arg1;
5789 	struct sbuf *sb;
5790 	int rc, i;
5791 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5792 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5793 	uint16_t thres[CIM_NUM_IBQ];
5794 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5795 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5796 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5797 
5798 	if (is_t4(sc)) {
5799 		cim_num_obq = CIM_NUM_OBQ;
5800 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5801 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5802 	} else {
5803 		cim_num_obq = CIM_NUM_OBQ_T5;
5804 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5805 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5806 	}
5807 	nq = CIM_NUM_IBQ + cim_num_obq;
5808 
5809 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5810 	if (rc == 0)
5811 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5812 	if (rc != 0)
5813 		return (rc);
5814 
5815 	t4_read_cimq_cfg(sc, base, size, thres);
5816 
5817 	rc = sysctl_wire_old_buffer(req, 0);
5818 	if (rc != 0)
5819 		return (rc);
5820 
5821 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5822 	if (sb == NULL)
5823 		return (ENOMEM);
5824 
5825 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5826 
5827 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5828 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5829 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5830 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5831 		    G_QUEREMFLITS(p[2]) * 16);
5832 	for ( ; i < nq; i++, p += 4, wr += 2)
5833 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5834 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5835 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5836 		    G_QUEREMFLITS(p[2]) * 16);
5837 
5838 	rc = sbuf_finish(sb);
5839 	sbuf_delete(sb);
5840 
5841 	return (rc);
5842 }
5843 
5844 static int
5845 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5846 {
5847 	struct adapter *sc = arg1;
5848 	struct sbuf *sb;
5849 	int rc;
5850 	struct tp_cpl_stats stats;
5851 
5852 	rc = sysctl_wire_old_buffer(req, 0);
5853 	if (rc != 0)
5854 		return (rc);
5855 
5856 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5857 	if (sb == NULL)
5858 		return (ENOMEM);
5859 
5860 	t4_tp_get_cpl_stats(sc, &stats);
5861 
5862 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5863 	    "channel 3\n");
5864 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5865 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5866 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5867 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5868 
5869 	rc = sbuf_finish(sb);
5870 	sbuf_delete(sb);
5871 
5872 	return (rc);
5873 }
5874 
5875 static int
5876 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5877 {
5878 	struct adapter *sc = arg1;
5879 	struct sbuf *sb;
5880 	int rc;
5881 	struct tp_usm_stats stats;
5882 
5883 	rc = sysctl_wire_old_buffer(req, 0);
5884 	if (rc != 0)
5885 		return (rc);
5886 
5887 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5888 	if (sb == NULL)
5889 		return (ENOMEM);
5890 
5891 	t4_get_usm_stats(sc, &stats);
5892 
5893 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5894 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5895 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5896 
5897 	rc = sbuf_finish(sb);
5898 	sbuf_delete(sb);
5899 
5900 	return (rc);
5901 }
5902 
5903 static const char *devlog_level_strings[] = {
5904 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5905 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5906 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5907 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5908 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5909 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5910 };
5911 
5912 static const char *devlog_facility_strings[] = {
5913 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5914 	[FW_DEVLOG_FACILITY_CF]		= "CF",
5915 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5916 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5917 	[FW_DEVLOG_FACILITY_RES]	= "RES",
5918 	[FW_DEVLOG_FACILITY_HW]		= "HW",
5919 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5920 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5921 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5922 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5923 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5924 	[FW_DEVLOG_FACILITY_VI]		= "VI",
5925 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5926 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5927 	[FW_DEVLOG_FACILITY_TM]		= "TM",
5928 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5929 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5930 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5931 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5932 	[FW_DEVLOG_FACILITY_RI]		= "RI",
5933 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5934 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5935 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5936 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5937 };
5938 
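/*
 * The firmware device log is a fixed-size ring of fw_devlog_e entries in
 * adapter memory; if the firmware didn't report its location the EDC0
 * defaults below are assumed.  A first pass byteswaps each entry and finds
 * the oldest timestamp; the second pass prints from there, wrapping around
 * until it is back at the start.
 */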
5939 static int
5940 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5941 {
5942 	struct adapter *sc = arg1;
5943 	struct devlog_params *dparams = &sc->params.devlog;
5944 	struct fw_devlog_e *buf, *e;
5945 	int i, j, rc, nentries, first = 0, m;
5946 	struct sbuf *sb;
5947 	uint64_t ftstamp = UINT64_MAX;
5948 
5949 	if (dparams->start == 0) {
5950 		dparams->memtype = FW_MEMTYPE_EDC0;
5951 		dparams->start = 0x84000;
5952 		dparams->size = 32768;
5953 	}
5954 
5955 	nentries = dparams->size / sizeof(struct fw_devlog_e);
5956 
5957 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5958 	if (buf == NULL)
5959 		return (ENOMEM);
5960 
5961 	m = fwmtype_to_hwmtype(dparams->memtype);
5962 	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5963 	if (rc != 0)
5964 		goto done;
5965 
5966 	for (i = 0; i < nentries; i++) {
5967 		e = &buf[i];
5968 
5969 		if (e->timestamp == 0)
5970 			break;	/* end */
5971 
5972 		e->timestamp = be64toh(e->timestamp);
5973 		e->seqno = be32toh(e->seqno);
5974 		for (j = 0; j < 8; j++)
5975 			e->params[j] = be32toh(e->params[j]);
5976 
5977 		if (e->timestamp < ftstamp) {
5978 			ftstamp = e->timestamp;
5979 			first = i;
5980 		}
5981 	}
5982 
5983 	if (buf[first].timestamp == 0)
5984 		goto done;	/* nothing in the log */
5985 
5986 	rc = sysctl_wire_old_buffer(req, 0);
5987 	if (rc != 0)
5988 		goto done;
5989 
5990 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5991 	if (sb == NULL) {
5992 		rc = ENOMEM;
5993 		goto done;
5994 	}
5995 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5996 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5997 
5998 	i = first;
5999 	do {
6000 		e = &buf[i];
6001 		if (e->timestamp == 0)
6002 			break;	/* end */
6003 
6004 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
6005 		    e->seqno, e->timestamp,
6006 		    (e->level < nitems(devlog_level_strings) ?
6007 			devlog_level_strings[e->level] : "UNKNOWN"),
6008 		    (e->facility < nitems(devlog_facility_strings) ?
6009 			devlog_facility_strings[e->facility] : "UNKNOWN"));
6010 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6011 		    e->params[2], e->params[3], e->params[4],
6012 		    e->params[5], e->params[6], e->params[7]);
6013 
6014 		if (++i == nentries)
6015 			i = 0;
6016 	} while (i != first);
6017 
6018 	rc = sbuf_finish(sb);
6019 	sbuf_delete(sb);
6020 done:
6021 	free(buf, M_CXGBE);
6022 	return (rc);
6023 }
6024 
6025 static int
6026 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6027 {
6028 	struct adapter *sc = arg1;
6029 	struct sbuf *sb;
6030 	int rc;
6031 	struct tp_fcoe_stats stats[4];
6032 
6033 	rc = sysctl_wire_old_buffer(req, 0);
6034 	if (rc != 0)
6035 		return (rc);
6036 
6037 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6038 	if (sb == NULL)
6039 		return (ENOMEM);
6040 
6041 	t4_get_fcoe_stats(sc, 0, &stats[0]);
6042 	t4_get_fcoe_stats(sc, 1, &stats[1]);
6043 	t4_get_fcoe_stats(sc, 2, &stats[2]);
6044 	t4_get_fcoe_stats(sc, 3, &stats[3]);
6045 
6046 	sbuf_printf(sb, "                   channel 0        channel 1        "
6047 	    "channel 2        channel 3\n");
6048 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
6049 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
6050 	    stats[3].octetsDDP);
6051 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
6052 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
6053 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
6054 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
6055 	    stats[3].framesDrop);
6056 
6057 	rc = sbuf_finish(sb);
6058 	sbuf_delete(sb);
6059 
6060 	return (rc);
6061 }
6062 
6063 static int
6064 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6065 {
6066 	struct adapter *sc = arg1;
6067 	struct sbuf *sb;
6068 	int rc, i;
6069 	unsigned int map, kbps, ipg, mode;
6070 	unsigned int pace_tab[NTX_SCHED];
6071 
6072 	rc = sysctl_wire_old_buffer(req, 0);
6073 	if (rc != 0)
6074 		return (rc);
6075 
6076 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6077 	if (sb == NULL)
6078 		return (ENOMEM);
6079 
6080 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6081 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6082 	t4_read_pace_tbl(sc, pace_tab);
6083 
6084 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
6085 	    "Class IPG (0.1 ns)   Flow IPG (us)");
6086 
6087 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6088 		t4_get_tx_sched(sc, i, &kbps, &ipg);
6089 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
6090 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
6091 		if (kbps)
6092 			sbuf_printf(sb, "%9u     ", kbps);
6093 		else
6094 			sbuf_printf(sb, " disabled     ");
6095 
6096 		if (ipg)
6097 			sbuf_printf(sb, "%13u        ", ipg);
6098 		else
6099 			sbuf_printf(sb, "     disabled        ");
6100 
6101 		if (pace_tab[i])
6102 			sbuf_printf(sb, "%10u", pace_tab[i]);
6103 		else
6104 			sbuf_printf(sb, "  disabled");
6105 	}
6106 
6107 	rc = sbuf_finish(sb);
6108 	sbuf_delete(sb);
6109 
6110 	return (rc);
6111 }
6112 
6113 static int
6114 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6115 {
6116 	struct adapter *sc = arg1;
6117 	struct sbuf *sb;
6118 	int rc, i, j;
6119 	uint64_t *p0, *p1;
6120 	struct lb_port_stats s[2];
6121 	static const char *stat_name[] = {
6122 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6123 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6124 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
6125 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6126 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6127 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6128 		"BG2FramesTrunc:", "BG3FramesTrunc:"
6129 	};
6130 
6131 	rc = sysctl_wire_old_buffer(req, 0);
6132 	if (rc != 0)
6133 		return (rc);
6134 
6135 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6136 	if (sb == NULL)
6137 		return (ENOMEM);
6138 
6139 	memset(s, 0, sizeof(s));
6140 
6141 	for (i = 0; i < 4; i += 2) {
6142 		t4_get_lb_stats(sc, i, &s[0]);
6143 		t4_get_lb_stats(sc, i + 1, &s[1]);
6144 
6145 		p0 = &s[0].octets;
6146 		p1 = &s[1].octets;
6147 		sbuf_printf(sb, "%s                       Loopback %u"
6148 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6149 
6150 		for (j = 0; j < nitems(stat_name); j++)
6151 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6152 				   *p0++, *p1++);
6153 	}
6154 
6155 	rc = sbuf_finish(sb);
6156 	sbuf_delete(sb);
6157 
6158 	return (rc);
6159 }
6160 
6161 static int
6162 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6163 {
6164 	int rc = 0;
6165 	struct port_info *pi = arg1;
6166 	struct sbuf *sb;
6167 	static const char *linkdnreasons[] = {
6168 		"non-specific", "remote fault", "autoneg failed", "reserved3",
6169 		"PHY overheated", "unknown", "rx los", "reserved7"
6170 	};
6171 
6172 	rc = sysctl_wire_old_buffer(req, 0);
6173 	if (rc != 0)
6174 		return (rc);
6175 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6176 	if (sb == NULL)
6177 		return (ENOMEM);
6178 
6179 	if (pi->linkdnrc < 0)
6180 		sbuf_printf(sb, "n/a");
6181 	else if (pi->linkdnrc < nitems(linkdnreasons))
6182 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
6183 	else
6184 		sbuf_printf(sb, "%d", pi->linkdnrc);
6185 
6186 	rc = sbuf_finish(sb);
6187 	sbuf_delete(sb);
6188 
6189 	return (rc);
6190 }
6191 
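/*
 * Helpers for sysctl_meminfo below.  A mem_desc describes one memory region
 * (base, limit, and an index into the region[] name table); regions are
 * gathered from hardware registers, sorted by base address, and any gaps
 * between the populated ranges are inserted as nameless holes.
 */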
6192 struct mem_desc {
6193 	unsigned int base;
6194 	unsigned int limit;
6195 	unsigned int idx;
6196 };
6197 
6198 static int
6199 mem_desc_cmp(const void *a, const void *b)
6200 {
6201 	return ((const struct mem_desc *)a)->base -
6202 	       ((const struct mem_desc *)b)->base;
6203 }
6204 
6205 static void
6206 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6207     unsigned int to)
6208 {
6209 	unsigned int size;
6210 
6211 	size = to - from + 1;
6212 	if (size == 0)
6213 		return;
6214 
6215 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6216 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
6217 }
6218 
6219 static int
6220 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6221 {
6222 	struct adapter *sc = arg1;
6223 	struct sbuf *sb;
6224 	int rc, i, n;
6225 	uint32_t lo, hi, used, alloc;
6226 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6227 	static const char *region[] = {
6228 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6229 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6230 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6231 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6232 		"RQUDP region:", "PBL region:", "TXPBL region:",
6233 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6234 		"On-chip queues:"
6235 	};
6236 	struct mem_desc avail[4];
6237 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
6238 	struct mem_desc *md = mem;
6239 
6240 	rc = sysctl_wire_old_buffer(req, 0);
6241 	if (rc != 0)
6242 		return (rc);
6243 
6244 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6245 	if (sb == NULL)
6246 		return (ENOMEM);
6247 
6248 	for (i = 0; i < nitems(mem); i++) {
6249 		mem[i].limit = 0;
6250 		mem[i].idx = i;
6251 	}
6252 
6253 	/* Find and sort the populated memory ranges */
6254 	i = 0;
6255 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6256 	if (lo & F_EDRAM0_ENABLE) {
6257 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6258 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
6259 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6260 		avail[i].idx = 0;
6261 		i++;
6262 	}
6263 	if (lo & F_EDRAM1_ENABLE) {
6264 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6265 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
6266 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6267 		avail[i].idx = 1;
6268 		i++;
6269 	}
6270 	if (lo & F_EXT_MEM_ENABLE) {
6271 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6272 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6273 		avail[i].limit = avail[i].base +
6274 		    (G_EXT_MEM_SIZE(hi) << 20);
6275 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
6276 		i++;
6277 	}
6278 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
6279 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6280 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6281 		avail[i].limit = avail[i].base +
6282 		    (G_EXT_MEM1_SIZE(hi) << 20);
6283 		avail[i].idx = 4;
6284 		i++;
6285 	}
	if (!i) {				/* no memory available */
		sbuf_delete(sb);
		return (0);
	}
6288 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6289 
6290 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6291 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6292 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6293 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6294 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6295 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6296 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6297 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6298 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6299 
6300 	/* the next few have explicit upper bounds */
6301 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6302 	md->limit = md->base - 1 +
6303 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6304 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6305 	md++;
6306 
6307 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6308 	md->limit = md->base - 1 +
6309 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6310 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6311 	md++;
6312 
6313 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6314 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6315 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6316 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
6317 	} else {
6318 		md->base = 0;
6319 		md->idx = nitems(region);  /* hide it */
6320 	}
6321 	md++;
6322 
6323 #define ulp_region(reg) \
6324 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6325 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6326 
6327 	ulp_region(RX_ISCSI);
6328 	ulp_region(RX_TDDP);
6329 	ulp_region(TX_TPT);
6330 	ulp_region(RX_STAG);
6331 	ulp_region(RX_RQ);
6332 	ulp_region(RX_RQUDP);
6333 	ulp_region(RX_PBL);
6334 	ulp_region(TX_PBL);
6335 #undef ulp_region
6336 
6337 	md->base = 0;
6338 	md->idx = nitems(region);
6339 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
6340 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
6341 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
6342 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
6343 	}
6344 	md++;
6345 
6346 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6347 	md->limit = md->base + sc->tids.ntids - 1;
6348 	md++;
6349 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6350 	md->limit = md->base + sc->tids.ntids - 1;
6351 	md++;
6352 
6353 	md->base = sc->vres.ocq.start;
6354 	if (sc->vres.ocq.size)
6355 		md->limit = md->base + sc->vres.ocq.size - 1;
6356 	else
6357 		md->idx = nitems(region);  /* hide it */
6358 	md++;
6359 
6360 	/* add any address-space holes, there can be up to 3 */
6361 	for (n = 0; n < i - 1; n++)
6362 		if (avail[n].limit < avail[n + 1].base)
6363 			(md++)->base = avail[n].limit;
6364 	if (avail[n].limit)
6365 		(md++)->base = avail[n].limit;
6366 
6367 	n = md - mem;
6368 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6369 
6370 	for (lo = 0; lo < i; lo++)
6371 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6372 				avail[lo].limit - 1);
6373 
6374 	sbuf_printf(sb, "\n");
6375 	for (i = 0; i < n; i++) {
6376 		if (mem[i].idx >= nitems(region))
6377 			continue;                        /* skip holes */
6378 		if (!mem[i].limit)
6379 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6380 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
6381 				mem[i].limit);
6382 	}
6383 
6384 	sbuf_printf(sb, "\n");
6385 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6386 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6387 	mem_region_show(sb, "uP RAM:", lo, hi);
6388 
6389 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6390 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6391 	mem_region_show(sb, "uP Extmem2:", lo, hi);
6392 
6393 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6394 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6395 		   G_PMRXMAXPAGE(lo),
6396 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6397 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
6398 
6399 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6400 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6401 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6402 		   G_PMTXMAXPAGE(lo),
6403 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6404 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6405 	sbuf_printf(sb, "%u p-structs\n",
6406 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6407 
6408 	for (i = 0; i < 4; i++) {
6409 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6410 		if (is_t4(sc)) {
6411 			used = G_USED(lo);
6412 			alloc = G_ALLOC(lo);
6413 		} else {
6414 			used = G_T5_USED(lo);
6415 			alloc = G_T5_ALLOC(lo);
6416 		}
6417 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6418 			   i, used, alloc);
6419 	}
6420 	for (i = 0; i < 4; i++) {
6421 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6422 		if (is_t4(sc)) {
6423 			used = G_USED(lo);
6424 			alloc = G_ALLOC(lo);
6425 		} else {
6426 			used = G_T5_USED(lo);
6427 			alloc = G_T5_ALLOC(lo);
6428 		}
6429 		sbuf_printf(sb,
6430 			   "\nLoopback %d using %u pages out of %u allocated",
6431 			   i, used, alloc);
6432 	}
6433 
6434 	rc = sbuf_finish(sb);
6435 	sbuf_delete(sb);
6436 
6437 	return (rc);
6438 }
6439 
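/*
 * Converts an MPS TCAM (x, y) pair to the value/mask it encodes: a bit
 * takes part in the match when it is set in x or y, the value bits come
 * from y, and an entry with a bit set in both x and y is unused (the
 * caller skips those).  The MAC address is the low 48 bits of y.
 */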
6440 static inline void
6441 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6442 {
6443 	*mask = x | y;
6444 	y = htobe64(y);
6445 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
6446 }
6447 
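/*
 * Walks the MPS classification TCAM and prints one line per valid entry:
 * the MAC address and mask, the SRAM classification data, and, for entries
 * with replication enabled, the 128-bit replication vector fetched from the
 * firmware with an FW_LDST command.
 */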
6448 static int
6449 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6450 {
6451 	struct adapter *sc = arg1;
6452 	struct sbuf *sb;
6453 	int rc, i, n;
6454 
6455 	rc = sysctl_wire_old_buffer(req, 0);
6456 	if (rc != 0)
6457 		return (rc);
6458 
6459 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6460 	if (sb == NULL)
6461 		return (ENOMEM);
6462 
6463 	sbuf_printf(sb,
6464 	    "Idx  Ethernet address     Mask     Vld Ports PF"
6465 	    "  VF              Replication             P0 P1 P2 P3  ML");
6466 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
6467 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6468 	for (i = 0; i < n; i++) {
6469 		uint64_t tcamx, tcamy, mask;
6470 		uint32_t cls_lo, cls_hi;
6471 		uint8_t addr[ETHER_ADDR_LEN];
6472 
6473 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6474 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6475 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6476 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6477 
6478 		if (tcamx & tcamy)
6479 			continue;
6480 
6481 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
6482 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6483 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
6484 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
6485 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6486 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
6487 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6488 
6489 		if (cls_lo & F_REPLICATE) {
6490 			struct fw_ldst_cmd ldst_cmd;
6491 
6492 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6493 			ldst_cmd.op_to_addrspace =
6494 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6495 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
6496 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6497 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6498 			ldst_cmd.u.mps.rplc.fid_idx =
6499 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6500 				V_FW_LDST_CMD_IDX(i));
6501 
6502 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6503 			    "t4mps");
6504 			if (rc)
6505 				break;
6506 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6507 			    sizeof(ldst_cmd), &ldst_cmd);
6508 			end_synchronized_op(sc, 0);
6509 
6510 			if (rc != 0) {
6511 				sbuf_printf(sb,
6512 				    " ------------ error %3u ------------", rc);
6513 				rc = 0;
6514 			} else {
6515 				sbuf_printf(sb, " %08x %08x %08x %08x",
6516 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
6517 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
6518 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
6519 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
6520 			}
6521 		} else
6522 			sbuf_printf(sb, "%36s", "");
6523 
6524 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
6525 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
6526 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
6527 	}
6528 
6529 	if (rc)
6530 		(void) sbuf_finish(sb);
6531 	else
6532 		rc = sbuf_finish(sb);
6533 	sbuf_delete(sb);
6534 
6535 	return (rc);
6536 }
6537 
6538 static int
6539 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6540 {
6541 	struct adapter *sc = arg1;
6542 	struct sbuf *sb;
6543 	int rc;
6544 	uint16_t mtus[NMTUS];
6545 
6546 	rc = sysctl_wire_old_buffer(req, 0);
6547 	if (rc != 0)
6548 		return (rc);
6549 
6550 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6551 	if (sb == NULL)
6552 		return (ENOMEM);
6553 
6554 	t4_read_mtu_tbl(sc, mtus, NULL);
6555 
6556 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6557 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6558 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6559 	    mtus[14], mtus[15]);
6560 
6561 	rc = sbuf_finish(sb);
6562 	sbuf_delete(sb);
6563 
6564 	return (rc);
6565 }
6566 
6567 static int
6568 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6569 {
6570 	struct adapter *sc = arg1;
6571 	struct sbuf *sb;
6572 	int rc, i;
6573 	uint32_t cnt[PM_NSTATS];
6574 	uint64_t cyc[PM_NSTATS];
6575 	static const char *rx_stats[] = {
6576 		"Read:", "Write bypass:", "Write mem:", "Flush:"
6577 	};
6578 	static const char *tx_stats[] = {
6579 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6580 	};
6581 
6582 	rc = sysctl_wire_old_buffer(req, 0);
6583 	if (rc != 0)
6584 		return (rc);
6585 
6586 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6587 	if (sb == NULL)
6588 		return (ENOMEM);
6589 
6590 	t4_pmtx_get_stats(sc, cnt, cyc);
6591 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6592 	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6593 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6594 		    cyc[i]);
6595 
6596 	t4_pmrx_get_stats(sc, cnt, cyc);
6597 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6598 	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6599 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6600 		    cyc[i]);
6601 
6602 	rc = sbuf_finish(sb);
6603 	sbuf_delete(sb);
6604 
6605 	return (rc);
6606 }
6607 
6608 static int
6609 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6610 {
6611 	struct adapter *sc = arg1;
6612 	struct sbuf *sb;
6613 	int rc;
6614 	struct tp_rdma_stats stats;
6615 
6616 	rc = sysctl_wire_old_buffer(req, 0);
6617 	if (rc != 0)
6618 		return (rc);
6619 
6620 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6621 	if (sb == NULL)
6622 		return (ENOMEM);
6623 
6624 	t4_tp_get_rdma_stats(sc, &stats);
6625 	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6626 	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6627 
6628 	rc = sbuf_finish(sb);
6629 	sbuf_delete(sb);
6630 
6631 	return (rc);
6632 }
6633 
6634 static int
6635 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6636 {
6637 	struct adapter *sc = arg1;
6638 	struct sbuf *sb;
6639 	int rc;
6640 	struct tp_tcp_stats v4, v6;
6641 
6642 	rc = sysctl_wire_old_buffer(req, 0);
6643 	if (rc != 0)
6644 		return (rc);
6645 
6646 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6647 	if (sb == NULL)
6648 		return (ENOMEM);
6649 
6650 	t4_tp_get_tcp_stats(sc, &v4, &v6);
6651 	sbuf_printf(sb,
6652 	    "                                IP                 IPv6\n");
6653 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6654 	    v4.tcpOutRsts, v6.tcpOutRsts);
6655 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6656 	    v4.tcpInSegs, v6.tcpInSegs);
6657 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6658 	    v4.tcpOutSegs, v6.tcpOutSegs);
6659 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6660 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6661 
6662 	rc = sbuf_finish(sb);
6663 	sbuf_delete(sb);
6664 
6665 	return (rc);
6666 }
6667 
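/*
 * Summarizes TID usage: ATIDs, the regular TID range (split around the
 * hash region when the LE hash is enabled), server TIDs, filter TIDs, and
 * ethoffload TIDs, plus the hardware's IPv4/IPv6 active-connection counts.
 */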
6668 static int
6669 sysctl_tids(SYSCTL_HANDLER_ARGS)
6670 {
6671 	struct adapter *sc = arg1;
6672 	struct sbuf *sb;
6673 	int rc;
6674 	struct tid_info *t = &sc->tids;
6675 
6676 	rc = sysctl_wire_old_buffer(req, 0);
6677 	if (rc != 0)
6678 		return (rc);
6679 
6680 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6681 	if (sb == NULL)
6682 		return (ENOMEM);
6683 
6684 	if (t->natids) {
6685 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6686 		    t->atids_in_use);
6687 	}
6688 
6689 	if (t->ntids) {
6690 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6691 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6692 
6693 			if (b) {
6694 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6695 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6696 				    t->ntids - 1);
6697 			} else {
6698 				sbuf_printf(sb, "TID range: %u-%u",
6699 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6700 				    t->ntids - 1);
6701 			}
6702 		} else
6703 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6704 		sbuf_printf(sb, ", in use: %u\n",
6705 		    atomic_load_acq_int(&t->tids_in_use));
6706 	}
6707 
6708 	if (t->nstids) {
6709 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6710 		    t->stid_base + t->nstids - 1, t->stids_in_use);
6711 	}
6712 
6713 	if (t->nftids) {
6714 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6715 		    t->ftid_base + t->nftids - 1);
6716 	}
6717 
6718 	if (t->netids) {
6719 		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
6720 		    t->etid_base + t->netids - 1);
6721 	}
6722 
6723 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6724 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6725 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6726 
6727 	rc = sbuf_finish(sb);
6728 	sbuf_delete(sb);
6729 
6730 	return (rc);
6731 }
6732 
6733 static int
6734 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6735 {
6736 	struct adapter *sc = arg1;
6737 	struct sbuf *sb;
6738 	int rc;
6739 	struct tp_err_stats stats;
6740 
6741 	rc = sysctl_wire_old_buffer(req, 0);
6742 	if (rc != 0)
6743 		return (rc);
6744 
6745 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6746 	if (sb == NULL)
6747 		return (ENOMEM);
6748 
6749 	t4_tp_get_err_stats(sc, &stats);
6750 
6751 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6752 		      "channel 3\n");
6753 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6754 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6755 	    stats.macInErrs[3]);
6756 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6757 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6758 	    stats.hdrInErrs[3]);
6759 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6760 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6761 	    stats.tcpInErrs[3]);
6762 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6763 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6764 	    stats.tcp6InErrs[3]);
6765 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6766 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6767 	    stats.tnlCongDrops[3]);
6768 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6769 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6770 	    stats.tnlTxDrops[3]);
6771 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6772 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6773 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6774 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6775 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6776 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6777 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6778 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6779 
6780 	rc = sbuf_finish(sb);
6781 	sbuf_delete(sb);
6782 
6783 	return (rc);
6784 }
6785 
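/*
 * Generic decoder for the TP logic-analyzer dumps below: each field_desc
 * names one bit-field (start bit and width) within a 64-bit capture word,
 * and field_desc_show prints every field, wrapping lines at ~79 columns.
 */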
6786 struct field_desc {
6787 	const char *name;
6788 	u_int start;
6789 	u_int width;
6790 };
6791 
6792 static void
6793 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6794 {
6795 	char buf[32];
6796 	int line_size = 0;
6797 
6798 	while (f->name) {
6799 		uint64_t mask = (1ULL << f->width) - 1;
6800 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6801 		    ((uintmax_t)v >> f->start) & mask);
6802 
6803 		if (line_size + len >= 79) {
6804 			line_size = 8;
6805 			sbuf_printf(sb, "\n        ");
6806 		}
6807 		sbuf_printf(sb, "%s ", buf);
6808 		line_size += len + 1;
6809 		f++;
6810 	}
6811 	sbuf_printf(sb, "\n");
6812 }
6813 
6814 static struct field_desc tp_la0[] = {
6815 	{ "RcfOpCodeOut", 60, 4 },
6816 	{ "State", 56, 4 },
6817 	{ "WcfState", 52, 4 },
6818 	{ "RcfOpcSrcOut", 50, 2 },
6819 	{ "CRxError", 49, 1 },
6820 	{ "ERxError", 48, 1 },
6821 	{ "SanityFailed", 47, 1 },
6822 	{ "SpuriousMsg", 46, 1 },
6823 	{ "FlushInputMsg", 45, 1 },
6824 	{ "FlushInputCpl", 44, 1 },
6825 	{ "RssUpBit", 43, 1 },
6826 	{ "RssFilterHit", 42, 1 },
6827 	{ "Tid", 32, 10 },
6828 	{ "InitTcb", 31, 1 },
6829 	{ "LineNumber", 24, 7 },
6830 	{ "Emsg", 23, 1 },
6831 	{ "EdataOut", 22, 1 },
6832 	{ "Cmsg", 21, 1 },
6833 	{ "CdataOut", 20, 1 },
6834 	{ "EreadPdu", 19, 1 },
6835 	{ "CreadPdu", 18, 1 },
6836 	{ "TunnelPkt", 17, 1 },
6837 	{ "RcfPeerFin", 16, 1 },
6838 	{ "RcfReasonOut", 12, 4 },
6839 	{ "TxCchannel", 10, 2 },
6840 	{ "RcfTxChannel", 8, 2 },
6841 	{ "RxEchannel", 6, 2 },
6842 	{ "RcfRxChannel", 5, 1 },
6843 	{ "RcfDataOutSrdy", 4, 1 },
6844 	{ "RxDvld", 3, 1 },
6845 	{ "RxOoDvld", 2, 1 },
6846 	{ "RxCongestion", 1, 1 },
6847 	{ "TxCongestion", 0, 1 },
6848 	{ NULL }
6849 };
6850 
6851 static struct field_desc tp_la1[] = {
6852 	{ "CplCmdIn", 56, 8 },
6853 	{ "CplCmdOut", 48, 8 },
6854 	{ "ESynOut", 47, 1 },
6855 	{ "EAckOut", 46, 1 },
6856 	{ "EFinOut", 45, 1 },
6857 	{ "ERstOut", 44, 1 },
6858 	{ "SynIn", 43, 1 },
6859 	{ "AckIn", 42, 1 },
6860 	{ "FinIn", 41, 1 },
6861 	{ "RstIn", 40, 1 },
6862 	{ "DataIn", 39, 1 },
6863 	{ "DataInVld", 38, 1 },
6864 	{ "PadIn", 37, 1 },
6865 	{ "RxBufEmpty", 36, 1 },
6866 	{ "RxDdp", 35, 1 },
6867 	{ "RxFbCongestion", 34, 1 },
6868 	{ "TxFbCongestion", 33, 1 },
6869 	{ "TxPktSumSrdy", 32, 1 },
6870 	{ "RcfUlpType", 28, 4 },
6871 	{ "Eread", 27, 1 },
6872 	{ "Ebypass", 26, 1 },
6873 	{ "Esave", 25, 1 },
6874 	{ "Static0", 24, 1 },
6875 	{ "Cread", 23, 1 },
6876 	{ "Cbypass", 22, 1 },
6877 	{ "Csave", 21, 1 },
6878 	{ "CPktOut", 20, 1 },
6879 	{ "RxPagePoolFull", 18, 2 },
6880 	{ "RxLpbkPkt", 17, 1 },
6881 	{ "TxLpbkPkt", 16, 1 },
6882 	{ "RxVfValid", 15, 1 },
6883 	{ "SynLearned", 14, 1 },
6884 	{ "SetDelEntry", 13, 1 },
6885 	{ "SetInvEntry", 12, 1 },
6886 	{ "CpcmdDvld", 11, 1 },
6887 	{ "CpcmdSave", 10, 1 },
6888 	{ "RxPstructsFull", 8, 2 },
6889 	{ "EpcmdDvld", 7, 1 },
6890 	{ "EpcmdFlush", 6, 1 },
6891 	{ "EpcmdTrimPrefix", 5, 1 },
6892 	{ "EpcmdTrimPostfix", 4, 1 },
6893 	{ "ERssIp4Pkt", 3, 1 },
6894 	{ "ERssIp6Pkt", 2, 1 },
6895 	{ "ERssTcpUdpPkt", 1, 1 },
6896 	{ "ERssFceFipPkt", 0, 1 },
6897 	{ NULL }
6898 };
6899 
6900 static struct field_desc tp_la2[] = {
6901 	{ "CplCmdIn", 56, 8 },
6902 	{ "MpsVfVld", 55, 1 },
6903 	{ "MpsPf", 52, 3 },
6904 	{ "MpsVf", 44, 8 },
6905 	{ "SynIn", 43, 1 },
6906 	{ "AckIn", 42, 1 },
6907 	{ "FinIn", 41, 1 },
6908 	{ "RstIn", 40, 1 },
6909 	{ "DataIn", 39, 1 },
6910 	{ "DataInVld", 38, 1 },
6911 	{ "PadIn", 37, 1 },
6912 	{ "RxBufEmpty", 36, 1 },
6913 	{ "RxDdp", 35, 1 },
6914 	{ "RxFbCongestion", 34, 1 },
6915 	{ "TxFbCongestion", 33, 1 },
6916 	{ "TxPktSumSrdy", 32, 1 },
6917 	{ "RcfUlpType", 28, 4 },
6918 	{ "Eread", 27, 1 },
6919 	{ "Ebypass", 26, 1 },
6920 	{ "Esave", 25, 1 },
6921 	{ "Static0", 24, 1 },
6922 	{ "Cread", 23, 1 },
6923 	{ "Cbypass", 22, 1 },
6924 	{ "Csave", 21, 1 },
6925 	{ "CPktOut", 20, 1 },
6926 	{ "RxPagePoolFull", 18, 2 },
6927 	{ "RxLpbkPkt", 17, 1 },
6928 	{ "TxLpbkPkt", 16, 1 },
6929 	{ "RxVfValid", 15, 1 },
6930 	{ "SynLearned", 14, 1 },
6931 	{ "SetDelEntry", 13, 1 },
6932 	{ "SetInvEntry", 12, 1 },
6933 	{ "CpcmdDvld", 11, 1 },
6934 	{ "CpcmdSave", 10, 1 },
6935 	{ "RxPstructsFull", 8, 2 },
6936 	{ "EpcmdDvld", 7, 1 },
6937 	{ "EpcmdFlush", 6, 1 },
6938 	{ "EpcmdTrimPrefix", 5, 1 },
6939 	{ "EpcmdTrimPostfix", 4, 1 },
6940 	{ "ERssIp4Pkt", 3, 1 },
6941 	{ "ERssIp6Pkt", 2, 1 },
6942 	{ "ERssTcpUdpPkt", 1, 1 },
6943 	{ "ERssFceFipPkt", 0, 1 },
6944 	{ NULL }
6945 };
6946 
6947 static void
6948 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6949 {
6950 
6951 	field_desc_show(sb, *p, tp_la0);
6952 }
6953 
6954 static void
6955 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6956 {
6957 
6958 	if (idx)
6959 		sbuf_printf(sb, "\n");
6960 	field_desc_show(sb, p[0], tp_la0);
6961 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6962 		field_desc_show(sb, p[1], tp_la0);
6963 }
6964 
6965 static void
6966 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6967 {
6968 
6969 	if (idx)
6970 		sbuf_printf(sb, "\n");
6971 	field_desc_show(sb, p[0], tp_la0);
6972 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6973 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6974 }
6975 
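/*
 * Dumps the TP logic analyzer.  DBGLAMODE 2 and 3 capture two 64-bit words
 * per entry (decoded with tp_la_show2/tp_la_show3); other modes capture
 * one (tp_la_show).
 */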
6976 static int
6977 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6978 {
6979 	struct adapter *sc = arg1;
6980 	struct sbuf *sb;
6981 	uint64_t *buf, *p;
6982 	int rc;
6983 	u_int i, inc;
6984 	void (*show_func)(struct sbuf *, uint64_t *, int);
6985 
6986 	rc = sysctl_wire_old_buffer(req, 0);
6987 	if (rc != 0)
6988 		return (rc);
6989 
6990 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6991 	if (sb == NULL)
6992 		return (ENOMEM);
6993 
6994 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6995 
6996 	t4_tp_read_la(sc, buf, NULL);
6997 	p = buf;
6998 
6999 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
7000 	case 2:
7001 		inc = 2;
7002 		show_func = tp_la_show2;
7003 		break;
7004 	case 3:
7005 		inc = 2;
7006 		show_func = tp_la_show3;
7007 		break;
7008 	default:
7009 		inc = 1;
7010 		show_func = tp_la_show;
7011 	}
7012 
7013 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7014 		(*show_func)(sb, p, i);
7015 
7016 	rc = sbuf_finish(sb);
7017 	sbuf_delete(sb);
7018 	free(buf, M_CXGBE);
7019 	return (rc);
7020 }
7021 
7022 static int
7023 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7024 {
7025 	struct adapter *sc = arg1;
7026 	struct sbuf *sb;
7027 	int rc;
7028 	u64 nrate[NCHAN], orate[NCHAN];
7029 
7030 	rc = sysctl_wire_old_buffer(req, 0);
7031 	if (rc != 0)
7032 		return (rc);
7033 
7034 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7035 	if (sb == NULL)
7036 		return (ENOMEM);
7037 
7038 	t4_get_chan_txrate(sc, nrate, orate);
7039 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
7040 		 "channel 3\n");
7041 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
7042 	    nrate[0], nrate[1], nrate[2], nrate[3]);
7043 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
7044 	    orate[0], orate[1], orate[2], orate[3]);
7045 
7046 	rc = sbuf_finish(sb);
7047 	sbuf_delete(sb);
7048 
7049 	return (rc);
7050 }
7051 
7052 static int
7053 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7054 {
7055 	struct adapter *sc = arg1;
7056 	struct sbuf *sb;
7057 	uint32_t *buf, *p;
7058 	int rc, i;
7059 
7060 	rc = sysctl_wire_old_buffer(req, 0);
7061 	if (rc != 0)
7062 		return (rc);
7063 
7064 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7065 	if (sb == NULL)
7066 		return (ENOMEM);
7067 
7068 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7069 	    M_ZERO | M_WAITOK);
7070 
7071 	t4_ulprx_read_la(sc, buf);
7072 	p = buf;
7073 
7074 	sbuf_printf(sb, "      Pcmd        Type   Message"
7075 	    "                Data");
7076 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7077 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
7078 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7079 	}
7080 
7081 	rc = sbuf_finish(sb);
7082 	sbuf_delete(sb);
7083 	free(buf, M_CXGBE);
7084 	return (rc);
7085 }
7086 
7087 static int
7088 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7089 {
7090 	struct adapter *sc = arg1;
7091 	struct sbuf *sb;
7092 	int rc, v;
7093 
7094 	rc = sysctl_wire_old_buffer(req, 0);
7095 	if (rc != 0)
7096 		return (rc);
7097 
7098 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7099 	if (sb == NULL)
7100 		return (ENOMEM);
7101 
7102 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
7103 	if (G_STATSOURCE_T5(v) == 7) {
7104 		if (G_STATMODE(v) == 0) {
7105 			sbuf_printf(sb, "total %d, incomplete %d",
7106 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7107 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7108 		} else if (G_STATMODE(v) == 1) {
7109 			sbuf_printf(sb, "total %d, data overflow %d",
7110 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7111 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7112 		}
7113 	}
7114 	rc = sbuf_finish(sb);
7115 	sbuf_delete(sb);
7116 
7117 	return (rc);
7118 }
7119 #endif
7120 
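/*
 * The filter mode visible through the ioctl interface (T4_FILTER_* bits)
 * and the hardware filter configuration in TP_VLAN_PRI_MAP (F_* bits)
 * express the same choices.  fconf_to_mode and mode_to_fconf translate
 * between the two, and fspec_to_fconf computes the fconf bits that a given
 * filter specification needs so it can be checked against the global mode.
 */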
7121 static uint32_t
7122 fconf_to_mode(uint32_t fconf)
7123 {
7124 	uint32_t mode;
7125 
7126 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7127 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7128 
7129 	if (fconf & F_FRAGMENTATION)
7130 		mode |= T4_FILTER_IP_FRAGMENT;
7131 
7132 	if (fconf & F_MPSHITTYPE)
7133 		mode |= T4_FILTER_MPS_HIT_TYPE;
7134 
7135 	if (fconf & F_MACMATCH)
7136 		mode |= T4_FILTER_MAC_IDX;
7137 
7138 	if (fconf & F_ETHERTYPE)
7139 		mode |= T4_FILTER_ETH_TYPE;
7140 
7141 	if (fconf & F_PROTOCOL)
7142 		mode |= T4_FILTER_IP_PROTO;
7143 
7144 	if (fconf & F_TOS)
7145 		mode |= T4_FILTER_IP_TOS;
7146 
7147 	if (fconf & F_VLAN)
7148 		mode |= T4_FILTER_VLAN;
7149 
7150 	if (fconf & F_VNIC_ID)
7151 		mode |= T4_FILTER_VNIC;
7152 
7153 	if (fconf & F_PORT)
7154 		mode |= T4_FILTER_PORT;
7155 
7156 	if (fconf & F_FCOE)
7157 		mode |= T4_FILTER_FCoE;
7158 
7159 	return (mode);
7160 }
7161 
7162 static uint32_t
7163 mode_to_fconf(uint32_t mode)
7164 {
7165 	uint32_t fconf = 0;
7166 
7167 	if (mode & T4_FILTER_IP_FRAGMENT)
7168 		fconf |= F_FRAGMENTATION;
7169 
7170 	if (mode & T4_FILTER_MPS_HIT_TYPE)
7171 		fconf |= F_MPSHITTYPE;
7172 
7173 	if (mode & T4_FILTER_MAC_IDX)
7174 		fconf |= F_MACMATCH;
7175 
7176 	if (mode & T4_FILTER_ETH_TYPE)
7177 		fconf |= F_ETHERTYPE;
7178 
7179 	if (mode & T4_FILTER_IP_PROTO)
7180 		fconf |= F_PROTOCOL;
7181 
7182 	if (mode & T4_FILTER_IP_TOS)
7183 		fconf |= F_TOS;
7184 
7185 	if (mode & T4_FILTER_VLAN)
7186 		fconf |= F_VLAN;
7187 
7188 	if (mode & T4_FILTER_VNIC)
7189 		fconf |= F_VNIC_ID;
7190 
7191 	if (mode & T4_FILTER_PORT)
7192 		fconf |= F_PORT;
7193 
7194 	if (mode & T4_FILTER_FCoE)
7195 		fconf |= F_FCOE;
7196 
7197 	return (fconf);
7198 }
7199 
7200 static uint32_t
7201 fspec_to_fconf(struct t4_filter_specification *fs)
7202 {
7203 	uint32_t fconf = 0;
7204 
7205 	if (fs->val.frag || fs->mask.frag)
7206 		fconf |= F_FRAGMENTATION;
7207 
7208 	if (fs->val.matchtype || fs->mask.matchtype)
7209 		fconf |= F_MPSHITTYPE;
7210 
7211 	if (fs->val.macidx || fs->mask.macidx)
7212 		fconf |= F_MACMATCH;
7213 
7214 	if (fs->val.ethtype || fs->mask.ethtype)
7215 		fconf |= F_ETHERTYPE;
7216 
7217 	if (fs->val.proto || fs->mask.proto)
7218 		fconf |= F_PROTOCOL;
7219 
7220 	if (fs->val.tos || fs->mask.tos)
7221 		fconf |= F_TOS;
7222 
7223 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
7224 		fconf |= F_VLAN;
7225 
7226 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
7227 		fconf |= F_VNIC_ID;
7228 
7229 	if (fs->val.iport || fs->mask.iport)
7230 		fconf |= F_PORT;
7231 
7232 	if (fs->val.fcoe || fs->mask.fcoe)
7233 		fconf |= F_FCOE;
7234 
7235 	return (fconf);
7236 }
7237 
7238 static int
7239 get_filter_mode(struct adapter *sc, uint32_t *mode)
7240 {
7241 	int rc;
7242 	uint32_t fconf;
7243 
7244 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7245 	    "t4getfm");
7246 	if (rc)
7247 		return (rc);
7248 
7249 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
7250 	    A_TP_VLAN_PRI_MAP);
7251 
7252 	if (sc->params.tp.vlan_pri_map != fconf) {
7253 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
7254 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
7255 		    fconf);
7256 	}
7257 
7258 	*mode = fconf_to_mode(fconf);
7259 
7260 	end_synchronized_op(sc, LOCK_HELD);
7261 	return (0);
7262 }
7263 
7264 static int
7265 set_filter_mode(struct adapter *sc, uint32_t mode)
7266 {
7267 	uint32_t fconf;
7268 	int rc;
7269 
7270 	fconf = mode_to_fconf(mode);
7271 
7272 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7273 	    "t4setfm");
7274 	if (rc)
7275 		return (rc);
7276 
7277 	if (sc->tids.ftids_in_use > 0) {
7278 		rc = EBUSY;
7279 		goto done;
7280 	}
7281 
7282 #ifdef TCP_OFFLOAD
7283 	if (uld_active(sc, ULD_TOM)) {
7284 		rc = EBUSY;
7285 		goto done;
7286 	}
7287 #endif
7288 
7289 	rc = -t4_set_filter_mode(sc, fconf);
7290 done:
7291 	end_synchronized_op(sc, LOCK_HELD);
7292 	return (rc);
7293 }
7294 
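/*
 * Reads a filter's hit count straight out of its TCB through memory
 * window 0.  T4 keeps a 64-bit big-endian count at offset 16 within the
 * TCB; T5 keeps a 32-bit count at offset 24.
 */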
7295 static inline uint64_t
7296 get_filter_hits(struct adapter *sc, uint32_t fid)
7297 {
7298 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7299 	uint64_t hits;
7300 
7301 	memwin_info(sc, 0, &mw_base, NULL);
7302 	off = position_memwin(sc, 0,
7303 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
7304 	if (is_t4(sc)) {
7305 		hits = t4_read_reg64(sc, mw_base + off + 16);
7306 		hits = be64toh(hits);
7307 	} else {
7308 		hits = t4_read_reg(sc, mw_base + off + 24);
7309 		hits = be32toh(hits);
7310 	}
7311 
7312 	return (hits);
7313 }
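
/*
 * The hit count is read straight out of the filter's TCB through memory
 * window 0: T4 keeps a 64-bit big-endian count at byte offset 16 of the
 * TCB, while on T5 a 32-bit count is read at offset 24.
 */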
7314 
7315 static int
7316 get_filter(struct adapter *sc, struct t4_filter *t)
7317 {
7318 	int i, rc, nfilters = sc->tids.nftids;
7319 	struct filter_entry *f;
7320 
7321 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7322 	    "t4getf");
7323 	if (rc)
7324 		return (rc);
7325 
7326 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
7327 	    t->idx >= nfilters) {
7328 		t->idx = 0xffffffff;
7329 		goto done;
7330 	}
7331 
7332 	f = &sc->tids.ftid_tab[t->idx];
7333 	for (i = t->idx; i < nfilters; i++, f++) {
7334 		if (f->valid) {
7335 			t->idx = i;
7336 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
7337 			t->smtidx = f->smtidx;
7338 			if (f->fs.hitcnts)
7339 				t->hits = get_filter_hits(sc, t->idx);
7340 			else
7341 				t->hits = UINT64_MAX;
7342 			t->fs = f->fs;
7343 
7344 			goto done;
7345 		}
7346 	}
7347 
7348 	t->idx = 0xffffffff;
7349 done:
7350 	end_synchronized_op(sc, LOCK_HELD);
7351 	return (0);
7352 }
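
/*
 * Userland sketch (illustrative, not part of the driver): get_filter()
 * returns the first valid filter at or after t->idx and sets idx to
 * 0xffffffff when there are no more, so all filters can be listed with:
 *
 *	struct t4_filter t = { .idx = 0 };
 *	while (ioctl(fd, CHELSIO_T4_GET_FILTER, &t) == 0 &&
 *	    t.idx != 0xffffffff) {
 *		consume(&t);
 *		t.idx++;
 *	}
 *
 * where fd is an open descriptor for the adapter's control node and
 * consume() stands in for whatever the caller does with each filter.
 */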
7353 
7354 static int
7355 set_filter(struct adapter *sc, struct t4_filter *t)
7356 {
7357 	unsigned int nfilters, nports;
7358 	struct filter_entry *f;
7359 	int i, rc;
7360 
7361 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
7362 	if (rc)
7363 		return (rc);
7364 
7365 	nfilters = sc->tids.nftids;
7366 	nports = sc->params.nports;
7367 
7368 	if (nfilters == 0) {
7369 		rc = ENOTSUP;
7370 		goto done;
7371 	}
7372 
7373 	if (!(sc->flags & FULL_INIT_DONE)) {
7374 		rc = EAGAIN;
7375 		goto done;
7376 	}
7377 
7378 	if (t->idx >= nfilters) {
7379 		rc = EINVAL;
7380 		goto done;
7381 	}
7382 
7383 	/* Validate against the global filter mode */
7384 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
7385 	    sc->params.tp.vlan_pri_map) {
7386 		rc = E2BIG;
7387 		goto done;
7388 	}
7389 
7390 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
7391 		rc = EINVAL;
7392 		goto done;
7393 	}
7394 
7395 	if (t->fs.val.iport >= nports) {
7396 		rc = EINVAL;
7397 		goto done;
7398 	}
7399 
7400 	/* Can't specify an iq if not steering to it */
7401 	if (!t->fs.dirsteer && t->fs.iq) {
7402 		rc = EINVAL;
7403 		goto done;
7404 	}
7405 
7406 	/* An IPv6 filter occupies four slots, so its idx must be 4-aligned */
7407 	if (t->fs.type == 1 &&
7408 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
7409 		rc = EINVAL;
7410 		goto done;
7411 	}
7412 
7413 	if (sc->tids.ftid_tab == NULL) {
7414 		KASSERT(sc->tids.ftids_in_use == 0,
7415 		    ("%s: no memory allocated but ftids_in_use > 0",
7416 		    __func__));
7417 
7418 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
7419 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
7420 		if (sc->tids.ftid_tab == NULL) {
7421 			rc = ENOMEM;
7422 			goto done;
7423 		}
7424 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
7425 	}
7426 
7427 	for (i = 0; i < 4; i++) {
7428 		f = &sc->tids.ftid_tab[t->idx + i];
7429 
7430 		if (f->pending || f->valid) {
7431 			rc = EBUSY;
7432 			goto done;
7433 		}
7434 		if (f->locked) {
7435 			rc = EPERM;
7436 			goto done;
7437 		}
7438 
7439 		if (t->fs.type == 0)
7440 			break;
7441 	}
7442 
7443 	f = &sc->tids.ftid_tab[t->idx];
7444 	f->fs = t->fs;
7445 
7446 	rc = set_filter_wr(sc, t->idx);
7447 done:
7448 	end_synchronized_op(sc, 0);
7449 
7450 	if (rc == 0) {
7451 		mtx_lock(&sc->tids.ftid_lock);
7452 		for (;;) {
7453 			if (f->pending == 0) {
7454 				rc = f->valid ? 0 : EIO;
7455 				break;
7456 			}
7457 
7458 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7459 			    PCATCH, "t4setfw", 0)) {
7460 				rc = EINPROGRESS;
7461 				break;
7462 			}
7463 		}
7464 		mtx_unlock(&sc->tids.ftid_lock);
7465 	}
7466 	return (rc);
7467 }
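
/*
 * Filter setup and removal are asynchronous: the work request posted by
 * set_filter_wr()/del_filter_wr() is answered by the firmware with a
 * CPL_SET_TCB_RPL, which t4_filter_rpl() below turns into f->valid (or a
 * cleared entry) before waking the sleeper.  An interrupted sleep returns
 * EINPROGRESS; the operation may still complete afterwards.
 */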
7468 
7469 static int
7470 del_filter(struct adapter *sc, struct t4_filter *t)
7471 {
7472 	unsigned int nfilters;
7473 	struct filter_entry *f;
7474 	int rc;
7475 
7476 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
7477 	if (rc)
7478 		return (rc);
7479 
7480 	nfilters = sc->tids.nftids;
7481 
7482 	if (nfilters == 0) {
7483 		rc = ENOTSUP;
7484 		goto done;
7485 	}
7486 
7487 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
7488 	    t->idx >= nfilters) {
7489 		rc = EINVAL;
7490 		goto done;
7491 	}
7492 
7493 	if (!(sc->flags & FULL_INIT_DONE)) {
7494 		rc = EAGAIN;
7495 		goto done;
7496 	}
7497 
7498 	f = &sc->tids.ftid_tab[t->idx];
7499 
7500 	if (f->pending) {
7501 		rc = EBUSY;
7502 		goto done;
7503 	}
7504 	if (f->locked) {
7505 		rc = EPERM;
7506 		goto done;
7507 	}
7508 
7509 	if (f->valid) {
7510 		t->fs = f->fs;	/* extra info for the caller */
7511 		rc = del_filter_wr(sc, t->idx);
7512 	}
7513 
7514 done:
7515 	end_synchronized_op(sc, 0);
7516 
7517 	if (rc == 0) {
7518 		mtx_lock(&sc->tids.ftid_lock);
7519 		for (;;) {
7520 			if (f->pending == 0) {
7521 				rc = f->valid ? EIO : 0;
7522 				break;
7523 			}
7524 
7525 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7526 			    PCATCH, "t4delfw", 0)) {
7527 				rc = EINPROGRESS;
7528 				break;
7529 			}
7530 		}
7531 		mtx_unlock(&sc->tids.ftid_lock);
7532 	}
7533 
7534 	return (rc);
7535 }
7536 
7537 static void
7538 clear_filter(struct filter_entry *f)
7539 {
7540 	if (f->l2t)
7541 		t4_l2t_release(f->l2t);
7542 
7543 	bzero(f, sizeof (*f));
7544 }
7545 
7546 static int
7547 set_filter_wr(struct adapter *sc, int fidx)
7548 {
7549 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7550 	struct fw_filter_wr *fwr;
7551 	unsigned int ftid;
7552 	struct wrq_cookie cookie;
7553 
7554 	ASSERT_SYNCHRONIZED_OP(sc);
7555 
7556 	if (f->fs.newdmac || f->fs.newvlan) {
7557 		/* This filter needs an L2T entry; allocate one. */
7558 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7559 		if (f->l2t == NULL)
7560 			return (EAGAIN);
7561 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7562 		    f->fs.dmac)) {
7563 			t4_l2t_release(f->l2t);
7564 			f->l2t = NULL;
7565 			return (ENOMEM);
7566 		}
7567 	}
7568 
7569 	ftid = sc->tids.ftid_base + fidx;
7570 
7571 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
7572 	if (fwr == NULL)
7573 		return (ENOMEM);
7574 	bzero(fwr, sizeof(*fwr));
7575 
7576 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7577 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7578 	fwr->tid_to_iq =
7579 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7580 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7581 		V_FW_FILTER_WR_NOREPLY(0) |
7582 		V_FW_FILTER_WR_IQ(f->fs.iq));
7583 	fwr->del_filter_to_l2tix =
7584 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7585 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7586 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7587 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7588 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7589 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7590 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7591 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7592 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7593 		    f->fs.newvlan == VLAN_REWRITE) |
7594 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7595 		    f->fs.newvlan == VLAN_REWRITE) |
7596 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7597 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7598 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7599 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7600 	fwr->ethtype = htobe16(f->fs.val.ethtype);
7601 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7602 	fwr->frag_to_ovlan_vldm =
7603 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7604 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7605 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7606 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7607 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7608 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7609 	fwr->smac_sel = 0;
7610 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7611 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7612 	fwr->maci_to_matchtypem =
7613 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7614 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7615 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7616 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7617 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7618 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7619 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7620 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7621 	fwr->ptcl = f->fs.val.proto;
7622 	fwr->ptclm = f->fs.mask.proto;
7623 	fwr->ttyp = f->fs.val.tos;
7624 	fwr->ttypm = f->fs.mask.tos;
7625 	fwr->ivlan = htobe16(f->fs.val.vlan);
7626 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7627 	fwr->ovlan = htobe16(f->fs.val.vnic);
7628 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7629 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7630 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7631 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7632 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7633 	fwr->lp = htobe16(f->fs.val.dport);
7634 	fwr->lpm = htobe16(f->fs.mask.dport);
7635 	fwr->fp = htobe16(f->fs.val.sport);
7636 	fwr->fpm = htobe16(f->fs.mask.sport);
7637 	if (f->fs.newsmac)
7638 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7639 
7640 	f->pending = 1;
7641 	sc->tids.ftids_in_use++;
7642 
7643 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
7644 	return (0);
7645 }
7646 
7647 static int
7648 del_filter_wr(struct adapter *sc, int fidx)
7649 {
7650 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7651 	struct fw_filter_wr *fwr;
7652 	unsigned int ftid;
7653 	struct wrq_cookie cookie;
7654 
7655 	ftid = sc->tids.ftid_base + fidx;
7656 
7657 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
7658 	if (fwr == NULL)
7659 		return (ENOMEM);
7660 	bzero(fwr, sizeof (*fwr));
7661 
7662 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7663 
7664 	f->pending = 1;
7665 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
7666 	return (0);
7667 }
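
/*
 * Both filter work requests follow the same pattern: start_wrq_wr() reserves
 * space on the management queue, the request is built in place, and
 * commit_wrq_wr() hands it to the hardware.  The reply is steered to the
 * firmware event queue, which is how t4_filter_rpl() gets to run.
 */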
7668 
7669 int
7670 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7671 {
7672 	struct adapter *sc = iq->adapter;
7673 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7674 	unsigned int idx = GET_TID(rpl);
7675 	unsigned int rc;
7676 	struct filter_entry *f;
7677 
7678 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7679 	    rss->opcode));
7680 
7681 	if (is_ftid(sc, idx)) {
7683 		idx -= sc->tids.ftid_base;
7684 		f = &sc->tids.ftid_tab[idx];
7685 		rc = G_COOKIE(rpl->cookie);
7686 
7687 		mtx_lock(&sc->tids.ftid_lock);
7688 		if (rc == FW_FILTER_WR_FLT_ADDED) {
7689 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7690 			    __func__, idx));
7691 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7692 			f->pending = 0;  /* asynchronous setup completed */
7693 			f->valid = 1;
7694 		} else {
7695 			if (rc != FW_FILTER_WR_FLT_DELETED) {
7696 				/* Add or delete failed, display an error */
7697 				log(LOG_ERR,
7698 				    "filter %u setup failed with error %u\n",
7699 				    idx, rc);
7700 			}
7701 
7702 			clear_filter(f);
7703 			sc->tids.ftids_in_use--;
7704 		}
7705 		wakeup(&sc->tids.ftid_tab);
7706 		mtx_unlock(&sc->tids.ftid_lock);
7707 	}
7708 
7709 	return (0);
7710 }
7711 
7712 static int
7713 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7714 {
7715 	int rc;
7716 
7717 	if (cntxt->cid > M_CTXTQID)
7718 		return (EINVAL);
7719 
7720 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7721 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7722 		return (EINVAL);
7723 
7724 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7725 	if (rc)
7726 		return (rc);
7727 
7728 	if (sc->flags & FW_OK) {
7729 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7730 		    &cntxt->data[0]);
7731 		if (rc == 0)
7732 			goto done;
7733 	}
7734 
7735 	/*
7736 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7737 	 * the backdoor.
7738 	 */
7739 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7740 done:
7741 	end_synchronized_op(sc, 0);
7742 	return (rc);
7743 }
7744 
7745 static int
7746 load_fw(struct adapter *sc, struct t4_data *fw)
7747 {
7748 	int rc;
7749 	uint8_t *fw_data;
7750 
7751 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7752 	if (rc)
7753 		return (rc);
7754 
7755 	if (sc->flags & FULL_INIT_DONE) {
7756 		rc = EBUSY;
7757 		goto done;
7758 	}
7759 
7760 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7761 	if (fw_data == NULL) {
7762 		rc = ENOMEM;
7763 		goto done;
7764 	}
7765 
7766 	rc = copyin(fw->data, fw_data, fw->len);
7767 	if (rc == 0)
7768 		rc = -t4_load_fw(sc, fw_data, fw->len);
7769 
7770 	free(fw_data, M_CXGBE);
7771 done:
7772 	end_synchronized_op(sc, 0);
7773 	return (rc);
7774 }
7775 
7776 static int
7777 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7778 {
7779 	uint32_t addr, off, remaining, i, n;
7780 	uint32_t *buf, *b;
7781 	uint32_t mw_base, mw_aperture;
7782 	int rc;
7783 	uint8_t *dst;
7784 
7785 	rc = validate_mem_range(sc, mr->addr, mr->len);
7786 	if (rc != 0)
7787 		return (rc);
7788 
7789 	memwin_info(sc, win, &mw_base, &mw_aperture);
7790 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7791 	addr = mr->addr;
7792 	remaining = mr->len;
7793 	dst = (void *)mr->data;
7794 
7795 	while (remaining) {
7796 		off = position_memwin(sc, win, addr);
7797 
7798 		/* number of bytes that we'll copy in the inner loop */
7799 		n = min(remaining, mw_aperture - off);
7800 		for (i = 0; i < n; i += 4)
7801 			*b++ = t4_read_reg(sc, mw_base + off + i);
7802 
7803 		rc = copyout(buf, dst, n);
7804 		if (rc != 0)
7805 			break;
7806 
7807 		b = buf;
7808 		dst += n;
7809 		remaining -= n;
7810 		addr += n;
7811 	}
7812 
7813 	free(buf, M_CXGBE);
7814 	return (rc);
7815 }
7816 
7817 static int
7818 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7819 {
7820 	int rc;
7821 
7822 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7823 		return (EINVAL);
7824 
7825 	if (i2cd->len > sizeof(i2cd->data))
7826 		return (EFBIG);
7827 
7828 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7829 	if (rc)
7830 		return (rc);
7831 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7832 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
7833 	end_synchronized_op(sc, 0);
7834 
7835 	return (rc);
7836 }
7837 
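/*
 * A negative value means "parameter not supplied" and deliberately passes
 * the range check; set_sched_class() handles unset parameters separately.
 */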
7838 static int
7839 in_range(int val, int lo, int hi)
7840 {
7841 
7842 	return (val < 0 || (val <= hi && val >= lo));
7843 }
7844 
7845 static int
7846 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7847 {
7848 	int fw_subcmd, fw_type, rc;
7849 
7850 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7851 	if (rc)
7852 		return (rc);
7853 
7854 	if (!(sc->flags & FULL_INIT_DONE)) {
7855 		rc = EAGAIN;
7856 		goto done;
7857 	}
7858 
7859 	/*
7860 	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7861 	 * sub-command and type are in common locations.)
7862 	 */
7863 	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7864 		fw_subcmd = FW_SCHED_SC_CONFIG;
7865 	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7866 		fw_subcmd = FW_SCHED_SC_PARAMS;
7867 	else {
7868 		rc = EINVAL;
7869 		goto done;
7870 	}
7871 	if (p->type == SCHED_CLASS_TYPE_PACKET)
7872 		fw_type = FW_SCHED_TYPE_PKTSCHED;
7873 	else {
7874 		rc = EINVAL;
7875 		goto done;
7876 	}
7877 
7878 	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7879 		/* Vet our parameters ... */
7880 		if (p->u.config.minmax < 0) {
7881 			rc = EINVAL;
7882 			goto done;
7883 		}
7884 
7885 		/* And pass the request to the firmware ... */
7886 		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
7887 		goto done;
7888 	}
7889 
7890 	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7891 		int fw_level;
7892 		int fw_mode;
7893 		int fw_rateunit;
7894 		int fw_ratemode;
7895 
7896 		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7897 			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7898 		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7899 			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7900 		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7901 			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7902 		else {
7903 			rc = EINVAL;
7904 			goto done;
7905 		}
7906 
7907 		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7908 			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7909 		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7910 			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7911 		else {
7912 			rc = EINVAL;
7913 			goto done;
7914 		}
7915 
7916 		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7917 			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7918 		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7919 			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7920 		else {
7921 			rc = EINVAL;
7922 			goto done;
7923 		}
7924 
7925 		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7926 			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7927 		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7928 			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7929 		else {
7930 			rc = EINVAL;
7931 			goto done;
7932 		}
7933 
7934 		/* Vet our parameters ... */
7935 		if (!in_range(p->u.params.channel, 0, 3) ||
7936 		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7937 		    !in_range(p->u.params.minrate, 0, 10000000) ||
7938 		    !in_range(p->u.params.maxrate, 0, 10000000) ||
7939 		    !in_range(p->u.params.weight, 0, 100)) {
7940 			rc = ERANGE;
7941 			goto done;
7942 		}
7943 
7944 		/*
7945 		 * Translate any unset parameters into the firmware's
7946 		 * nomenclature, or fail the call if a required parameter
7947 		 * was left unset ...
7948 		 */
7949 		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7950 		    p->u.params.channel < 0 || p->u.params.cl < 0) {
7951 			rc = EINVAL;
7952 			goto done;
7953 		}
7954 		if (p->u.params.minrate < 0)
7955 			p->u.params.minrate = 0;
7956 		if (p->u.params.maxrate < 0) {
7957 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7958 			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7959 				rc = EINVAL;
7960 				goto done;
7961 			} else
7962 				p->u.params.maxrate = 0;
7963 		}
7964 		if (p->u.params.weight < 0) {
7965 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7966 				rc = EINVAL;
7967 				goto done;
7968 			} else
7969 				p->u.params.weight = 0;
7970 		}
7971 		if (p->u.params.pktsize < 0) {
7972 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7973 			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7974 				rc = EINVAL;
7975 				goto done;
7976 			} else
7977 				p->u.params.pktsize = 0;
7978 		}
7979 
7980 		/* See what the firmware thinks of the request ... */
7981 		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7982 		    fw_rateunit, fw_ratemode, p->u.params.channel,
7983 		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7984 		    p->u.params.weight, p->u.params.pktsize, 1);
7985 		goto done;
7986 	}
7987 
7988 	rc = EINVAL;
7989 done:
7990 	end_synchronized_op(sc, 0);
7991 	return (rc);
7992 }
7993 
7994 static int
7995 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7996 {
7997 	struct port_info *pi = NULL;
7998 	struct sge_txq *txq;
7999 	uint32_t fw_mnem, fw_queue, fw_class;
8000 	int i, rc;
8001 
8002 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
8003 	if (rc)
8004 		return (rc);
8005 
8006 	if (!(sc->flags & FULL_INIT_DONE)) {
8007 		rc = EAGAIN;
8008 		goto done;
8009 	}
8010 
8011 	if (p->port >= sc->params.nports) {
8012 		rc = EINVAL;
8013 		goto done;
8014 	}
8015 
8016 	pi = sc->port[p->port];
8017 	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
8018 		rc = EINVAL;
8019 		goto done;
8020 	}
8021 
8022 	/*
8023 	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
8024 	 * Scheduling Class in this case).
8025 	 */
8026 	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
8027 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
8028 	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
8029 
8030 	/*
8031 	 * If p->queue is non-negative, then we're only changing the scheduling
8032 	 * on a single specified TX queue.
8033 	 */
8034 	if (p->queue >= 0) {
8035 		txq = &sc->sge.txq[pi->first_txq + p->queue];
8036 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8037 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8038 		    &fw_class);
8039 		goto done;
8040 	}
8041 
8042 	/*
8043 	 * Change the scheduling on all the TX queues for the
8044 	 * interface.
8045 	 */
8046 	for_each_txq(pi, i, txq) {
8047 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8048 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8049 		    &fw_class);
8050 		if (rc)
8051 			goto done;
8052 	}
8053 
8054 	rc = 0;
8055 done:
8056 	end_synchronized_op(sc, 0);
8057 	return (rc);
8058 }
8059 
8060 int
8061 t4_os_find_pci_capability(struct adapter *sc, int cap)
8062 {
8063 	int i;
8064 
8065 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
8066 }
8067 
8068 int
8069 t4_os_pci_save_state(struct adapter *sc)
8070 {
8071 	device_t dev;
8072 	struct pci_devinfo *dinfo;
8073 
8074 	dev = sc->dev;
8075 	dinfo = device_get_ivars(dev);
8076 
8077 	pci_cfg_save(dev, dinfo, 0);
8078 	return (0);
8079 }
8080 
8081 int
8082 t4_os_pci_restore_state(struct adapter *sc)
8083 {
8084 	device_t dev;
8085 	struct pci_devinfo *dinfo;
8086 
8087 	dev = sc->dev;
8088 	dinfo = device_get_ivars(dev);
8089 
8090 	pci_cfg_restore(dev, dinfo);
8091 	return (0);
8092 }
8093 
8094 void
8095 t4_os_portmod_changed(const struct adapter *sc, int idx)
8096 {
8097 	struct port_info *pi = sc->port[idx];
8098 	static const char *mod_str[] = {
8099 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
8100 	};
8101 
8102 	build_medialist(pi, &pi->media);
8103 #ifdef DEV_NETMAP
8104 	build_medialist(pi, &pi->nm_media);
8105 #endif
8106 
8107 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
8108 		if_printf(pi->ifp, "transceiver unplugged.\n");
8109 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
8110 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
8111 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
8112 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
8113 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
8114 		if_printf(pi->ifp, "%s transceiver inserted.\n",
8115 		    mod_str[pi->mod_type]);
8116 	} else {
8117 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
8118 		    pi->mod_type);
8119 	}
8120 }
8121 
8122 void
8123 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
8124 {
8125 	struct port_info *pi = sc->port[idx];
8126 	struct ifnet *ifp = pi->ifp;
8127 
8128 	if (link_stat) {
8129 		pi->linkdnrc = -1;
8130 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
8131 		if_link_state_change(ifp, LINK_STATE_UP);
8132 	} else {
8133 		if (reason >= 0)
8134 			pi->linkdnrc = reason;
8135 		if_link_state_change(ifp, LINK_STATE_DOWN);
8136 	}
8137 }
8138 
8139 void
8140 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
8141 {
8142 	struct adapter *sc;
8143 
8144 	sx_slock(&t4_list_lock);
8145 	SLIST_FOREACH(sc, &t4_list, link) {
8146 		/*
8147 		 * func should not make any assumptions about what state sc is
8148 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
8149 		 */
8150 		func(sc, arg);
8151 	}
8152 	sx_sunlock(&t4_list_lock);
8153 }
8154 
8155 static int
8156 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
8157 {
8158 	return (0);
8159 }
8160 
8161 static int
8162 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
8163 {
8164 	return (0);
8165 }
8166 
8167 static int
8168 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
8169     struct thread *td)
8170 {
8171 	int rc;
8172 	struct adapter *sc = dev->si_drv1;
8173 
8174 	rc = priv_check(td, PRIV_DRIVER);
8175 	if (rc != 0)
8176 		return (rc);
8177 
8178 	switch (cmd) {
8179 	case CHELSIO_T4_GETREG: {
8180 		struct t4_reg *edata = (struct t4_reg *)data;
8181 
8182 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8183 			return (EFAULT);
8184 
8185 		if (edata->size == 4)
8186 			edata->val = t4_read_reg(sc, edata->addr);
8187 		else if (edata->size == 8)
8188 			edata->val = t4_read_reg64(sc, edata->addr);
8189 		else
8190 			return (EINVAL);
8191 
8192 		break;
8193 	}
8194 	case CHELSIO_T4_SETREG: {
8195 		struct t4_reg *edata = (struct t4_reg *)data;
8196 
8197 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8198 			return (EFAULT);
8199 
8200 		if (edata->size == 4) {
8201 			if (edata->val & 0xffffffff00000000)
8202 				return (EINVAL);
8203 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
8204 		} else if (edata->size == 8)
8205 			t4_write_reg64(sc, edata->addr, edata->val);
8206 		else
8207 			return (EINVAL);
8208 		break;
8209 	}
8210 	case CHELSIO_T4_REGDUMP: {
8211 		struct t4_regdump *regs = (struct t4_regdump *)data;
8212 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
8213 		uint8_t *buf;
8214 
8215 		if (regs->len < reglen) {
8216 			regs->len = reglen; /* hint to the caller */
8217 			return (ENOBUFS);
8218 		}
8219 
8220 		regs->len = reglen;
8221 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
8222 		t4_get_regs(sc, regs, buf);
8223 		rc = copyout(buf, regs->data, reglen);
8224 		free(buf, M_CXGBE);
8225 		break;
8226 	}
8227 	case CHELSIO_T4_GET_FILTER_MODE:
8228 		rc = get_filter_mode(sc, (uint32_t *)data);
8229 		break;
8230 	case CHELSIO_T4_SET_FILTER_MODE:
8231 		rc = set_filter_mode(sc, *(uint32_t *)data);
8232 		break;
8233 	case CHELSIO_T4_GET_FILTER:
8234 		rc = get_filter(sc, (struct t4_filter *)data);
8235 		break;
8236 	case CHELSIO_T4_SET_FILTER:
8237 		rc = set_filter(sc, (struct t4_filter *)data);
8238 		break;
8239 	case CHELSIO_T4_DEL_FILTER:
8240 		rc = del_filter(sc, (struct t4_filter *)data);
8241 		break;
8242 	case CHELSIO_T4_GET_SGE_CONTEXT:
8243 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
8244 		break;
8245 	case CHELSIO_T4_LOAD_FW:
8246 		rc = load_fw(sc, (struct t4_data *)data);
8247 		break;
8248 	case CHELSIO_T4_GET_MEM:
8249 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
8250 		break;
8251 	case CHELSIO_T4_GET_I2C:
8252 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
8253 		break;
8254 	case CHELSIO_T4_CLEAR_STATS: {
8255 		int i;
8256 		u_int port_id = *(uint32_t *)data;
8257 		struct port_info *pi;
8258 
8259 		if (port_id >= sc->params.nports)
8260 			return (EINVAL);
8261 		pi = sc->port[port_id];
8262 
8263 		/* MAC stats */
8264 		t4_clr_port_stats(sc, pi->tx_chan);
8265 		pi->tx_parse_error = 0;
8266 
8267 		if (pi->flags & PORT_INIT_DONE) {
8268 			struct sge_rxq *rxq;
8269 			struct sge_txq *txq;
8270 			struct sge_wrq *wrq;
8271 
8272 			for_each_rxq(pi, i, rxq) {
8273 #if defined(INET) || defined(INET6)
8274 				rxq->lro.lro_queued = 0;
8275 				rxq->lro.lro_flushed = 0;
8276 #endif
8277 				rxq->rxcsum = 0;
8278 				rxq->vlan_extraction = 0;
8279 			}
8280 
8281 			for_each_txq(pi, i, txq) {
8282 				txq->txcsum = 0;
8283 				txq->tso_wrs = 0;
8284 				txq->vlan_insertion = 0;
8285 				txq->imm_wrs = 0;
8286 				txq->sgl_wrs = 0;
8287 				txq->txpkt_wrs = 0;
8288 				txq->txpkts0_wrs = 0;
8289 				txq->txpkts1_wrs = 0;
8290 				txq->txpkts0_pkts = 0;
8291 				txq->txpkts1_pkts = 0;
8292 				mp_ring_reset_stats(txq->r);
8293 			}
8294 
8295 #ifdef TCP_OFFLOAD
8296 			/* nothing to clear for each ofld_rxq */
8297 
8298 			for_each_ofld_txq(pi, i, wrq) {
8299 				wrq->tx_wrs_direct = 0;
8300 				wrq->tx_wrs_copied = 0;
8301 			}
8302 #endif
8303 			wrq = &sc->sge.ctrlq[pi->port_id];
8304 			wrq->tx_wrs_direct = 0;
8305 			wrq->tx_wrs_copied = 0;
8306 		}
8307 		break;
8308 	}
8309 	case CHELSIO_T4_SCHED_CLASS:
8310 		rc = set_sched_class(sc, (struct t4_sched_params *)data);
8311 		break;
8312 	case CHELSIO_T4_SCHED_QUEUE:
8313 		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
8314 		break;
8315 	case CHELSIO_T4_GET_TRACER:
8316 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
8317 		break;
8318 	case CHELSIO_T4_SET_TRACER:
8319 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
8320 		break;
8321 	default:
8322 		rc = EINVAL;
8323 	}
8324 
8325 	return (rc);
8326 }
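
/*
 * Userland sketch (illustrative): these ioctls are issued against the
 * adapter's control node (e.g. /dev/t4nex0), normally by cxgbetool(8).
 * Reading a 4-byte register, with an arbitrary example address:
 *
 *	struct t4_reg r = { .addr = 0x7e40, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &r) == 0)
 *		printf("0x%x: 0x%jx\n", r.addr, (uintmax_t)r.val);
 */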
8327 
8328 #ifdef TCP_OFFLOAD
8329 void
8330 t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
8331     const unsigned int *pgsz_order)
8332 {
8333 	struct port_info *pi = ifp->if_softc;
8334 	struct adapter *sc = pi->adapter;
8335 
8336 	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
8337 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
8338 		V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
8339 		V_HPZ3(pgsz_order[3]));
8340 }
8341 
8342 static int
8343 toe_capability(struct port_info *pi, int enable)
8344 {
8345 	int rc;
8346 	struct adapter *sc = pi->adapter;
8347 
8348 	ASSERT_SYNCHRONIZED_OP(sc);
8349 
8350 	if (!is_offload(sc))
8351 		return (ENODEV);
8352 
8353 	if (enable) {
8354 		/*
8355 		 * We need the port's queues around so that we're able to send
8356 		 * and receive CPLs to/from the TOE even if the ifnet for this
8357 		 * port has never been UP'd administratively.
8358 		 */
8359 		if (!(pi->flags & PORT_INIT_DONE)) {
8360 			rc = cxgbe_init_synchronized(pi);
8361 			if (rc)
8362 				return (rc);
8363 		}
8364 
8365 		if (isset(&sc->offload_map, pi->port_id))
8366 			return (0);
8367 
8368 		if (!uld_active(sc, ULD_TOM)) {
8369 			rc = t4_activate_uld(sc, ULD_TOM);
8370 			if (rc == EAGAIN) {
8371 				log(LOG_WARNING,
8372 				    "You must kldload t4_tom.ko before trying "
8373 				    "to enable TOE on a cxgbe interface.\n");
8374 			}
8375 			if (rc != 0)
8376 				return (rc);
8377 			KASSERT(sc->tom_softc != NULL,
8378 			    ("%s: TOM activated but softc NULL", __func__));
8379 			KASSERT(uld_active(sc, ULD_TOM),
8380 			    ("%s: TOM activated but flag not set", __func__));
8381 		}
8382 
8383 		/* Activate iWARP and iSCSI too, if the modules are loaded. */
8384 		if (!uld_active(sc, ULD_IWARP))
8385 			(void) t4_activate_uld(sc, ULD_IWARP);
8386 		if (!uld_active(sc, ULD_ISCSI))
8387 			(void) t4_activate_uld(sc, ULD_ISCSI);
8388 
8389 		setbit(&sc->offload_map, pi->port_id);
8390 	} else {
8391 		if (!isset(&sc->offload_map, pi->port_id))
8392 			return (0);
8393 
8394 		KASSERT(uld_active(sc, ULD_TOM),
8395 		    ("%s: TOM never initialized?", __func__));
8396 		clrbit(&sc->offload_map, pi->port_id);
8397 	}
8398 
8399 	return (0);
8400 }
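
/*
 * Note that disabling TOE above only clears the port's bit in
 * sc->offload_map; deactivating the TOM ULD itself is a separate step.
 */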
8401 
8402 /*
8403  * Add an upper layer driver to the global list.
8404  */
8405 int
8406 t4_register_uld(struct uld_info *ui)
8407 {
8408 	int rc = 0;
8409 	struct uld_info *u;
8410 
8411 	sx_xlock(&t4_uld_list_lock);
8412 	SLIST_FOREACH(u, &t4_uld_list, link) {
8413 		if (u->uld_id == ui->uld_id) {
8414 			rc = EEXIST;
8415 			goto done;
8416 		}
8417 	}
8418 
8419 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
8420 	ui->refcount = 0;
8421 done:
8422 	sx_xunlock(&t4_uld_list_lock);
8423 	return (rc);
8424 }
8425 
8426 int
8427 t4_unregister_uld(struct uld_info *ui)
8428 {
8429 	int rc = EINVAL;
8430 	struct uld_info *u;
8431 
8432 	sx_xlock(&t4_uld_list_lock);
8433 
8434 	SLIST_FOREACH(u, &t4_uld_list, link) {
8435 		if (u == ui) {
8436 			if (ui->refcount > 0) {
8437 				rc = EBUSY;
8438 				goto done;
8439 			}
8440 
8441 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
8442 			rc = 0;
8443 			goto done;
8444 		}
8445 	}
8446 done:
8447 	sx_xunlock(&t4_uld_list_lock);
8448 	return (rc);
8449 }
8450 
8451 int
8452 t4_activate_uld(struct adapter *sc, int id)
8453 {
8454 	int rc;
8455 	struct uld_info *ui;
8456 
8457 	ASSERT_SYNCHRONIZED_OP(sc);
8458 
8459 	if (id < 0 || id > ULD_MAX)
8460 		return (EINVAL);
8461 	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
8462 
8463 	sx_slock(&t4_uld_list_lock);
8464 
8465 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8466 		if (ui->uld_id == id) {
8467 			if (!(sc->flags & FULL_INIT_DONE)) {
8468 				rc = adapter_full_init(sc);
8469 				if (rc != 0)
8470 					break;
8471 			}
8472 
8473 			rc = ui->activate(sc);
8474 			if (rc == 0) {
8475 				setbit(&sc->active_ulds, id);
8476 				ui->refcount++;
8477 			}
8478 			break;
8479 		}
8480 	}
8481 
8482 	sx_sunlock(&t4_uld_list_lock);
8483 
8484 	return (rc);
8485 }
8486 
8487 int
8488 t4_deactivate_uld(struct adapter *sc, int id)
8489 {
8490 	int rc;
8491 	struct uld_info *ui;
8492 
8493 	ASSERT_SYNCHRONIZED_OP(sc);
8494 
8495 	if (id < 0 || id > ULD_MAX)
8496 		return (EINVAL);
8497 	rc = ENXIO;
8498 
8499 	sx_slock(&t4_uld_list_lock);
8500 
8501 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8502 		if (ui->uld_id == id) {
8503 			rc = ui->deactivate(sc);
8504 			if (rc == 0) {
8505 				clrbit(&sc->active_ulds, id);
8506 				ui->refcount--;
8507 			}
8508 			break;
8509 		}
8510 	}
8511 
8512 	sx_sunlock(&t4_uld_list_lock);
8513 
8514 	return (rc);
8515 }
8516 
8517 int
8518 uld_active(struct adapter *sc, int uld_id)
8519 {
8520 
8521 	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
8522 
8523 	return (isset(&sc->active_ulds, uld_id));
8524 }
8525 #endif
8526 
8527 /*
8528  * Come up with reasonable defaults for some of the tunables, provided they're
8529  * not set by the user (in which case we'll use the values as is).
8530  */
8531 static void
8532 tweak_tunables(void)
8533 {
8534 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
8535 
8536 	if (t4_ntxq10g < 1) {
8537 #ifdef RSS
8538 		t4_ntxq10g = rss_getnumbuckets();
8539 #else
8540 		t4_ntxq10g = min(nc, NTXQ_10G);
8541 #endif
8542 	}
8543 
8544 	if (t4_ntxq1g < 1) {
8545 #ifdef RSS
8546 		/* XXX: way too many for 1GbE? */
8547 		t4_ntxq1g = rss_getnumbuckets();
8548 #else
8549 		t4_ntxq1g = min(nc, NTXQ_1G);
8550 #endif
8551 	}
8552 
8553 	if (t4_nrxq10g < 1) {
8554 #ifdef RSS
8555 		t4_nrxq10g = rss_getnumbuckets();
8556 #else
8557 		t4_nrxq10g = min(nc, NRXQ_10G);
8558 #endif
8559 	}
8560 
8561 	if (t4_nrxq1g < 1) {
8562 #ifdef RSS
8563 		/* XXX: way too many for 1GbE? */
8564 		t4_nrxq1g = rss_getnumbuckets();
8565 #else
8566 		t4_nrxq1g = min(nc, NRXQ_1G);
8567 #endif
8568 	}
8569 
8570 #ifdef TCP_OFFLOAD
8571 	if (t4_nofldtxq10g < 1)
8572 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
8573 
8574 	if (t4_nofldtxq1g < 1)
8575 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
8576 
8577 	if (t4_nofldrxq10g < 1)
8578 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8579 
8580 	if (t4_nofldrxq1g < 1)
8581 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8582 
8583 	if (t4_toecaps_allowed == -1)
8584 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8585 #else
8586 	if (t4_toecaps_allowed == -1)
8587 		t4_toecaps_allowed = 0;
8588 #endif
8589 
8590 #ifdef DEV_NETMAP
8591 	if (t4_nnmtxq10g < 1)
8592 		t4_nnmtxq10g = min(nc, NNMTXQ_10G);
8593 
8594 	if (t4_nnmtxq1g < 1)
8595 		t4_nnmtxq1g = min(nc, NNMTXQ_1G);
8596 
8597 	if (t4_nnmrxq10g < 1)
8598 		t4_nnmrxq10g = min(nc, NNMRXQ_10G);
8599 
8600 	if (t4_nnmrxq1g < 1)
8601 		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
8602 #endif
8603 
8604 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8605 		t4_tmr_idx_10g = TMR_IDX_10G;
8606 
8607 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8608 		t4_pktc_idx_10g = PKTC_IDX_10G;
8609 
8610 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8611 		t4_tmr_idx_1g = TMR_IDX_1G;
8612 
8613 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8614 		t4_pktc_idx_1g = PKTC_IDX_1G;
8615 
8616 	if (t4_qsize_txq < 128)
8617 		t4_qsize_txq = 128;
8618 
8619 	if (t4_qsize_rxq < 128)
8620 		t4_qsize_rxq = 128;
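	/* Round the rx queue size up to a multiple of 8. */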
8621 	while (t4_qsize_rxq & 7)
8622 		t4_qsize_rxq++;
8623 
8624 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8625 }
8626 
8627 static struct sx mlu;	/* mod load unload */
8628 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
8629 
8630 static int
8631 mod_event(module_t mod, int cmd, void *arg)
8632 {
8633 	int rc = 0;
8634 	static int loaded = 0;
8635 
8636 	switch (cmd) {
8637 	case MOD_LOAD:
8638 		sx_xlock(&mlu);
8639 		if (loaded++ == 0) {
8640 			t4_sge_modload();
8641 			sx_init(&t4_list_lock, "T4/T5 adapters");
8642 			SLIST_INIT(&t4_list);
8643 #ifdef TCP_OFFLOAD
8644 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
8645 			SLIST_INIT(&t4_uld_list);
8646 #endif
8647 			t4_tracer_modload();
8648 			tweak_tunables();
8649 		}
8650 		sx_xunlock(&mlu);
8651 		break;
8652 
8653 	case MOD_UNLOAD:
8654 		sx_xlock(&mlu);
8655 		if (--loaded == 0) {
8656 			int tries;
8657 
8658 			sx_slock(&t4_list_lock);
8659 			if (!SLIST_EMPTY(&t4_list)) {
8660 				rc = EBUSY;
8661 				sx_sunlock(&t4_list_lock);
8662 				goto done_unload;
8663 			}
8664 #ifdef TCP_OFFLOAD
8665 			sx_slock(&t4_uld_list_lock);
8666 			if (!SLIST_EMPTY(&t4_uld_list)) {
8667 				rc = EBUSY;
8668 				sx_sunlock(&t4_uld_list_lock);
8669 				sx_sunlock(&t4_list_lock);
8670 				goto done_unload;
8671 			}
8672 #endif
8673 			tries = 0;
8674 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
8675 				uprintf("%ju clusters with custom free routine "
8676 				    "still in use.\n", t4_sge_extfree_refs());
8677 				pause("t4unload", 2 * hz);
8678 			}
8679 #ifdef TCP_OFFLOAD
8680 			sx_sunlock(&t4_uld_list_lock);
8681 #endif
8682 			sx_sunlock(&t4_list_lock);
8683 
8684 			if (t4_sge_extfree_refs() == 0) {
8685 				t4_tracer_modunload();
8686 #ifdef TCP_OFFLOAD
8687 				sx_destroy(&t4_uld_list_lock);
8688 #endif
8689 				sx_destroy(&t4_list_lock);
8690 				t4_sge_modunload();
8691 				loaded = 0;
8692 			} else {
8693 				rc = EBUSY;
8694 				loaded++;	/* undo earlier decrement */
8695 			}
8696 		}
8697 done_unload:
8698 		sx_xunlock(&mlu);
8699 		break;
8700 	}
8701 
8702 	return (rc);
8703 }
8704 
8705 static devclass_t t4_devclass, t5_devclass;
8706 static devclass_t cxgbe_devclass, cxl_devclass;
8707 
8708 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
8709 MODULE_VERSION(t4nex, 1);
8710 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
8711 #ifdef DEV_NETMAP
8712 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
8713 #endif /* DEV_NETMAP */
8714 
8716 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
8717 MODULE_VERSION(t5nex, 1);
8718 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
8719 #ifdef DEV_NETMAP
8720 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
8721 #endif /* DEV_NETMAP */
8722 
8723 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
8724 MODULE_VERSION(cxgbe, 1);
8725 
8726 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
8727 MODULE_VERSION(cxl, 1);
8728