xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 69718b786d3943ea9a99eeeb5f5f6162f11c78b7)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_ddb.h"
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_rss.h"
35 
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/priv.h>
39 #include <sys/kernel.h>
40 #include <sys/bus.h>
41 #include <sys/module.h>
42 #include <sys/malloc.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45 #include <sys/pciio.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pci_private.h>
49 #include <sys/firmware.h>
50 #include <sys/sbuf.h>
51 #include <sys/smp.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <net/ethernet.h>
56 #include <net/if.h>
57 #include <net/if_types.h>
58 #include <net/if_dl.h>
59 #include <net/if_vlan_var.h>
60 #ifdef RSS
61 #include <net/rss_config.h>
62 #endif
63 #if defined(__i386__) || defined(__amd64__)
64 #include <vm/vm.h>
65 #include <vm/pmap.h>
66 #endif
67 #ifdef DDB
68 #include <ddb/ddb.h>
69 #include <ddb/db_lex.h>
70 #endif
71 
72 #include "common/common.h"
73 #include "common/t4_msg.h"
74 #include "common/t4_regs.h"
75 #include "common/t4_regs_values.h"
76 #include "t4_ioctl.h"
77 #include "t4_l2t.h"
78 #include "t4_mp_ring.h"
79 #include "t4_if.h"
80 
81 /* T4 bus driver interface */
82 static int t4_probe(device_t);
83 static int t4_attach(device_t);
84 static int t4_detach(device_t);
85 static int t4_ready(device_t);
86 static int t4_read_port_device(device_t, int, device_t *);
87 static device_method_t t4_methods[] = {
88 	DEVMETHOD(device_probe,		t4_probe),
89 	DEVMETHOD(device_attach,	t4_attach),
90 	DEVMETHOD(device_detach,	t4_detach),
91 
92 	DEVMETHOD(t4_is_main_ready,	t4_ready),
93 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
94 
95 	DEVMETHOD_END
96 };
97 static driver_t t4_driver = {
98 	"t4nex",
99 	t4_methods,
100 	sizeof(struct adapter)
101 };
102 
103 
104 /* T4 port (cxgbe) interface */
105 static int cxgbe_probe(device_t);
106 static int cxgbe_attach(device_t);
107 static int cxgbe_detach(device_t);
108 device_method_t cxgbe_methods[] = {
109 	DEVMETHOD(device_probe,		cxgbe_probe),
110 	DEVMETHOD(device_attach,	cxgbe_attach),
111 	DEVMETHOD(device_detach,	cxgbe_detach),
112 	{ 0, 0 }
113 };
114 static driver_t cxgbe_driver = {
115 	"cxgbe",
116 	cxgbe_methods,
117 	sizeof(struct port_info)
118 };
119 
120 /* T4 VI (vcxgbe) interface */
121 static int vcxgbe_probe(device_t);
122 static int vcxgbe_attach(device_t);
123 static int vcxgbe_detach(device_t);
124 static device_method_t vcxgbe_methods[] = {
125 	DEVMETHOD(device_probe,		vcxgbe_probe),
126 	DEVMETHOD(device_attach,	vcxgbe_attach),
127 	DEVMETHOD(device_detach,	vcxgbe_detach),
128 	{ 0, 0 }
129 };
130 static driver_t vcxgbe_driver = {
131 	"vcxgbe",
132 	vcxgbe_methods,
133 	sizeof(struct vi_info)
134 };
135 
136 static d_ioctl_t t4_ioctl;
137 
138 static struct cdevsw t4_cdevsw = {
139 	.d_version = D_VERSION,
140 	.d_ioctl = t4_ioctl,
141 	.d_name = "t4nex",
142 };
143 
144 /* T5 bus driver interface */
145 static int t5_probe(device_t);
146 static device_method_t t5_methods[] = {
147 	DEVMETHOD(device_probe,		t5_probe),
148 	DEVMETHOD(device_attach,	t4_attach),
149 	DEVMETHOD(device_detach,	t4_detach),
150 
151 	DEVMETHOD(t4_is_main_ready,	t4_ready),
152 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
153 
154 	DEVMETHOD_END
155 };
156 static driver_t t5_driver = {
157 	"t5nex",
158 	t5_methods,
159 	sizeof(struct adapter)
160 };
161 
162 
163 /* T5 port (cxl) interface */
164 static driver_t cxl_driver = {
165 	"cxl",
166 	cxgbe_methods,
167 	sizeof(struct port_info)
168 };
169 
170 /* T5 VI (vcxl) interface */
171 static driver_t vcxl_driver = {
172 	"vcxl",
173 	vcxgbe_methods,
174 	sizeof(struct vi_info)
175 };
176 
177 /* T6 bus driver interface */
178 static int t6_probe(device_t);
179 static device_method_t t6_methods[] = {
180 	DEVMETHOD(device_probe,		t6_probe),
181 	DEVMETHOD(device_attach,	t4_attach),
182 	DEVMETHOD(device_detach,	t4_detach),
183 
184 	DEVMETHOD(t4_is_main_ready,	t4_ready),
185 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
186 
187 	DEVMETHOD_END
188 };
189 static driver_t t6_driver = {
190 	"t6nex",
191 	t6_methods,
192 	sizeof(struct adapter)
193 };
194 
195 
196 /* T6 port (cc) interface */
197 static driver_t cc_driver = {
198 	"cc",
199 	cxgbe_methods,
200 	sizeof(struct port_info)
201 };
202 
203 /* T6 VI (vcc) interface */
204 static driver_t vcc_driver = {
205 	"vcc",
206 	vcxgbe_methods,
207 	sizeof(struct vi_info)
208 };
209 
210 /* ifnet + media interface */
211 static void cxgbe_init(void *);
212 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
213 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
214 static void cxgbe_qflush(struct ifnet *);
215 static int cxgbe_media_change(struct ifnet *);
216 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
217 
218 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
219 
220 /*
221  * The correct lock order when you need to acquire multiple locks is
222  * t4_list_lock, then ADAPTER_LOCK, then t4_uld_list_lock.
223  */
224 static struct sx t4_list_lock;
225 SLIST_HEAD(, adapter) t4_list;
226 #ifdef TCP_OFFLOAD
227 static struct sx t4_uld_list_lock;
228 SLIST_HEAD(, uld_info) t4_uld_list;
229 #endif
230 
231 /*
232  * Tunables.  See tweak_tunables() too.
233  *
234  * Each tunable is set to a default value here if it's known at compile-time.
235  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
236  * provide a reasonable default when the driver is loaded.
237  *
238  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
239  * T5 are under hw.cxl.
240  */
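/*
 * Illustrative example (values are arbitrary, not recommendations): because
 * these are loader tunables they are normally set in /boot/loader.conf
 * before the driver is loaded, e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 */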
241 
242 /*
243  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
244  */
245 #define NTXQ_10G 16
246 int t4_ntxq10g = -1;
247 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
248 
249 #define NRXQ_10G 8
250 int t4_nrxq10g = -1;
251 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
252 
253 #define NTXQ_1G 4
254 int t4_ntxq1g = -1;
255 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
256 
257 #define NRXQ_1G 2
258 int t4_nrxq1g = -1;
259 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
260 
261 #define NTXQ_VI 1
262 static int t4_ntxq_vi = -1;
263 TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
264 
265 #define NRXQ_VI 1
266 static int t4_nrxq_vi = -1;
267 TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
268 
269 static int t4_rsrv_noflowq = 0;
270 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
271 
272 #ifdef TCP_OFFLOAD
273 #define NOFLDTXQ_10G 8
274 static int t4_nofldtxq10g = -1;
275 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
276 
277 #define NOFLDRXQ_10G 2
278 static int t4_nofldrxq10g = -1;
279 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
280 
281 #define NOFLDTXQ_1G 2
282 static int t4_nofldtxq1g = -1;
283 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
284 
285 #define NOFLDRXQ_1G 1
286 static int t4_nofldrxq1g = -1;
287 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
288 
289 #define NOFLDTXQ_VI 1
290 static int t4_nofldtxq_vi = -1;
291 TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
292 
293 #define NOFLDRXQ_VI 1
294 static int t4_nofldrxq_vi = -1;
295 TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
296 #endif
297 
298 #ifdef DEV_NETMAP
299 #define NNMTXQ_VI 2
300 static int t4_nnmtxq_vi = -1;
301 TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
302 
303 #define NNMRXQ_VI 2
304 static int t4_nnmrxq_vi = -1;
305 TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
306 #endif
307 
308 /*
309  * Holdoff parameters for 10G and 1G ports.
310  */
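/*
 * The *_tmr_idx and *_pktc_idx values below are indices into the adapter's
 * interrupt holdoff timer and packet-count tables rather than absolute
 * values; a pktc index of -1 leaves packet-count based holdoff disabled for
 * that port type.
 */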
311 #define TMR_IDX_10G 1
312 int t4_tmr_idx_10g = TMR_IDX_10G;
313 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
314 
315 #define PKTC_IDX_10G (-1)
316 int t4_pktc_idx_10g = PKTC_IDX_10G;
317 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
318 
319 #define TMR_IDX_1G 1
320 int t4_tmr_idx_1g = TMR_IDX_1G;
321 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
322 
323 #define PKTC_IDX_1G (-1)
324 int t4_pktc_idx_1g = PKTC_IDX_1G;
325 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
326 
327 /*
328  * Size (# of entries) of each tx and rx queue.
329  */
330 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
331 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
332 
333 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
334 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
335 
336 /*
337  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
338  */
339 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
340 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
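/*
 * For example, a value of 4 allows MSI-X only and 2 allows MSI only; the
 * default of 7 lets the driver fall back from MSI-X to MSI to INTx.
 */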
341 
342 /*
343  * Configuration file.
344  */
345 #define DEFAULT_CF	"default"
346 #define FLASH_CF	"flash"
347 #define UWIRE_CF	"uwire"
348 #define FPGA_CF		"fpga"
349 static char t4_cfg_file[32] = DEFAULT_CF;
350 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
351 
352 /*
353  * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
354  * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
355  * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
356  *            mark or when signalled to do so, 0 to never emit PAUSE.
357  */
358 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
359 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
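/*
 * For example, 0 disables flow control entirely, 1 is rx_pause only, 2 is
 * tx_pause only, and the default of 3 (PAUSE_RX | PAUSE_TX) enables both.
 */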
360 
361 /*
362  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
363  * encouraged respectively).
364  */
365 static unsigned int t4_fw_install = 1;
366 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
367 
368 /*
369  * ASIC features that will be used.  Disable the ones you don't want so that the
370  * chip resources aren't wasted on features that will not be used.
371  */
372 static int t4_nbmcaps_allowed = 0;
373 TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);
374 
375 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
376 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
377 
378 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
379     FW_CAPS_CONFIG_SWITCH_EGRESS;
380 TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);
381 
382 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
383 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
384 
385 static int t4_toecaps_allowed = -1;
386 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
387 
388 static int t4_rdmacaps_allowed = -1;
389 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
390 
391 static int t4_cryptocaps_allowed = 0;
392 TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);
393 
394 static int t4_iscsicaps_allowed = -1;
395 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
396 
397 static int t4_fcoecaps_allowed = 0;
398 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
399 
400 static int t5_write_combine = 0;
401 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
402 
403 static int t4_num_vis = 1;
404 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
405 
406 /* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
407 static int vi_mac_funcs[] = {
408 	FW_VI_FUNC_OFLD,
409 	FW_VI_FUNC_IWARP,
410 	FW_VI_FUNC_OPENISCSI,
411 	FW_VI_FUNC_OPENFCOE,
412 	FW_VI_FUNC_FOISCSI,
413 	FW_VI_FUNC_FOFCOE,
414 };
415 
416 struct intrs_and_queues {
417 	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
418 	uint16_t nirq;		/* Total # of vectors */
419 	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
420 	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
421 	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
422 	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
423 	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
424 	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
425 	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
426 	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
427 	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
428 	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
429 	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
430 
431 	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
432 	uint16_t ntxq_vi;	/* # of NIC txq's */
433 	uint16_t nrxq_vi;	/* # of NIC rxq's */
434 	uint16_t nofldtxq_vi;	/* # of TOE txq's */
435 	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
436 	uint16_t nnmtxq_vi;	/* # of netmap txq's */
437 	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
438 };
439 
440 struct filter_entry {
441 	uint32_t valid:1;	/* filter allocated and valid */
442 	uint32_t locked:1;	/* filter is administratively locked */
443 	uint32_t pending:1;	/* filter action is pending firmware reply */
444 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
445 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
446 
447 	struct t4_filter_specification fs;
448 };
449 
450 static void setup_memwin(struct adapter *);
451 static void position_memwin(struct adapter *, int, uint32_t);
452 static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
453 static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
454     int);
455 static inline int write_via_memwin(struct adapter *, int, uint32_t,
456     const uint32_t *, int);
457 static int validate_mem_range(struct adapter *, uint32_t, int);
458 static int fwmtype_to_hwmtype(int);
459 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
460     uint32_t *);
461 static int fixup_devlog_params(struct adapter *);
462 static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
463     struct intrs_and_queues *);
464 static int prep_firmware(struct adapter *);
465 static int partition_resources(struct adapter *, const struct firmware *,
466     const char *);
467 static int get_params__pre_init(struct adapter *);
468 static int get_params__post_init(struct adapter *);
469 static int set_params__post_init(struct adapter *);
470 static void t4_set_desc(struct adapter *);
471 static void build_medialist(struct port_info *, struct ifmedia *);
472 static int cxgbe_init_synchronized(struct vi_info *);
473 static int cxgbe_uninit_synchronized(struct vi_info *);
474 static void quiesce_txq(struct adapter *, struct sge_txq *);
475 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
476 static void quiesce_iq(struct adapter *, struct sge_iq *);
477 static void quiesce_fl(struct adapter *, struct sge_fl *);
478 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
479     driver_intr_t *, void *, char *);
480 static int t4_free_irq(struct adapter *, struct irq *);
481 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
482 static void vi_refresh_stats(struct adapter *, struct vi_info *);
483 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
484 static void cxgbe_tick(void *);
485 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
486 static void cxgbe_sysctls(struct port_info *);
487 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
488 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
489 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
490 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
491 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
492 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
493 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
494 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
495 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
496 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
497 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
498 #ifdef SBUF_DRAIN
499 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
500 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
501 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
502 static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
503 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
504 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
505 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
506 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
507 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
508 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
509 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
510 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
511 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
512 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
513 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
514 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
515 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
516 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
517 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
518 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
519 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
520 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
521 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
522 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
523 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
524 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
525 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
526 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
527 static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
528 #endif
529 #ifdef TCP_OFFLOAD
530 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
531 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
532 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
533 #endif
534 static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
535 static uint32_t mode_to_fconf(uint32_t);
536 static uint32_t mode_to_iconf(uint32_t);
537 static int check_fspec_against_fconf_iconf(struct adapter *,
538     struct t4_filter_specification *);
539 static int get_filter_mode(struct adapter *, uint32_t *);
540 static int set_filter_mode(struct adapter *, uint32_t);
541 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
542 static int get_filter(struct adapter *, struct t4_filter *);
543 static int set_filter(struct adapter *, struct t4_filter *);
544 static int del_filter(struct adapter *, struct t4_filter *);
545 static void clear_filter(struct filter_entry *);
546 static int set_filter_wr(struct adapter *, int);
547 static int del_filter_wr(struct adapter *, int);
548 static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
549     struct mbuf *);
550 static int get_sge_context(struct adapter *, struct t4_sge_context *);
551 static int load_fw(struct adapter *, struct t4_data *);
552 static int load_cfg(struct adapter *, struct t4_data *);
553 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
554 static int read_i2c(struct adapter *, struct t4_i2c_data *);
555 #ifdef TCP_OFFLOAD
556 static int toe_capability(struct vi_info *, int);
557 #endif
558 static int mod_event(module_t, int, void *);
559 static int notify_siblings(device_t, int);
560 
561 struct {
562 	uint16_t device;
563 	char *desc;
564 } t4_pciids[] = {
565 	{0xa000, "Chelsio Terminator 4 FPGA"},
566 	{0x4400, "Chelsio T440-dbg"},
567 	{0x4401, "Chelsio T420-CR"},
568 	{0x4402, "Chelsio T422-CR"},
569 	{0x4403, "Chelsio T440-CR"},
570 	{0x4404, "Chelsio T420-BCH"},
571 	{0x4405, "Chelsio T440-BCH"},
572 	{0x4406, "Chelsio T440-CH"},
573 	{0x4407, "Chelsio T420-SO"},
574 	{0x4408, "Chelsio T420-CX"},
575 	{0x4409, "Chelsio T420-BT"},
576 	{0x440a, "Chelsio T404-BT"},
577 	{0x440e, "Chelsio T440-LP-CR"},
578 }, t5_pciids[] = {
579 	{0xb000, "Chelsio Terminator 5 FPGA"},
580 	{0x5400, "Chelsio T580-dbg"},
581 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
582 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
583 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
584 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
585 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
586 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
587 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
588 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
589 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
590 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
591 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
592 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
593 	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
594 #ifdef notyet
595 	{0x5404,  "Chelsio T520-BCH"},
596 	{0x5405,  "Chelsio T540-BCH"},
597 	{0x5406,  "Chelsio T540-CH"},
598 	{0x5408,  "Chelsio T520-CX"},
599 	{0x540b,  "Chelsio B520-SR"},
600 	{0x540c,  "Chelsio B504-BT"},
601 	{0x540f,  "Chelsio Amsterdam"},
602 	{0x5413,  "Chelsio T580-CHR"},
603 #endif
604 }, t6_pciids[] = {
605 	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
606 	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
607 	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
608 	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
609 	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
610 	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
611 	{0x6410, "Chelsio T62100-DBG"},		/* 2 x 40/50/100G, debug */
612 };
613 
614 #ifdef TCP_OFFLOAD
615 /*
616  * service_iq() is handed an iq and needs the associated fl.  The offset of
617  * the fl from the iq must be exactly the same for both rxq and ofld_rxq.
618  */
619 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
620 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
621 #endif
622 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
623 
624 static int
625 t4_probe(device_t dev)
626 {
627 	int i;
628 	uint16_t v = pci_get_vendor(dev);
629 	uint16_t d = pci_get_device(dev);
630 	uint8_t f = pci_get_function(dev);
631 
632 	if (v != PCI_VENDOR_ID_CHELSIO)
633 		return (ENXIO);
634 
635 	/* Attach only to PF0 of the FPGA */
636 	if (d == 0xa000 && f != 0)
637 		return (ENXIO);
638 
639 	for (i = 0; i < nitems(t4_pciids); i++) {
640 		if (d == t4_pciids[i].device) {
641 			device_set_desc(dev, t4_pciids[i].desc);
642 			return (BUS_PROBE_DEFAULT);
643 		}
644 	}
645 
646 	return (ENXIO);
647 }
648 
649 static int
650 t5_probe(device_t dev)
651 {
652 	int i;
653 	uint16_t v = pci_get_vendor(dev);
654 	uint16_t d = pci_get_device(dev);
655 	uint8_t f = pci_get_function(dev);
656 
657 	if (v != PCI_VENDOR_ID_CHELSIO)
658 		return (ENXIO);
659 
660 	/* Attach only to PF0 of the FPGA */
661 	if (d == 0xb000 && f != 0)
662 		return (ENXIO);
663 
664 	for (i = 0; i < nitems(t5_pciids); i++) {
665 		if (d == t5_pciids[i].device) {
666 			device_set_desc(dev, t5_pciids[i].desc);
667 			return (BUS_PROBE_DEFAULT);
668 		}
669 	}
670 
671 	return (ENXIO);
672 }
673 
674 static int
675 t6_probe(device_t dev)
676 {
677 	int i;
678 	uint16_t v = pci_get_vendor(dev);
679 	uint16_t d = pci_get_device(dev);
680 
681 	if (v != PCI_VENDOR_ID_CHELSIO)
682 		return (ENXIO);
683 
684 	for (i = 0; i < nitems(t6_pciids); i++) {
685 		if (d == t6_pciids[i].device) {
686 			device_set_desc(dev, t6_pciids[i].desc);
687 			return (BUS_PROBE_DEFAULT);
688 		}
689 	}
690 
691 	return (ENXIO);
692 }
693 
694 static void
695 t5_attribute_workaround(device_t dev)
696 {
697 	device_t root_port;
698 	uint32_t v;
699 
700 	/*
701 	 * The T5 chips do not properly echo the No Snoop and Relaxed
702 	 * Ordering attributes when replying to a TLP from a Root
703 	 * Port.  As a workaround, find the parent Root Port and
704 	 * disable No Snoop and Relaxed Ordering.  Note that this
705 	 * affects all devices under this root port.
706 	 */
707 	root_port = pci_find_pcie_root_port(dev);
708 	if (root_port == NULL) {
709 		device_printf(dev, "Unable to find parent root port\n");
710 		return;
711 	}
712 
713 	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
714 	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
715 	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
716 	    0)
717 		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
718 		    device_get_nameunit(root_port));
719 }
720 
721 static const struct devnames devnames[] = {
722 	{
723 		.nexus_name = "t4nex",
724 		.ifnet_name = "cxgbe",
725 		.vi_ifnet_name = "vcxgbe",
726 		.pf03_drv_name = "t4iov",
727 		.vf_nexus_name = "t4vf",
728 		.vf_ifnet_name = "cxgbev"
729 	}, {
730 		.nexus_name = "t5nex",
731 		.ifnet_name = "cxl",
732 		.vi_ifnet_name = "vcxl",
733 		.pf03_drv_name = "t5iov",
734 		.vf_nexus_name = "t5vf",
735 		.vf_ifnet_name = "cxlv"
736 	}, {
737 		.nexus_name = "t6nex",
738 		.ifnet_name = "cc",
739 		.vi_ifnet_name = "vcc",
740 		.pf03_drv_name = "t6iov",
741 		.vf_nexus_name = "t6vf",
742 		.vf_ifnet_name = "ccv"
743 	}
744 };
745 
746 void
747 t4_init_devnames(struct adapter *sc)
748 {
749 	int id;
750 
751 	id = chip_id(sc);
752 	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
753 		sc->names = &devnames[id - CHELSIO_T4];
754 	else {
755 		device_printf(sc->dev, "chip id %d is not supported.\n", id);
756 		sc->names = NULL;
757 	}
758 }
759 
760 static int
761 t4_attach(device_t dev)
762 {
763 	struct adapter *sc;
764 	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
765 	struct make_dev_args mda;
766 	struct intrs_and_queues iaq;
767 	struct sge *s;
768 	uint8_t *buf;
769 #ifdef TCP_OFFLOAD
770 	int ofld_rqidx, ofld_tqidx;
771 #endif
772 #ifdef DEV_NETMAP
773 	int nm_rqidx, nm_tqidx;
774 #endif
775 	int num_vis;
776 
777 	sc = device_get_softc(dev);
778 	sc->dev = dev;
779 	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
780 
781 	if ((pci_get_device(dev) & 0xff00) == 0x5400)
782 		t5_attribute_workaround(dev);
783 	pci_enable_busmaster(dev);
784 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
785 		uint32_t v;
786 
787 		pci_set_max_read_req(dev, 4096);
788 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
789 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
790 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
791 
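		/*
		 * The 3-bit Max_Payload_Size field in the PCIe Device Control
		 * register encodes the payload size as 128 << field.
		 */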
792 		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
793 	}
794 
795 	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
796 	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
797 	sc->traceq = -1;
798 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
799 	    device_get_nameunit(dev));
800 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
801 
802 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
803 	    device_get_nameunit(dev));
804 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
805 	t4_add_adapter(sc);
806 
807 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
808 	TAILQ_INIT(&sc->sfl);
809 	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
810 
811 	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
812 
813 	rc = t4_map_bars_0_and_4(sc);
814 	if (rc != 0)
815 		goto done; /* error message displayed already */
816 
817 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
818 
819 	/* Prepare the adapter for operation. */
820 	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
821 	rc = -t4_prep_adapter(sc, buf);
822 	free(buf, M_CXGBE);
823 	if (rc != 0) {
824 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
825 		goto done;
826 	}
827 
828 	/*
829 	 * This is the real PF# to which we're attaching.  Works from within PCI
830 	 * passthrough environments too, where pci_get_function() could return a
831 	 * different PF# depending on the passthrough configuration.  We need to
832 	 * use the real PF# in all our communication with the firmware.
833 	 */
834 	j = t4_read_reg(sc, A_PL_WHOAMI);
835 	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
836 	sc->mbox = sc->pf;
837 
838 	t4_init_devnames(sc);
839 	if (sc->names == NULL) {
840 		rc = ENOTSUP;
841 		goto done; /* error message displayed already */
842 	}
843 
844 	/*
845 	 * Set up the memory windows really early, even before the character
846 	 * device is created, so that the userland tool's register i/o and
847 	 * memory reads work even in "recovery mode".
848 	 */
849 	setup_memwin(sc);
850 	if (t4_init_devlog_params(sc, 0) == 0)
851 		fixup_devlog_params(sc);
852 	make_dev_args_init(&mda);
853 	mda.mda_devsw = &t4_cdevsw;
854 	mda.mda_uid = UID_ROOT;
855 	mda.mda_gid = GID_WHEEL;
856 	mda.mda_mode = 0600;
857 	mda.mda_si_drv1 = sc;
858 	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
859 	if (rc != 0)
860 		device_printf(dev, "failed to create nexus char device: %d.\n",
861 		    rc);
862 
863 	/* Go no further if recovery mode has been requested. */
864 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
865 		device_printf(dev, "recovery mode.\n");
866 		goto done;
867 	}
868 
869 #if defined(__i386__)
870 	if ((cpu_feature & CPUID_CX8) == 0) {
871 		device_printf(dev, "64 bit atomics not available.\n");
872 		rc = ENOTSUP;
873 		goto done;
874 	}
875 #endif
876 
877 	/* Prepare the firmware for operation */
878 	rc = prep_firmware(sc);
879 	if (rc != 0)
880 		goto done; /* error message displayed already */
881 
882 	rc = get_params__post_init(sc);
883 	if (rc != 0)
884 		goto done; /* error message displayed already */
885 
886 	rc = set_params__post_init(sc);
887 	if (rc != 0)
888 		goto done; /* error message displayed already */
889 
890 	rc = t4_map_bar_2(sc);
891 	if (rc != 0)
892 		goto done; /* error message displayed already */
893 
894 	rc = t4_create_dma_tag(sc);
895 	if (rc != 0)
896 		goto done; /* error message displayed already */
897 
898 	/*
899 	 * Number of VIs to create per-port.  The first VI is the "main" regular
900 	 * VI for the port.  The rest are additional virtual interfaces on the
901 	 * same physical port.  Note that the main VI does not have native
902 	 * netmap support but the extra VIs do.
903 	 *
904 	 * Limit the number of VIs per port to the number of available
905 	 * MAC addresses per port.
906 	 */
907 	if (t4_num_vis >= 1)
908 		num_vis = t4_num_vis;
909 	else
910 		num_vis = 1;
911 	if (num_vis > nitems(vi_mac_funcs)) {
912 		num_vis = nitems(vi_mac_funcs);
913 		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
914 	}
915 
916 	/*
917 	 * First pass over all the ports - allocate VIs and initialize some
918 	 * basic parameters like mac address, port type, etc.  We also figure
919 	 * out whether a port is 10G or 1G and use that information when
920 	 * calculating how many interrupts to attempt to allocate.
921 	 */
922 	n10g = n1g = 0;
923 	for_each_port(sc, i) {
924 		struct port_info *pi;
925 
926 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
927 		sc->port[i] = pi;
928 
929 		/* These must be set before t4_port_init */
930 		pi->adapter = sc;
931 		pi->port_id = i;
932 		/*
933 		 * XXX: vi[0] is special so we can't delay this allocation until
934 		 * pi->nvi's final value is known.
935 		 */
936 		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
937 		    M_ZERO | M_WAITOK);
938 
939 		/*
940 		 * Allocate the "main" VI and initialize parameters
941 		 * like mac addr.
942 		 */
943 		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
944 		if (rc != 0) {
945 			device_printf(dev, "unable to initialize port %d: %d\n",
946 			    i, rc);
947 			free(pi->vi, M_CXGBE);
948 			free(pi, M_CXGBE);
949 			sc->port[i] = NULL;
950 			goto done;
951 		}
952 
953 		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
954 		pi->link_cfg.requested_fc |= t4_pause_settings;
955 		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
956 		pi->link_cfg.fc |= t4_pause_settings;
957 
958 		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
959 		if (rc != 0) {
960 			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
961 			free(pi->vi, M_CXGBE);
962 			free(pi, M_CXGBE);
963 			sc->port[i] = NULL;
964 			goto done;
965 		}
966 
967 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
968 		    device_get_nameunit(dev), i);
969 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
970 		sc->chan_map[pi->tx_chan] = i;
971 
972 		pi->tc = malloc(sizeof(struct tx_sched_class) *
973 		    sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK);
974 
975 		if (port_top_speed(pi) >= 10) {
976 			n10g++;
977 		} else {
978 			n1g++;
979 		}
980 
981 		pi->linkdnrc = -1;
982 
983 		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
984 		if (pi->dev == NULL) {
985 			device_printf(dev,
986 			    "failed to add device for port %d.\n", i);
987 			rc = ENXIO;
988 			goto done;
989 		}
990 		pi->vi[0].dev = pi->dev;
991 		device_set_softc(pi->dev, pi);
992 	}
993 
994 	/*
995 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
996 	 */
997 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
998 	if (rc != 0)
999 		goto done; /* error message displayed already */
1000 	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
1001 		num_vis = 1;
1002 
1003 	sc->intr_type = iaq.intr_type;
1004 	sc->intr_count = iaq.nirq;
1005 
1006 	s = &sc->sge;
1007 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
1008 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
1009 	if (num_vis > 1) {
1010 		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
1011 		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
1012 	}
1013 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
1014 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
1015 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
1016 #ifdef TCP_OFFLOAD
1017 	if (is_offload(sc)) {
1018 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
1019 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
1020 		if (num_vis > 1) {
1021 			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
1022 			    iaq.nofldrxq_vi;
1023 			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
1024 			    iaq.nofldtxq_vi;
1025 		}
1026 		s->neq += s->nofldtxq + s->nofldrxq;
1027 		s->niq += s->nofldrxq;
1028 
1029 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1030 		    M_CXGBE, M_ZERO | M_WAITOK);
1031 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1032 		    M_CXGBE, M_ZERO | M_WAITOK);
1033 	}
1034 #endif
1035 #ifdef DEV_NETMAP
1036 	if (num_vis > 1) {
1037 		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
1038 		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
1039 	}
1040 	s->neq += s->nnmtxq + s->nnmrxq;
1041 	s->niq += s->nnmrxq;
1042 
1043 	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1044 	    M_CXGBE, M_ZERO | M_WAITOK);
1045 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1046 	    M_CXGBE, M_ZERO | M_WAITOK);
1047 #endif
1048 
1049 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
1050 	    M_ZERO | M_WAITOK);
1051 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1052 	    M_ZERO | M_WAITOK);
1053 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1054 	    M_ZERO | M_WAITOK);
1055 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
1056 	    M_ZERO | M_WAITOK);
1057 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
1058 	    M_ZERO | M_WAITOK);
1059 
1060 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1061 	    M_ZERO | M_WAITOK);
1062 
1063 	t4_init_l2t(sc, M_WAITOK);
1064 
1065 	/*
1066 	 * Second pass over the ports.  This time we know the number of rx and
1067 	 * tx queues that each port should get.
1068 	 */
1069 	rqidx = tqidx = 0;
1070 #ifdef TCP_OFFLOAD
1071 	ofld_rqidx = ofld_tqidx = 0;
1072 #endif
1073 #ifdef DEV_NETMAP
1074 	nm_rqidx = nm_tqidx = 0;
1075 #endif
1076 	for_each_port(sc, i) {
1077 		struct port_info *pi = sc->port[i];
1078 		struct vi_info *vi;
1079 
1080 		if (pi == NULL)
1081 			continue;
1082 
1083 		pi->nvi = num_vis;
1084 		for_each_vi(pi, j, vi) {
1085 			vi->pi = pi;
1086 			vi->qsize_rxq = t4_qsize_rxq;
1087 			vi->qsize_txq = t4_qsize_txq;
1088 
1089 			vi->first_rxq = rqidx;
1090 			vi->first_txq = tqidx;
1091 			if (port_top_speed(pi) >= 10) {
1092 				vi->tmr_idx = t4_tmr_idx_10g;
1093 				vi->pktc_idx = t4_pktc_idx_10g;
1094 				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
1095 				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
1096 				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
1097 			} else {
1098 				vi->tmr_idx = t4_tmr_idx_1g;
1099 				vi->pktc_idx = t4_pktc_idx_1g;
1100 				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
1101 				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
1102 				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
1103 			}
1104 			rqidx += vi->nrxq;
1105 			tqidx += vi->ntxq;
1106 
1107 			if (j == 0 && vi->ntxq > 1)
1108 				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
1109 			else
1110 				vi->rsrv_noflowq = 0;
1111 
1112 #ifdef TCP_OFFLOAD
1113 			vi->first_ofld_rxq = ofld_rqidx;
1114 			vi->first_ofld_txq = ofld_tqidx;
1115 			if (port_top_speed(pi) >= 10) {
1116 				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
1117 				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
1118 				    iaq.nofldrxq_vi;
1119 				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
1120 				    iaq.nofldtxq_vi;
1121 			} else {
1122 				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
1123 				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
1124 				    iaq.nofldrxq_vi;
1125 				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
1126 				    iaq.nofldtxq_vi;
1127 			}
1128 			ofld_rqidx += vi->nofldrxq;
1129 			ofld_tqidx += vi->nofldtxq;
1130 #endif
1131 #ifdef DEV_NETMAP
1132 			if (j > 0) {
1133 				vi->first_nm_rxq = nm_rqidx;
1134 				vi->first_nm_txq = nm_tqidx;
1135 				vi->nnmrxq = iaq.nnmrxq_vi;
1136 				vi->nnmtxq = iaq.nnmtxq_vi;
1137 				nm_rqidx += vi->nnmrxq;
1138 				nm_tqidx += vi->nnmtxq;
1139 			}
1140 #endif
1141 		}
1142 	}
1143 
1144 	rc = t4_setup_intr_handlers(sc);
1145 	if (rc != 0) {
1146 		device_printf(dev,
1147 		    "failed to setup interrupt handlers: %d\n", rc);
1148 		goto done;
1149 	}
1150 
1151 	rc = bus_generic_attach(dev);
1152 	if (rc != 0) {
1153 		device_printf(dev,
1154 		    "failed to attach all child ports: %d\n", rc);
1155 		goto done;
1156 	}
1157 
1158 	device_printf(dev,
1159 	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1160 	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1161 	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1162 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1163 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1164 
1165 	t4_set_desc(sc);
1166 
1167 	notify_siblings(dev, 0);
1168 
1169 done:
1170 	if (rc != 0 && sc->cdev) {
1171 		/* cdev was created and so cxgbetool works; recover that way. */
1172 		device_printf(dev,
1173 		    "error during attach, adapter is now in recovery mode.\n");
1174 		rc = 0;
1175 	}
1176 
1177 	if (rc != 0)
1178 		t4_detach_common(dev);
1179 	else
1180 		t4_sysctls(sc);
1181 
1182 	return (rc);
1183 }
1184 
1185 static int
1186 t4_ready(device_t dev)
1187 {
1188 	struct adapter *sc;
1189 
1190 	sc = device_get_softc(dev);
1191 	if (sc->flags & FW_OK)
1192 		return (0);
1193 	return (ENXIO);
1194 }
1195 
1196 static int
1197 t4_read_port_device(device_t dev, int port, device_t *child)
1198 {
1199 	struct adapter *sc;
1200 	struct port_info *pi;
1201 
1202 	sc = device_get_softc(dev);
1203 	if (port < 0 || port >= MAX_NPORTS)
1204 		return (EINVAL);
1205 	pi = sc->port[port];
1206 	if (pi == NULL || pi->dev == NULL)
1207 		return (ENXIO);
1208 	*child = pi->dev;
1209 	return (0);
1210 }
1211 
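/*
 * Notify every other attached function of the same PCI device (an "iov"
 * sibling driver such as t4iov/t5iov/t6iov, for example) that the main nexus
 * is attaching or detaching, using the T4_ATTACH_CHILD/T4_DETACH_CHILD
 * methods from t4_if.
 */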
1212 static int
1213 notify_siblings(device_t dev, int detaching)
1214 {
1215 	device_t sibling;
1216 	int error, i;
1217 
1218 	error = 0;
1219 	for (i = 0; i < PCI_FUNCMAX; i++) {
1220 		if (i == pci_get_function(dev))
1221 			continue;
1222 		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1223 		    pci_get_slot(dev), i);
1224 		if (sibling == NULL || !device_is_attached(sibling))
1225 			continue;
1226 		if (detaching)
1227 			error = T4_DETACH_CHILD(sibling);
1228 		else
1229 			(void)T4_ATTACH_CHILD(sibling);
1230 		if (error)
1231 			break;
1232 	}
1233 	return (error);
1234 }
1235 
1236 /*
1237  * Idempotent
1238  */
1239 static int
1240 t4_detach(device_t dev)
1241 {
1242 	struct adapter *sc;
1243 	int rc;
1244 
1245 	sc = device_get_softc(dev);
1246 
1247 	rc = notify_siblings(dev, 1);
1248 	if (rc) {
1249 		device_printf(dev,
1250 		    "failed to detach sibling devices: %d\n", rc);
1251 		return (rc);
1252 	}
1253 
1254 	return (t4_detach_common(dev));
1255 }
1256 
1257 int
1258 t4_detach_common(device_t dev)
1259 {
1260 	struct adapter *sc;
1261 	struct port_info *pi;
1262 	int i, rc;
1263 
1264 	sc = device_get_softc(dev);
1265 
1266 	if (sc->flags & FULL_INIT_DONE) {
1267 		if (!(sc->flags & IS_VF))
1268 			t4_intr_disable(sc);
1269 	}
1270 
1271 	if (sc->cdev) {
1272 		destroy_dev(sc->cdev);
1273 		sc->cdev = NULL;
1274 	}
1275 
1276 	if (device_is_attached(dev)) {
1277 		rc = bus_generic_detach(dev);
1278 		if (rc) {
1279 			device_printf(dev,
1280 			    "failed to detach child devices: %d\n", rc);
1281 			return (rc);
1282 		}
1283 	}
1284 
1285 	for (i = 0; i < sc->intr_count; i++)
1286 		t4_free_irq(sc, &sc->irq[i]);
1287 
1288 	for (i = 0; i < MAX_NPORTS; i++) {
1289 		pi = sc->port[i];
1290 		if (pi) {
1291 			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1292 			if (pi->dev)
1293 				device_delete_child(dev, pi->dev);
1294 
1295 			mtx_destroy(&pi->pi_lock);
1296 			free(pi->vi, M_CXGBE);
1297 			free(pi->tc, M_CXGBE);
1298 			free(pi, M_CXGBE);
1299 		}
1300 	}
1301 
1302 	if (sc->flags & FULL_INIT_DONE)
1303 		adapter_full_uninit(sc);
1304 
1305 	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1306 		t4_fw_bye(sc, sc->mbox);
1307 
1308 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1309 		pci_release_msi(dev);
1310 
1311 	if (sc->regs_res)
1312 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1313 		    sc->regs_res);
1314 
1315 	if (sc->udbs_res)
1316 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1317 		    sc->udbs_res);
1318 
1319 	if (sc->msix_res)
1320 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1321 		    sc->msix_res);
1322 
1323 	if (sc->l2t)
1324 		t4_free_l2t(sc->l2t);
1325 
1326 #ifdef TCP_OFFLOAD
1327 	free(sc->sge.ofld_rxq, M_CXGBE);
1328 	free(sc->sge.ofld_txq, M_CXGBE);
1329 #endif
1330 #ifdef DEV_NETMAP
1331 	free(sc->sge.nm_rxq, M_CXGBE);
1332 	free(sc->sge.nm_txq, M_CXGBE);
1333 #endif
1334 	free(sc->irq, M_CXGBE);
1335 	free(sc->sge.rxq, M_CXGBE);
1336 	free(sc->sge.txq, M_CXGBE);
1337 	free(sc->sge.ctrlq, M_CXGBE);
1338 	free(sc->sge.iqmap, M_CXGBE);
1339 	free(sc->sge.eqmap, M_CXGBE);
1340 	free(sc->tids.ftid_tab, M_CXGBE);
1341 	t4_destroy_dma_tag(sc);
1342 	if (mtx_initialized(&sc->sc_lock)) {
1343 		sx_xlock(&t4_list_lock);
1344 		SLIST_REMOVE(&t4_list, sc, adapter, link);
1345 		sx_xunlock(&t4_list_lock);
1346 		mtx_destroy(&sc->sc_lock);
1347 	}
1348 
1349 	callout_drain(&sc->sfl_callout);
1350 	if (mtx_initialized(&sc->tids.ftid_lock))
1351 		mtx_destroy(&sc->tids.ftid_lock);
1352 	if (mtx_initialized(&sc->sfl_lock))
1353 		mtx_destroy(&sc->sfl_lock);
1354 	if (mtx_initialized(&sc->ifp_lock))
1355 		mtx_destroy(&sc->ifp_lock);
1356 	if (mtx_initialized(&sc->reg_lock))
1357 		mtx_destroy(&sc->reg_lock);
1358 
1359 	for (i = 0; i < NUM_MEMWIN; i++) {
1360 		struct memwin *mw = &sc->memwin[i];
1361 
1362 		if (rw_initialized(&mw->mw_lock))
1363 			rw_destroy(&mw->mw_lock);
1364 	}
1365 
1366 	bzero(sc, sizeof(*sc));
1367 
1368 	return (0);
1369 }
1370 
1371 static int
1372 cxgbe_probe(device_t dev)
1373 {
1374 	char buf[128];
1375 	struct port_info *pi = device_get_softc(dev);
1376 
1377 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1378 	device_set_desc_copy(dev, buf);
1379 
1380 	return (BUS_PROBE_DEFAULT);
1381 }
1382 
1383 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1384     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1385     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1386 #define T4_CAP_ENABLE (T4_CAP)
1387 
1388 static int
1389 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1390 {
1391 	struct ifnet *ifp;
1392 	struct sbuf *sb;
1393 
1394 	vi->xact_addr_filt = -1;
1395 	callout_init(&vi->tick, 1);
1396 
1397 	/* Allocate an ifnet and set it up */
1398 	ifp = if_alloc(IFT_ETHER);
1399 	if (ifp == NULL) {
1400 		device_printf(dev, "Cannot allocate ifnet\n");
1401 		return (ENOMEM);
1402 	}
1403 	vi->ifp = ifp;
1404 	ifp->if_softc = vi;
1405 
1406 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1407 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1408 
1409 	ifp->if_init = cxgbe_init;
1410 	ifp->if_ioctl = cxgbe_ioctl;
1411 	ifp->if_transmit = cxgbe_transmit;
1412 	ifp->if_qflush = cxgbe_qflush;
1413 	ifp->if_get_counter = cxgbe_get_counter;
1414 
1415 	ifp->if_capabilities = T4_CAP;
1416 #ifdef TCP_OFFLOAD
1417 	if (vi->nofldrxq != 0)
1418 		ifp->if_capabilities |= IFCAP_TOE;
1419 #endif
1420 	ifp->if_capenable = T4_CAP_ENABLE;
1421 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1422 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1423 
1424 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1425 	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1426 	ifp->if_hw_tsomaxsegsize = 65536;
1427 
1428 	/* Initialize ifmedia for this VI */
1429 	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1430 	    cxgbe_media_status);
1431 	build_medialist(vi->pi, &vi->media);
1432 
1433 	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1434 	    EVENTHANDLER_PRI_ANY);
1435 
1436 	ether_ifattach(ifp, vi->hw_addr);
1437 #ifdef DEV_NETMAP
1438 	if (vi->nnmrxq != 0)
1439 		cxgbe_nm_attach(vi);
1440 #endif
1441 	sb = sbuf_new_auto();
1442 	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1443 #ifdef TCP_OFFLOAD
1444 	if (ifp->if_capabilities & IFCAP_TOE)
1445 		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
1446 		    vi->nofldtxq, vi->nofldrxq);
1447 #endif
1448 #ifdef DEV_NETMAP
1449 	if (ifp->if_capabilities & IFCAP_NETMAP)
1450 		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1451 		    vi->nnmtxq, vi->nnmrxq);
1452 #endif
1453 	sbuf_finish(sb);
1454 	device_printf(dev, "%s\n", sbuf_data(sb));
1455 	sbuf_delete(sb);
1456 
1457 	vi_sysctls(vi);
1458 
1459 	return (0);
1460 }
1461 
1462 static int
1463 cxgbe_attach(device_t dev)
1464 {
1465 	struct port_info *pi = device_get_softc(dev);
1466 	struct adapter *sc = pi->adapter;
1467 	struct vi_info *vi;
1468 	int i, rc;
1469 
1470 	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1471 
1472 	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1473 	if (rc)
1474 		return (rc);
1475 
1476 	for_each_vi(pi, i, vi) {
1477 		if (i == 0)
1478 			continue;
1479 		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1480 		if (vi->dev == NULL) {
1481 			device_printf(dev, "failed to add VI %d\n", i);
1482 			continue;
1483 		}
1484 		device_set_softc(vi->dev, vi);
1485 	}
1486 
1487 	cxgbe_sysctls(pi);
1488 
1489 	bus_generic_attach(dev);
1490 
1491 	return (0);
1492 }
1493 
1494 static void
1495 cxgbe_vi_detach(struct vi_info *vi)
1496 {
1497 	struct ifnet *ifp = vi->ifp;
1498 
1499 	ether_ifdetach(ifp);
1500 
1501 	if (vi->vlan_c)
1502 		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1503 
1504 	/* Let detach proceed even if these fail. */
1505 #ifdef DEV_NETMAP
1506 	if (ifp->if_capabilities & IFCAP_NETMAP)
1507 		cxgbe_nm_detach(vi);
1508 #endif
1509 	cxgbe_uninit_synchronized(vi);
1510 	callout_drain(&vi->tick);
1511 	vi_full_uninit(vi);
1512 
1513 	ifmedia_removeall(&vi->media);
1514 	if_free(vi->ifp);
1515 	vi->ifp = NULL;
1516 }
1517 
1518 static int
1519 cxgbe_detach(device_t dev)
1520 {
1521 	struct port_info *pi = device_get_softc(dev);
1522 	struct adapter *sc = pi->adapter;
1523 	int rc;
1524 
1525 	/* Detach the extra VIs first. */
1526 	rc = bus_generic_detach(dev);
1527 	if (rc)
1528 		return (rc);
1529 	device_delete_children(dev);
1530 
1531 	doom_vi(sc, &pi->vi[0]);
1532 
1533 	if (pi->flags & HAS_TRACEQ) {
1534 		sc->traceq = -1;	/* cloner should not create ifnet */
1535 		t4_tracer_port_detach(sc);
1536 	}
1537 
1538 	cxgbe_vi_detach(&pi->vi[0]);
1539 	callout_drain(&pi->tick);
1540 
1541 	end_synchronized_op(sc, 0);
1542 
1543 	return (0);
1544 }
1545 
1546 static void
1547 cxgbe_init(void *arg)
1548 {
1549 	struct vi_info *vi = arg;
1550 	struct adapter *sc = vi->pi->adapter;
1551 
1552 	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1553 		return;
1554 	cxgbe_init_synchronized(vi);
1555 	end_synchronized_op(sc, 0);
1556 }
1557 
1558 static int
1559 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1560 {
1561 	int rc = 0, mtu, flags, can_sleep;
1562 	struct vi_info *vi = ifp->if_softc;
1563 	struct adapter *sc = vi->pi->adapter;
1564 	struct ifreq *ifr = (struct ifreq *)data;
1565 	uint32_t mask;
1566 
1567 	switch (cmd) {
1568 	case SIOCSIFMTU:
1569 		mtu = ifr->ifr_mtu;
1570 		if (mtu < ETHERMIN || mtu > MAX_MTU)
1571 			return (EINVAL);
1572 
1573 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1574 		if (rc)
1575 			return (rc);
1576 		ifp->if_mtu = mtu;
1577 		if (vi->flags & VI_INIT_DONE) {
1578 			t4_update_fl_bufsize(ifp);
1579 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1580 				rc = update_mac_settings(ifp, XGMAC_MTU);
1581 		}
1582 		end_synchronized_op(sc, 0);
1583 		break;
1584 
1585 	case SIOCSIFFLAGS:
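		/*
		 * The paths below have conflicting requirements:
		 * update_mac_settings() must run with the lock held (no
		 * sleeping), while cxgbe_init/uninit_synchronized() may need
		 * to sleep.  Start without sleeping and restart the operation
		 * in the other mode if the chosen path requires it.
		 */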
1586 		can_sleep = 0;
1587 redo_sifflags:
1588 		rc = begin_synchronized_op(sc, vi,
1589 		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1590 		if (rc)
1591 			return (rc);
1592 
1593 		if (ifp->if_flags & IFF_UP) {
1594 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1595 				flags = vi->if_flags;
1596 				if ((ifp->if_flags ^ flags) &
1597 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1598 					if (can_sleep == 1) {
1599 						end_synchronized_op(sc, 0);
1600 						can_sleep = 0;
1601 						goto redo_sifflags;
1602 					}
1603 					rc = update_mac_settings(ifp,
1604 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1605 				}
1606 			} else {
1607 				if (can_sleep == 0) {
1608 					end_synchronized_op(sc, LOCK_HELD);
1609 					can_sleep = 1;
1610 					goto redo_sifflags;
1611 				}
1612 				rc = cxgbe_init_synchronized(vi);
1613 			}
1614 			vi->if_flags = ifp->if_flags;
1615 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1616 			if (can_sleep == 0) {
1617 				end_synchronized_op(sc, LOCK_HELD);
1618 				can_sleep = 1;
1619 				goto redo_sifflags;
1620 			}
1621 			rc = cxgbe_uninit_synchronized(vi);
1622 		}
1623 		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1624 		break;
1625 
1626 	case SIOCADDMULTI:
1627 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1628 		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
1629 		if (rc)
1630 			return (rc);
1631 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1632 			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1633 		end_synchronized_op(sc, LOCK_HELD);
1634 		break;
1635 
1636 	case SIOCSIFCAP:
1637 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1638 		if (rc)
1639 			return (rc);
1640 
1641 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1642 		if (mask & IFCAP_TXCSUM) {
1643 			ifp->if_capenable ^= IFCAP_TXCSUM;
1644 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1645 
1646 			if (IFCAP_TSO4 & ifp->if_capenable &&
1647 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1648 				ifp->if_capenable &= ~IFCAP_TSO4;
1649 				if_printf(ifp,
1650 				    "tso4 disabled due to -txcsum.\n");
1651 			}
1652 		}
1653 		if (mask & IFCAP_TXCSUM_IPV6) {
1654 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1655 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1656 
1657 			if (IFCAP_TSO6 & ifp->if_capenable &&
1658 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1659 				ifp->if_capenable &= ~IFCAP_TSO6;
1660 				if_printf(ifp,
1661 				    "tso6 disabled due to -txcsum6.\n");
1662 			}
1663 		}
1664 		if (mask & IFCAP_RXCSUM)
1665 			ifp->if_capenable ^= IFCAP_RXCSUM;
1666 		if (mask & IFCAP_RXCSUM_IPV6)
1667 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1668 
1669 		/*
1670 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1671 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1672 		 * sending a TSO request our way, so it's sufficient to toggle
1673 		 * IFCAP_TSOx only.
1674 		 */
1675 		if (mask & IFCAP_TSO4) {
1676 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1677 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1678 				if_printf(ifp, "enable txcsum first.\n");
1679 				rc = EAGAIN;
1680 				goto fail;
1681 			}
1682 			ifp->if_capenable ^= IFCAP_TSO4;
1683 		}
1684 		if (mask & IFCAP_TSO6) {
1685 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1686 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1687 				if_printf(ifp, "enable txcsum6 first.\n");
1688 				rc = EAGAIN;
1689 				goto fail;
1690 			}
1691 			ifp->if_capenable ^= IFCAP_TSO6;
1692 		}
1693 		if (mask & IFCAP_LRO) {
1694 #if defined(INET) || defined(INET6)
1695 			int i;
1696 			struct sge_rxq *rxq;
1697 
1698 			ifp->if_capenable ^= IFCAP_LRO;
1699 			for_each_rxq(vi, i, rxq) {
1700 				if (ifp->if_capenable & IFCAP_LRO)
1701 					rxq->iq.flags |= IQ_LRO_ENABLED;
1702 				else
1703 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1704 			}
1705 #endif
1706 		}
1707 #ifdef TCP_OFFLOAD
1708 		if (mask & IFCAP_TOE) {
1709 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1710 
1711 			rc = toe_capability(vi, enable);
1712 			if (rc != 0)
1713 				goto fail;
1714 
1715 			ifp->if_capenable ^= mask;
1716 		}
1717 #endif
1718 		if (mask & IFCAP_VLAN_HWTAGGING) {
1719 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1720 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1721 				rc = update_mac_settings(ifp, XGMAC_VLANEX);
1722 		}
1723 		if (mask & IFCAP_VLAN_MTU) {
1724 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1725 
1726 			/* Need to find out how to disable auto-mtu-inflation */
1727 		}
1728 		if (mask & IFCAP_VLAN_HWTSO)
1729 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1730 		if (mask & IFCAP_VLAN_HWCSUM)
1731 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1732 
1733 #ifdef VLAN_CAPABILITIES
1734 		VLAN_CAPABILITIES(ifp);
1735 #endif
1736 fail:
1737 		end_synchronized_op(sc, 0);
1738 		break;
1739 
1740 	case SIOCSIFMEDIA:
1741 	case SIOCGIFMEDIA:
1742 	case SIOCGIFXMEDIA:
1743 		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
1744 		break;
1745 
1746 	case SIOCGI2C: {
1747 		struct ifi2creq i2c;
1748 
1749 		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1750 		if (rc != 0)
1751 			break;
1752 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1753 			rc = EPERM;
1754 			break;
1755 		}
1756 		if (i2c.len > sizeof(i2c.data)) {
1757 			rc = EINVAL;
1758 			break;
1759 		}
1760 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1761 		if (rc)
1762 			return (rc);
1763 		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
1764 		    i2c.offset, i2c.len, &i2c.data[0]);
1765 		end_synchronized_op(sc, 0);
1766 		if (rc == 0)
1767 			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1768 		break;
1769 	}
1770 
1771 	default:
1772 		rc = ether_ioctl(ifp, cmd, data);
1773 	}
1774 
1775 	return (rc);
1776 }
1777 
1778 static int
1779 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1780 {
1781 	struct vi_info *vi = ifp->if_softc;
1782 	struct port_info *pi = vi->pi;
1783 	struct adapter *sc = pi->adapter;
1784 	struct sge_txq *txq;
1785 	void *items[1];
1786 	int rc;
1787 
1788 	M_ASSERTPKTHDR(m);
1789 	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
1790 
1791 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1792 		m_freem(m);
1793 		return (ENETDOWN);
1794 	}
1795 
1796 	rc = parse_pkt(sc, &m);
1797 	if (__predict_false(rc != 0)) {
1798 		MPASS(m == NULL);			/* was freed already */
1799 		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
1800 		return (rc);
1801 	}
1802 
1803 	/* Select a txq. */
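	/*
	 * For instance (illustrative values): with vi->ntxq = 8 and
	 * vi->rsrv_noflowq = 1, the code below sends packets that carry a
	 * flowid to txq[first_txq + 1] .. txq[first_txq + 7] and packets
	 * without a flowid to txq[first_txq].
	 */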
1804 	txq = &sc->sge.txq[vi->first_txq];
1805 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1806 		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1807 		    vi->rsrv_noflowq);
1808 
1809 	items[0] = m;
1810 	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1811 	if (__predict_false(rc != 0))
1812 		m_freem(m);
1813 
1814 	return (rc);
1815 }
1816 
1817 static void
1818 cxgbe_qflush(struct ifnet *ifp)
1819 {
1820 	struct vi_info *vi = ifp->if_softc;
1821 	struct sge_txq *txq;
1822 	int i;
1823 
1824 	/* queues do not exist if !VI_INIT_DONE. */
1825 	if (vi->flags & VI_INIT_DONE) {
1826 		for_each_txq(vi, i, txq) {
1827 			TXQ_LOCK(txq);
1828 			txq->eq.flags &= ~EQ_ENABLED;
1829 			TXQ_UNLOCK(txq);
1830 			while (!mp_ring_is_idle(txq->r)) {
1831 				mp_ring_check_drainage(txq->r, 0);
1832 				pause("qflush", 1);
1833 			}
1834 		}
1835 	}
1836 	if_qflush(ifp);
1837 }
1838 
1839 static uint64_t
1840 vi_get_counter(struct ifnet *ifp, ift_counter c)
1841 {
1842 	struct vi_info *vi = ifp->if_softc;
1843 	struct fw_vi_stats_vf *s = &vi->stats;
1844 
1845 	vi_refresh_stats(vi->pi->adapter, vi);
1846 
1847 	switch (c) {
1848 	case IFCOUNTER_IPACKETS:
1849 		return (s->rx_bcast_frames + s->rx_mcast_frames +
1850 		    s->rx_ucast_frames);
1851 	case IFCOUNTER_IERRORS:
1852 		return (s->rx_err_frames);
1853 	case IFCOUNTER_OPACKETS:
1854 		return (s->tx_bcast_frames + s->tx_mcast_frames +
1855 		    s->tx_ucast_frames + s->tx_offload_frames);
1856 	case IFCOUNTER_OERRORS:
1857 		return (s->tx_drop_frames);
1858 	case IFCOUNTER_IBYTES:
1859 		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1860 		    s->rx_ucast_bytes);
1861 	case IFCOUNTER_OBYTES:
1862 		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1863 		    s->tx_ucast_bytes + s->tx_offload_bytes);
1864 	case IFCOUNTER_IMCASTS:
1865 		return (s->rx_mcast_frames);
1866 	case IFCOUNTER_OMCASTS:
1867 		return (s->tx_mcast_frames);
1868 	case IFCOUNTER_OQDROPS: {
1869 		uint64_t drops;
1870 
1871 		drops = 0;
1872 		if (vi->flags & VI_INIT_DONE) {
1873 			int i;
1874 			struct sge_txq *txq;
1875 
1876 			for_each_txq(vi, i, txq)
1877 				drops += counter_u64_fetch(txq->r->drops);
1878 		}
1879 
1880 		return (drops);
1881 
1882 	}
1883 
1884 	default:
1885 		return (if_get_counter_default(ifp, c));
1886 	}
1887 }
1888 
1889 uint64_t
1890 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1891 {
1892 	struct vi_info *vi = ifp->if_softc;
1893 	struct port_info *pi = vi->pi;
1894 	struct adapter *sc = pi->adapter;
1895 	struct port_stats *s = &pi->stats;
1896 
1897 	if (pi->nvi > 1 || sc->flags & IS_VF)
1898 		return (vi_get_counter(ifp, c));
1899 
1900 	cxgbe_refresh_stats(sc, pi);
1901 
1902 	switch (c) {
1903 	case IFCOUNTER_IPACKETS:
1904 		return (s->rx_frames);
1905 
1906 	case IFCOUNTER_IERRORS:
1907 		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
1908 		    s->rx_fcs_err + s->rx_len_err);
1909 
1910 	case IFCOUNTER_OPACKETS:
1911 		return (s->tx_frames);
1912 
1913 	case IFCOUNTER_OERRORS:
1914 		return (s->tx_error_frames);
1915 
1916 	case IFCOUNTER_IBYTES:
1917 		return (s->rx_octets);
1918 
1919 	case IFCOUNTER_OBYTES:
1920 		return (s->tx_octets);
1921 
1922 	case IFCOUNTER_IMCASTS:
1923 		return (s->rx_mcast_frames);
1924 
1925 	case IFCOUNTER_OMCASTS:
1926 		return (s->tx_mcast_frames);
1927 
1928 	case IFCOUNTER_IQDROPS:
1929 		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
1930 		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
1931 		    s->rx_trunc3 + pi->tnl_cong_drops);
1932 
1933 	case IFCOUNTER_OQDROPS: {
1934 		uint64_t drops;
1935 
1936 		drops = s->tx_drop;
1937 		if (vi->flags & VI_INIT_DONE) {
1938 			int i;
1939 			struct sge_txq *txq;
1940 
1941 			for_each_txq(vi, i, txq)
1942 				drops += counter_u64_fetch(txq->r->drops);
1943 		}
1944 
1945 		return (drops);
1946 
1947 	}
1948 
1949 	default:
1950 		return (if_get_counter_default(ifp, c));
1951 	}
1952 }
1953 
1954 static int
1955 cxgbe_media_change(struct ifnet *ifp)
1956 {
1957 	struct vi_info *vi = ifp->if_softc;
1958 
1959 	device_printf(vi->dev, "%s unimplemented.\n", __func__);
1960 
1961 	return (EOPNOTSUPP);
1962 }
1963 
1964 static void
1965 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1966 {
1967 	struct vi_info *vi = ifp->if_softc;
1968 	struct port_info *pi = vi->pi;
1969 	struct ifmedia_entry *cur;
1970 	int speed = pi->link_cfg.speed;
1971 
1972 	cur = vi->media.ifm_cur;
1973 
1974 	ifmr->ifm_status = IFM_AVALID;
1975 	if (!pi->link_cfg.link_ok)
1976 		return;
1977 
1978 	ifmr->ifm_status |= IFM_ACTIVE;
1979 
1980 	/* active and current will differ iff current media is autoselect. */
1981 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1982 		return;
1983 
1984 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1985 	if (speed == 10000)
1986 		ifmr->ifm_active |= IFM_10G_T;
1987 	else if (speed == 1000)
1988 		ifmr->ifm_active |= IFM_1000_T;
1989 	else if (speed == 100)
1990 		ifmr->ifm_active |= IFM_100_TX;
1991 	else if (speed == 10)
1992 		ifmr->ifm_active |= IFM_10_T;
1993 	else
1994 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1995 			    speed));
1996 }
1997 
1998 static int
1999 vcxgbe_probe(device_t dev)
2000 {
2001 	char buf[128];
2002 	struct vi_info *vi = device_get_softc(dev);
2003 
2004 	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2005 	    vi - vi->pi->vi);
2006 	device_set_desc_copy(dev, buf);
2007 
2008 	return (BUS_PROBE_DEFAULT);
2009 }
2010 
2011 static int
2012 vcxgbe_attach(device_t dev)
2013 {
2014 	struct vi_info *vi;
2015 	struct port_info *pi;
2016 	struct adapter *sc;
2017 	int func, index, rc;
2018 	u32 param, val;
2019 
2020 	vi = device_get_softc(dev);
2021 	pi = vi->pi;
2022 	sc = pi->adapter;
2023 
2024 	index = vi - pi->vi;
2025 	KASSERT(index < nitems(vi_mac_funcs),
2026 	    ("%s: VI %s doesn't have a MAC func", __func__,
2027 	    device_get_nameunit(dev)));
2028 	func = vi_mac_funcs[index];
2029 	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2030 	    vi->hw_addr, &vi->rss_size, func, 0);
2031 	if (rc < 0) {
2032 		device_printf(dev, "Failed to allocate virtual interface "
2033 		    "for port %d: %d\n", pi->port_id, -rc);
2034 		return (-rc);
2035 	}
2036 	vi->viid = rc;
2037 	if (chip_id(sc) <= CHELSIO_T5)
2038 		vi->smt_idx = (rc & 0x7f) << 1;
2039 	else
2040 		vi->smt_idx = (rc & 0x7f);
2041 
2042 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2043 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2044 	    V_FW_PARAMS_PARAM_YZ(vi->viid);
2045 	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2046 	if (rc)
2047 		vi->rss_base = 0xffff;
2048 	else {
2049 		/* MPASS((val >> 16) == rss_size); */
2050 		vi->rss_base = val & 0xffff;
2051 	}
2052 
2053 	rc = cxgbe_vi_attach(dev, vi);
2054 	if (rc) {
2055 		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2056 		return (rc);
2057 	}
2058 	return (0);
2059 }
2060 
2061 static int
2062 vcxgbe_detach(device_t dev)
2063 {
2064 	struct vi_info *vi;
2065 	struct adapter *sc;
2066 
2067 	vi = device_get_softc(dev);
2068 	sc = vi->pi->adapter;
2069 
2070 	doom_vi(sc, vi);
2071 
2072 	cxgbe_vi_detach(vi);
2073 	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2074 
2075 	end_synchronized_op(sc, 0);
2076 
2077 	return (0);
2078 }
2079 
2080 void
2081 t4_fatal_err(struct adapter *sc)
2082 {
2083 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2084 	t4_intr_disable(sc);
2085 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2086 	    device_get_nameunit(sc->dev));
2087 }
2088 
2089 void
2090 t4_add_adapter(struct adapter *sc)
2091 {
2092 	sx_xlock(&t4_list_lock);
2093 	SLIST_INSERT_HEAD(&t4_list, sc, link);
2094 	sx_xunlock(&t4_list_lock);
2095 }
2096 
2097 int
2098 t4_map_bars_0_and_4(struct adapter *sc)
2099 {
2100 	sc->regs_rid = PCIR_BAR(0);
2101 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2102 	    &sc->regs_rid, RF_ACTIVE);
2103 	if (sc->regs_res == NULL) {
2104 		device_printf(sc->dev, "cannot map registers.\n");
2105 		return (ENXIO);
2106 	}
2107 	sc->bt = rman_get_bustag(sc->regs_res);
2108 	sc->bh = rman_get_bushandle(sc->regs_res);
2109 	sc->mmio_len = rman_get_size(sc->regs_res);
2110 	setbit(&sc->doorbells, DOORBELL_KDB);
2111 
2112 	sc->msix_rid = PCIR_BAR(4);
2113 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2114 	    &sc->msix_rid, RF_ACTIVE);
2115 	if (sc->msix_res == NULL) {
2116 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2117 		return (ENXIO);
2118 	}
2119 
2120 	return (0);
2121 }
2122 
2123 int
2124 t4_map_bar_2(struct adapter *sc)
2125 {
2126 
2127 	/*
2128 	 * T4: only the iWARP driver uses the userspace doorbells.  There is
2129 	 * no need to map this BAR if RDMA is disabled.
2130 	 */
2131 	if (is_t4(sc) && sc->rdmacaps == 0)
2132 		return (0);
2133 
2134 	sc->udbs_rid = PCIR_BAR(2);
2135 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2136 	    &sc->udbs_rid, RF_ACTIVE);
2137 	if (sc->udbs_res == NULL) {
2138 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
2139 		return (ENXIO);
2140 	}
2141 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
2142 
2143 	if (chip_id(sc) >= CHELSIO_T5) {
2144 		setbit(&sc->doorbells, DOORBELL_UDB);
2145 #if defined(__i386__) || defined(__amd64__)
2146 		if (t5_write_combine) {
2147 			int rc, mode;
2148 
2149 			/*
2150 			 * Enable write combining on BAR2.  This is the
2151 			 * userspace doorbell BAR and is split into 128B
2152 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
2153 			 * with an egress queue.  The first 64B has the doorbell
2154 			 * and the second 64B can be used to submit a tx work
2155 			 * request with an implicit doorbell.
2156 			 */
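			/*
			 * Illustrative effect: with write combining in place a
			 * small tx work request can be copied into the second
			 * 64B of a queue's region and the doorbell rung with a
			 * single WC write (DOORBELL_WCWR); without it the
			 * driver falls back to a plain doorbell write
			 * (DOORBELL_UDB or DOORBELL_KDB).
			 */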
2157 
2158 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2159 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2160 			if (rc == 0) {
2161 				clrbit(&sc->doorbells, DOORBELL_UDB);
2162 				setbit(&sc->doorbells, DOORBELL_WCWR);
2163 				setbit(&sc->doorbells, DOORBELL_UDBWC);
2164 			} else {
2165 				device_printf(sc->dev,
2166 				    "couldn't enable write combining: %d\n",
2167 				    rc);
2168 			}
2169 
2170 			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2171 			t4_write_reg(sc, A_SGE_STAT_CFG,
2172 			    V_STATSOURCE_T5(7) | mode);
2173 		}
2174 #endif
2175 	}
2176 
2177 	return (0);
2178 }
2179 
2180 struct memwin_init {
2181 	uint32_t base;
2182 	uint32_t aperture;
2183 };
2184 
2185 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2186 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2187 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2188 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2189 };
2190 
2191 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2192 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2193 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2194 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2195 };
2196 
2197 static void
2198 setup_memwin(struct adapter *sc)
2199 {
2200 	const struct memwin_init *mw_init;
2201 	struct memwin *mw;
2202 	int i;
2203 	uint32_t bar0;
2204 
2205 	if (is_t4(sc)) {
2206 		/*
2207 		 * Read low 32b of bar0 indirectly via the hardware backdoor
2208 		 * mechanism.  Works from within PCI passthrough environments
2209 		 * too, where rman_get_start() can return a different value.  We
2210 		 * need to program the T4 memory window decoders with the actual
2211 		 * addresses that will be coming across the PCIe link.
2212 		 */
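		/*
		 * For example (illustrative values): if the raw BAR0 register
		 * reads 0xd0000004, masking with PCIM_BAR_MEM_BASE leaves
		 * 0xd0000000, which is then added to each window's base when
		 * the decoders are programmed below.
		 */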
2213 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2214 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2215 
2216 		mw_init = &t4_memwin[0];
2217 	} else {
2218 		/* T5+ use the relative offset inside the PCIe BAR */
2219 		bar0 = 0;
2220 
2221 		mw_init = &t5_memwin[0];
2222 	}
2223 
2224 	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2225 		rw_init(&mw->mw_lock, "memory window access");
2226 		mw->mw_base = mw_init->base;
2227 		mw->mw_aperture = mw_init->aperture;
2228 		mw->mw_curpos = 0;
2229 		t4_write_reg(sc,
2230 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2231 		    (mw->mw_base + bar0) | V_BIR(0) |
2232 		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
2233 		rw_wlock(&mw->mw_lock);
2234 		position_memwin(sc, i, 0);
2235 		rw_wunlock(&mw->mw_lock);
2236 	}
2237 
2238 	/* flush */
2239 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2240 }
2241 
2242 /*
2243  * Positions the memory window at the given address in the card's address space.
2244  * There are some alignment requirements and the actual position may be at an
2245  * address prior to the requested address.  mw->mw_curpos always has the actual
2246  * position of the window.
2247  */
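/*
 * For example (illustrative): a request for address 0x12345 positions the
 * window at 0x12340 on T4 (16B alignment) and at 0x12300 on T5+ (128B
 * alignment).  Callers must use mw->mw_curpos, not the address they asked
 * for, when computing offsets into the window.
 */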
2248 static void
2249 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2250 {
2251 	struct memwin *mw;
2252 	uint32_t pf;
2253 	uint32_t reg;
2254 
2255 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
2256 	mw = &sc->memwin[idx];
2257 	rw_assert(&mw->mw_lock, RA_WLOCKED);
2258 
2259 	if (is_t4(sc)) {
2260 		pf = 0;
2261 		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
2262 	} else {
2263 		pf = V_PFNUM(sc->pf);
2264 		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
2265 	}
2266 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2267 	t4_write_reg(sc, reg, mw->mw_curpos | pf);
2268 	t4_read_reg(sc, reg);	/* flush */
2269 }
2270 
2271 static int
2272 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2273     int len, int rw)
2274 {
2275 	struct memwin *mw;
2276 	uint32_t mw_end, v;
2277 
2278 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
2279 
2280 	/* Memory can only be accessed in naturally aligned 4 byte units */
2281 	if (addr & 3 || len & 3 || len <= 0)
2282 		return (EINVAL);
2283 
2284 	mw = &sc->memwin[idx];
2285 	while (len > 0) {
2286 		rw_rlock(&mw->mw_lock);
2287 		mw_end = mw->mw_curpos + mw->mw_aperture;
2288 		if (addr >= mw_end || addr < mw->mw_curpos) {
2289 			/* Will need to reposition the window */
2290 			if (!rw_try_upgrade(&mw->mw_lock)) {
2291 				rw_runlock(&mw->mw_lock);
2292 				rw_wlock(&mw->mw_lock);
2293 			}
2294 			rw_assert(&mw->mw_lock, RA_WLOCKED);
2295 			position_memwin(sc, idx, addr);
2296 			rw_downgrade(&mw->mw_lock);
2297 			mw_end = mw->mw_curpos + mw->mw_aperture;
2298 		}
2299 		rw_assert(&mw->mw_lock, RA_RLOCKED);
2300 		while (addr < mw_end && len > 0) {
2301 			if (rw == 0) {
2302 				v = t4_read_reg(sc, mw->mw_base + addr -
2303 				    mw->mw_curpos);
2304 				*val++ = le32toh(v);
2305 			} else {
2306 				v = *val++;
2307 				t4_write_reg(sc, mw->mw_base + addr -
2308 				    mw->mw_curpos, htole32(v));
2309 			}
2310 			addr += 4;
2311 			len -= 4;
2312 		}
2313 		rw_runlock(&mw->mw_lock);
2314 	}
2315 
2316 	return (0);
2317 }
2318 
2319 static inline int
2320 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2321     int len)
2322 {
2323 
2324 	return (rw_via_memwin(sc, idx, addr, val, len, 0));
2325 }
2326 
2327 static inline int
2328 write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
2329     const uint32_t *val, int len)
2330 {
2331 
2332 	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
2333 }
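
/*
 * Illustrative use (not part of the driver proper): reading 16 bytes of
 * adapter memory at card address 0x10000 through memory window 0 might look
 * like
 *
 *	uint32_t buf[4];
 *	int rc;
 *
 *	rc = read_via_memwin(sc, 0, 0x10000, buf, sizeof(buf));
 *
 * The helpers reposition the window automatically if the requested address
 * is not currently covered by it.
 */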
2334 
2335 static int
2336 t4_range_cmp(const void *a, const void *b)
2337 {
2338 	return ((const struct t4_range *)a)->start -
2339 	       ((const struct t4_range *)b)->start;
2340 }
2341 
2342 /*
2343  * Verify that the memory range specified by the addr/len pair is valid within
2344  * the card's address space.
2345  */
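/*
 * For example (illustrative sizes): if EDC0 decodes [0, 256MB) and EDC1
 * decodes [256MB, 512MB), the sort-and-merge pass below collapses them into a
 * single [0, 512MB) range, so a request that straddles the EDC0/EDC1 boundary
 * is still accepted.  The real bases and sizes come from the MA BAR registers.
 */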
2346 static int
2347 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2348 {
2349 	struct t4_range mem_ranges[4], *r, *next;
2350 	uint32_t em, addr_len;
2351 	int i, n, remaining;
2352 
2353 	/* Memory can only be accessed in naturally aligned 4 byte units */
2354 	if (addr & 3 || len & 3 || len <= 0)
2355 		return (EINVAL);
2356 
2357 	/* Enabled memories */
2358 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2359 
2360 	r = &mem_ranges[0];
2361 	n = 0;
2362 	bzero(r, sizeof(mem_ranges));
2363 	if (em & F_EDRAM0_ENABLE) {
2364 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2365 		r->size = G_EDRAM0_SIZE(addr_len) << 20;
2366 		if (r->size > 0) {
2367 			r->start = G_EDRAM0_BASE(addr_len) << 20;
2368 			if (addr >= r->start &&
2369 			    addr + len <= r->start + r->size)
2370 				return (0);
2371 			r++;
2372 			n++;
2373 		}
2374 	}
2375 	if (em & F_EDRAM1_ENABLE) {
2376 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2377 		r->size = G_EDRAM1_SIZE(addr_len) << 20;
2378 		if (r->size > 0) {
2379 			r->start = G_EDRAM1_BASE(addr_len) << 20;
2380 			if (addr >= r->start &&
2381 			    addr + len <= r->start + r->size)
2382 				return (0);
2383 			r++;
2384 			n++;
2385 		}
2386 	}
2387 	if (em & F_EXT_MEM_ENABLE) {
2388 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2389 		r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2390 		if (r->size > 0) {
2391 			r->start = G_EXT_MEM_BASE(addr_len) << 20;
2392 			if (addr >= r->start &&
2393 			    addr + len <= r->start + r->size)
2394 				return (0);
2395 			r++;
2396 			n++;
2397 		}
2398 	}
2399 	if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2400 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2401 		r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2402 		if (r->size > 0) {
2403 			r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2404 			if (addr >= r->start &&
2405 			    addr + len <= r->start + r->size)
2406 				return (0);
2407 			r++;
2408 			n++;
2409 		}
2410 	}
2411 	MPASS(n <= nitems(mem_ranges));
2412 
2413 	if (n > 1) {
2414 		/* Sort and merge the ranges. */
2415 		qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2416 
2417 		/* Start from index 0 and examine the next n - 1 entries. */
2418 		r = &mem_ranges[0];
2419 		for (remaining = n - 1; remaining > 0; remaining--, r++) {
2420 
2421 			MPASS(r->size > 0);	/* r is a valid entry. */
2422 			next = r + 1;
2423 			MPASS(next->size > 0);	/* and so is the next one. */
2424 
2425 			while (r->start + r->size >= next->start) {
2426 				/* Merge the next one into the current entry. */
2427 				r->size = max(r->start + r->size,
2428 				    next->start + next->size) - r->start;
2429 				n--;	/* One fewer entry in total. */
2430 				if (--remaining == 0)
2431 					goto done;	/* short circuit */
2432 				next++;
2433 			}
2434 			if (next != r + 1) {
2435 				/*
2436 				 * Some entries were merged into r and next
2437 				 * points to the first valid entry that couldn't
2438 				 * be merged.
2439 				 */
2440 				MPASS(next->size > 0);	/* must be valid */
2441 				memcpy(r + 1, next, remaining * sizeof(*r));
2442 #ifdef INVARIANTS
2443 				/*
2444 				 * This is so that the size assertions in the
2445 				 * next iteration of the loop do the right
2446 				 * thing for entries that were pulled up and are
2447 				 * no longer valid.
2448 				 */
2449 				MPASS(n < nitems(mem_ranges));
2450 				bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2451 				    sizeof(struct t4_range));
2452 #endif
2453 			}
2454 		}
2455 done:
2456 		/* Done merging the ranges. */
2457 		MPASS(n > 0);
2458 		r = &mem_ranges[0];
2459 		for (i = 0; i < n; i++, r++) {
2460 			if (addr >= r->start &&
2461 			    addr + len <= r->start + r->size)
2462 				return (0);
2463 		}
2464 	}
2465 
2466 	return (EFAULT);
2467 }
2468 
2469 static int
2470 fwmtype_to_hwmtype(int mtype)
2471 {
2472 
2473 	switch (mtype) {
2474 	case FW_MEMTYPE_EDC0:
2475 		return (MEM_EDC0);
2476 	case FW_MEMTYPE_EDC1:
2477 		return (MEM_EDC1);
2478 	case FW_MEMTYPE_EXTMEM:
2479 		return (MEM_MC0);
2480 	case FW_MEMTYPE_EXTMEM1:
2481 		return (MEM_MC1);
2482 	default:
2483 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2484 	}
2485 }
2486 
2487 /*
2488  * Verify that the memory range specified by the memtype/offset/len pair is
2489  * valid and lies entirely within the memtype specified.  The global address of
2490  * the start of the range is returned in addr.
2491  */
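/*
 * E.g. (illustrative values): with EDC1 based at 0x08000000 per
 * A_MA_EDRAM1_BAR, a request for memtype EDC1, off 0x1000, len 0x100 yields
 * *addr = 0x08001000, which is then checked against the enabled ranges by
 * validate_mem_range().
 */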
2492 static int
2493 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2494     uint32_t *addr)
2495 {
2496 	uint32_t em, addr_len, maddr;
2497 
2498 	/* Memory can only be accessed in naturally aligned 4 byte units */
2499 	if (off & 3 || len & 3 || len == 0)
2500 		return (EINVAL);
2501 
2502 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2503 	switch (fwmtype_to_hwmtype(mtype)) {
2504 	case MEM_EDC0:
2505 		if (!(em & F_EDRAM0_ENABLE))
2506 			return (EINVAL);
2507 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2508 		maddr = G_EDRAM0_BASE(addr_len) << 20;
2509 		break;
2510 	case MEM_EDC1:
2511 		if (!(em & F_EDRAM1_ENABLE))
2512 			return (EINVAL);
2513 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2514 		maddr = G_EDRAM1_BASE(addr_len) << 20;
2515 		break;
2516 	case MEM_MC:
2517 		if (!(em & F_EXT_MEM_ENABLE))
2518 			return (EINVAL);
2519 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2520 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
2521 		break;
2522 	case MEM_MC1:
2523 		if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2524 			return (EINVAL);
2525 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2526 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2527 		break;
2528 	default:
2529 		return (EINVAL);
2530 	}
2531 
2532 	*addr = maddr + off;	/* global address */
2533 	return (validate_mem_range(sc, *addr, len));
2534 }
2535 
2536 static int
2537 fixup_devlog_params(struct adapter *sc)
2538 {
2539 	struct devlog_params *dparams = &sc->params.devlog;
2540 	int rc;
2541 
2542 	rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2543 	    dparams->size, &dparams->addr);
2544 
2545 	return (rc);
2546 }
2547 
2548 static int
2549 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2550     struct intrs_and_queues *iaq)
2551 {
2552 	int rc, itype, navail, nrxq10g, nrxq1g, n;
2553 	int nofldrxq10g = 0, nofldrxq1g = 0;
2554 
2555 	bzero(iaq, sizeof(*iaq));
2556 
2557 	iaq->ntxq10g = t4_ntxq10g;
2558 	iaq->ntxq1g = t4_ntxq1g;
2559 	iaq->ntxq_vi = t4_ntxq_vi;
2560 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2561 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2562 	iaq->nrxq_vi = t4_nrxq_vi;
2563 	iaq->rsrv_noflowq = t4_rsrv_noflowq;
2564 #ifdef TCP_OFFLOAD
2565 	if (is_offload(sc)) {
2566 		iaq->nofldtxq10g = t4_nofldtxq10g;
2567 		iaq->nofldtxq1g = t4_nofldtxq1g;
2568 		iaq->nofldtxq_vi = t4_nofldtxq_vi;
2569 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2570 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2571 		iaq->nofldrxq_vi = t4_nofldrxq_vi;
2572 	}
2573 #endif
2574 #ifdef DEV_NETMAP
2575 	iaq->nnmtxq_vi = t4_nnmtxq_vi;
2576 	iaq->nnmrxq_vi = t4_nnmrxq_vi;
2577 #endif
2578 
2579 	for (itype = INTR_MSIX; itype; itype >>= 1) {
2580 
2581 		if ((itype & t4_intr_types) == 0)
2582 			continue;	/* not allowed */
2583 
2584 		if (itype == INTR_MSIX)
2585 			navail = pci_msix_count(sc->dev);
2586 		else if (itype == INTR_MSI)
2587 			navail = pci_msi_count(sc->dev);
2588 		else
2589 			navail = 1;
2590 restart:
2591 		if (navail == 0)
2592 			continue;
2593 
2594 		iaq->intr_type = itype;
2595 		iaq->intr_flags_10g = 0;
2596 		iaq->intr_flags_1g = 0;
2597 
2598 		/*
2599 		 * Best option: an interrupt vector for errors, one for the
2600 		 * firmware event queue, and one for every rxq (NIC and TOE) of
2601 		 * every VI.  The VIs that support netmap use the same
2602 		 * interrupts for the NIC rx queues and the netmap rx queues
2603 		 * because only one set of queues is active at a time.
2604 		 */
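		/*
		 * For example (illustrative counts): a 2-port 10G adapter with
		 * nrxq10g = 8, nofldrxq10g = 2, and a single VI per port needs
		 * T4_EXTRA_INTR + 2 * (8 + 2) vectors in this scheme.
		 */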
2605 		iaq->nirq = T4_EXTRA_INTR;
2606 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2607 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2608 		iaq->nirq += (n10g + n1g) * (num_vis - 1) *
2609 		    max(iaq->nrxq_vi, iaq->nnmrxq_vi);	/* See comment above. */
2610 		iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
2611 		if (iaq->nirq <= navail &&
2612 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
2613 			iaq->intr_flags_10g = INTR_ALL;
2614 			iaq->intr_flags_1g = INTR_ALL;
2615 			goto allocate;
2616 		}
2617 
2618 		/* Disable the VIs (and netmap) if there aren't enough intrs */
2619 		if (num_vis > 1) {
2620 			device_printf(sc->dev, "virtual interfaces disabled "
2621 			    "because num_vis=%u with current settings "
2622 			    "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
2623 			    "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
2624 			    "nnmrxq_vi=%u) would need %u interrupts but "
2625 			    "only %u are available.\n", num_vis, nrxq10g,
2626 			    nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
2627 			    iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
2628 			    navail);
2629 			num_vis = 1;
2630 			iaq->ntxq_vi = iaq->nrxq_vi = 0;
2631 			iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
2632 			iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
2633 			goto restart;
2634 		}
2635 
2636 		/*
2637 		 * Second best option: a vector for errors, one for the firmware
2638 		 * event queue, and vectors for either all the NIC rx queues or
2639 		 * all the TOE rx queues.  The queues that don't get vectors
2640 		 * will forward their interrupts to those that do.
2641 		 */
2642 		iaq->nirq = T4_EXTRA_INTR;
2643 		if (nrxq10g >= nofldrxq10g) {
2644 			iaq->intr_flags_10g = INTR_RXQ;
2645 			iaq->nirq += n10g * nrxq10g;
2646 		} else {
2647 			iaq->intr_flags_10g = INTR_OFLD_RXQ;
2648 			iaq->nirq += n10g * nofldrxq10g;
2649 		}
2650 		if (nrxq1g >= nofldrxq1g) {
2651 			iaq->intr_flags_1g = INTR_RXQ;
2652 			iaq->nirq += n1g * nrxq1g;
2653 		} else {
2654 			iaq->intr_flags_1g = INTR_OFLD_RXQ;
2655 			iaq->nirq += n1g * nofldrxq1g;
2656 		}
2657 		if (iaq->nirq <= navail &&
2658 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
2659 			goto allocate;
2660 
2661 		/*
2662 		 * Next best option: an interrupt vector for errors, one for the
2663 		 * firmware event queue, and at least one per main-VI.  At this
2664 		 * point we know we'll have to downsize nrxq and/or nofldrxq to
2665 		 * fit what's available to us.
2666 		 */
2667 		iaq->nirq = T4_EXTRA_INTR;
2668 		iaq->nirq += n10g + n1g;
2669 		if (iaq->nirq <= navail) {
2670 			int leftover = navail - iaq->nirq;
2671 
2672 			if (n10g > 0) {
2673 				int target = max(nrxq10g, nofldrxq10g);
2674 
2675 				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2676 				    INTR_RXQ : INTR_OFLD_RXQ;
2677 
2678 				n = 1;
2679 				while (n < target && leftover >= n10g) {
2680 					leftover -= n10g;
2681 					iaq->nirq += n10g;
2682 					n++;
2683 				}
2684 				iaq->nrxq10g = min(n, nrxq10g);
2685 #ifdef TCP_OFFLOAD
2686 				iaq->nofldrxq10g = min(n, nofldrxq10g);
2687 #endif
2688 			}
2689 
2690 			if (n1g > 0) {
2691 				int target = max(nrxq1g, nofldrxq1g);
2692 
2693 				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2694 				    INTR_RXQ : INTR_OFLD_RXQ;
2695 
2696 				n = 1;
2697 				while (n < target && leftover >= n1g) {
2698 					leftover -= n1g;
2699 					iaq->nirq += n1g;
2700 					n++;
2701 				}
2702 				iaq->nrxq1g = min(n, nrxq1g);
2703 #ifdef TCP_OFFLOAD
2704 				iaq->nofldrxq1g = min(n, nofldrxq1g);
2705 #endif
2706 			}
2707 
2708 			if (itype != INTR_MSI || powerof2(iaq->nirq))
2709 				goto allocate;
2710 		}
2711 
2712 		/*
2713 		 * Least desirable option: one interrupt vector for everything.
2714 		 */
2715 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2716 		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2717 #ifdef TCP_OFFLOAD
2718 		if (is_offload(sc))
2719 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2720 #endif
2721 allocate:
2722 		navail = iaq->nirq;
2723 		rc = 0;
2724 		if (itype == INTR_MSIX)
2725 			rc = pci_alloc_msix(sc->dev, &navail);
2726 		else if (itype == INTR_MSI)
2727 			rc = pci_alloc_msi(sc->dev, &navail);
2728 
2729 		if (rc == 0) {
2730 			if (navail == iaq->nirq)
2731 				return (0);
2732 
2733 			/*
2734 			 * Didn't get the number requested.  Use whatever number
2735 			 * the kernel is willing to allocate (it's in navail).
2736 			 */
2737 			device_printf(sc->dev, "fewer vectors than requested, "
2738 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2739 			    itype, iaq->nirq, navail);
2740 			pci_release_msi(sc->dev);
2741 			goto restart;
2742 		}
2743 
2744 		device_printf(sc->dev,
2745 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
2746 		    itype, rc, iaq->nirq, navail);
2747 	}
2748 
2749 	device_printf(sc->dev,
2750 	    "failed to find a usable interrupt type.  "
2751 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
2752 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2753 
2754 	return (ENXIO);
2755 }
2756 
2757 #define FW_VERSION(chip) ( \
2758     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2759     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2760     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2761     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2762 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
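
/*
 * FW_VERSION(T4), for example, packs T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD into
 * the single 32-bit word that the G_FW_HDR_FW_VER_* macros used in the
 * messages below decode.
 */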
2763 
2764 struct fw_info {
2765 	uint8_t chip;
2766 	char *kld_name;
2767 	char *fw_mod_name;
2768 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
2769 } fw_info[] = {
2770 	{
2771 		.chip = CHELSIO_T4,
2772 		.kld_name = "t4fw_cfg",
2773 		.fw_mod_name = "t4fw",
2774 		.fw_hdr = {
2775 			.chip = FW_HDR_CHIP_T4,
2776 			.fw_ver = htobe32_const(FW_VERSION(T4)),
2777 			.intfver_nic = FW_INTFVER(T4, NIC),
2778 			.intfver_vnic = FW_INTFVER(T4, VNIC),
2779 			.intfver_ofld = FW_INTFVER(T4, OFLD),
2780 			.intfver_ri = FW_INTFVER(T4, RI),
2781 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2782 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
2783 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2784 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
2785 		},
2786 	}, {
2787 		.chip = CHELSIO_T5,
2788 		.kld_name = "t5fw_cfg",
2789 		.fw_mod_name = "t5fw",
2790 		.fw_hdr = {
2791 			.chip = FW_HDR_CHIP_T5,
2792 			.fw_ver = htobe32_const(FW_VERSION(T5)),
2793 			.intfver_nic = FW_INTFVER(T5, NIC),
2794 			.intfver_vnic = FW_INTFVER(T5, VNIC),
2795 			.intfver_ofld = FW_INTFVER(T5, OFLD),
2796 			.intfver_ri = FW_INTFVER(T5, RI),
2797 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2798 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
2799 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2800 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
2801 		},
2802 	}, {
2803 		.chip = CHELSIO_T6,
2804 		.kld_name = "t6fw_cfg",
2805 		.fw_mod_name = "t6fw",
2806 		.fw_hdr = {
2807 			.chip = FW_HDR_CHIP_T6,
2808 			.fw_ver = htobe32_const(FW_VERSION(T6)),
2809 			.intfver_nic = FW_INTFVER(T6, NIC),
2810 			.intfver_vnic = FW_INTFVER(T6, VNIC),
2811 			.intfver_ofld = FW_INTFVER(T6, OFLD),
2812 			.intfver_ri = FW_INTFVER(T6, RI),
2813 			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
2814 			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
2815 			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
2816 			.intfver_fcoe = FW_INTFVER(T6, FCOE),
2817 		},
2818 	}
2819 };
2820 
2821 static struct fw_info *
2822 find_fw_info(int chip)
2823 {
2824 	int i;
2825 
2826 	for (i = 0; i < nitems(fw_info); i++) {
2827 		if (fw_info[i].chip == chip)
2828 			return (&fw_info[i]);
2829 	}
2830 	return (NULL);
2831 }
2832 
2833 /*
2834  * Is the given firmware API compatible with the one the driver was compiled
2835  * with?
2836  */
2837 static int
2838 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2839 {
2840 
2841 	/* short circuit if it's the exact same firmware version */
2842 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2843 		return (1);
2844 
2845 	/*
2846 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
2847 	 * features that are supported in the driver.
2848 	 */
2849 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2850 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2851 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2852 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2853 		return (1);
2854 #undef SAME_INTF
2855 
2856 	return (0);
2857 }
2858 
2859 /*
2860  * The firmware in the KLD is usable, but should it be installed?  This routine
2861  * explains itself in detail if it indicates the KLD firmware should be
2862  * installed.
2863  */
2864 static int
2865 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2866 {
2867 	const char *reason;
2868 
2869 	if (!card_fw_usable) {
2870 		reason = "incompatible or unusable";
2871 		goto install;
2872 	}
2873 
2874 	if (k > c) {
2875 		reason = "older than the version bundled with this driver";
2876 		goto install;
2877 	}
2878 
2879 	if (t4_fw_install == 2 && k != c) {
2880 		reason = "different than the version bundled with this driver";
2881 		goto install;
2882 	}
2883 
2884 	return (0);
2885 
2886 install:
2887 	if (t4_fw_install == 0) {
2888 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2889 		    "but the driver is prohibited from installing a different "
2890 		    "firmware on the card.\n",
2891 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2892 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2893 
2894 		return (0);
2895 	}
2896 
2897 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2898 	    "installing firmware %u.%u.%u.%u on card.\n",
2899 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2900 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2901 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2902 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2903 
2904 	return (1);
2905 }
2906 /*
2907  * Establish contact with the firmware and determine if we are the master driver
2908  * or not, and whether we are responsible for chip initialization.
2909  */
2910 static int
2911 prep_firmware(struct adapter *sc)
2912 {
2913 	const struct firmware *fw = NULL, *default_cfg;
2914 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2915 	enum dev_state state;
2916 	struct fw_info *fw_info;
2917 	struct fw_hdr *card_fw;		/* fw on the card */
2918 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2919 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2920 					   against */
2921 
2922 	/* Contact firmware. */
2923 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2924 	if (rc < 0 || state == DEV_STATE_ERR) {
2925 		rc = -rc;
2926 		device_printf(sc->dev,
2927 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2928 		return (rc);
2929 	}
2930 	pf = rc;
2931 	if (pf == sc->mbox)
2932 		sc->flags |= MASTER_PF;
2933 	else if (state == DEV_STATE_UNINIT) {
2934 		/*
2935 		 * We didn't get to be the master so we definitely won't be
2936 		 * configuring the chip.  It's a bug if someone else hasn't
2937 		 * configured it already.
2938 		 */
2939 		device_printf(sc->dev, "couldn't be master(%d), "
2940 		    "device not already initialized either(%d).\n", rc, state);
2941 		return (EDOOFUS);
2942 	}
2943 
2944 	/* This is the firmware whose headers the driver was compiled against */
2945 	fw_info = find_fw_info(chip_id(sc));
2946 	if (fw_info == NULL) {
2947 		device_printf(sc->dev,
2948 		    "unable to look up firmware information for chip %d.\n",
2949 		    chip_id(sc));
2950 		return (EINVAL);
2951 	}
2952 	drv_fw = &fw_info->fw_hdr;
2953 
2954 	/*
2955 	 * The firmware KLD contains many modules.  The KLD name is also the
2956 	 * name of the module that contains the default config file.
2957 	 */
2958 	default_cfg = firmware_get(fw_info->kld_name);
2959 
2960 	/* Read the header of the firmware on the card */
2961 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2962 	rc = -t4_read_flash(sc, FLASH_FW_START,
2963 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2964 	if (rc == 0)
2965 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2966 	else {
2967 		device_printf(sc->dev,
2968 		    "Unable to read card's firmware header: %d\n", rc);
2969 		card_fw_usable = 0;
2970 	}
2971 
2972 	/* This is the firmware in the KLD */
2973 	fw = firmware_get(fw_info->fw_mod_name);
2974 	if (fw != NULL) {
2975 		kld_fw = (const void *)fw->data;
2976 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2977 	} else {
2978 		kld_fw = NULL;
2979 		kld_fw_usable = 0;
2980 	}
2981 
2982 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2983 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2984 		/*
2985 		 * Common case: the firmware on the card is an exact match and
2986 		 * the KLD is an exact match too, or the KLD is
2987 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2988 		 * here -- use cxgbetool loadfw if you want to reinstall the
2989 		 * same firmware as the one on the card.
2990 		 */
2991 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2992 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2993 	    be32toh(card_fw->fw_ver))) {
2994 
2995 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2996 		if (rc != 0) {
2997 			device_printf(sc->dev,
2998 			    "failed to install firmware: %d\n", rc);
2999 			goto done;
3000 		}
3001 
3002 		/* Installed successfully, update the cached header too. */
3003 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
3004 		card_fw_usable = 1;
3005 		need_fw_reset = 0;	/* already reset as part of load_fw */
3006 	}
3007 
3008 	if (!card_fw_usable) {
3009 		uint32_t d, c, k;
3010 
3011 		d = ntohl(drv_fw->fw_ver);
3012 		c = ntohl(card_fw->fw_ver);
3013 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3014 
3015 		device_printf(sc->dev, "Cannot find a usable firmware: "
3016 		    "fw_install %d, chip state %d, "
3017 		    "driver compiled with %d.%d.%d.%d, "
3018 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3019 		    t4_fw_install, state,
3020 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3021 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3022 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3023 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3024 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3025 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3026 		rc = EINVAL;
3027 		goto done;
3028 	}
3029 
3030 	/* Reset device */
3031 	if (need_fw_reset &&
3032 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3033 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3034 		if (rc != ETIMEDOUT && rc != EIO)
3035 			t4_fw_bye(sc, sc->mbox);
3036 		goto done;
3037 	}
3038 	sc->flags |= FW_OK;
3039 
3040 	rc = get_params__pre_init(sc);
3041 	if (rc != 0)
3042 		goto done; /* error message displayed already */
3043 
3044 	/* Partition adapter resources as specified in the config file. */
3045 	if (state == DEV_STATE_UNINIT) {
3046 
3047 		KASSERT(sc->flags & MASTER_PF,
3048 		    ("%s: trying to change chip settings when not master.",
3049 		    __func__));
3050 
3051 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3052 		if (rc != 0)
3053 			goto done;	/* error message displayed already */
3054 
3055 		t4_tweak_chip_settings(sc);
3056 
3057 		/* get basic stuff going */
3058 		rc = -t4_fw_initialize(sc, sc->mbox);
3059 		if (rc != 0) {
3060 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
3061 			goto done;
3062 		}
3063 	} else {
3064 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3065 		sc->cfcsum = 0;
3066 	}
3067 
3068 done:
3069 	free(card_fw, M_CXGBE);
3070 	if (fw != NULL)
3071 		firmware_put(fw, FIRMWARE_UNLOAD);
3072 	if (default_cfg != NULL)
3073 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
3074 
3075 	return (rc);
3076 }
3077 
3078 #define FW_PARAM_DEV(param) \
3079 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3080 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3081 #define FW_PARAM_PFVF(param) \
3082 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3083 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
3084 
3085 /*
3086  * Partition chip resources for use between various PFs, VFs, etc.
3087  */
3088 static int
3089 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3090     const char *name_prefix)
3091 {
3092 	const struct firmware *cfg = NULL;
3093 	int rc = 0;
3094 	struct fw_caps_config_cmd caps;
3095 	uint32_t mtype, moff, finicsum, cfcsum;
3096 
3097 	/*
3098 	 * Figure out what configuration file to use.  Pick the default config
3099 	 * file for the card if the user hasn't specified one explicitly.
3100 	 */
3101 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3102 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3103 		/* Card specific overrides go here. */
3104 		if (pci_get_device(sc->dev) == 0x440a)
3105 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3106 		if (is_fpga(sc))
3107 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3108 	}
3109 
3110 	/*
3111 	 * We need to load another module if the profile is anything except
3112 	 * "default" or "flash".
3113 	 */
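	/*
	 * For example (assuming the usual hw.cxgbe.config_file tunable is what
	 * set t4_cfg_file): a profile of "rdma" on a T4 card makes the driver
	 * try to load a module named "t4fw_cfg_rdma" here, falling back to the
	 * default or on-flash config if that module cannot be loaded.
	 */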
3114 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3115 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3116 		char s[32];
3117 
3118 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3119 		cfg = firmware_get(s);
3120 		if (cfg == NULL) {
3121 			if (default_cfg != NULL) {
3122 				device_printf(sc->dev,
3123 				    "unable to load module \"%s\" for "
3124 				    "configuration profile \"%s\", will use "
3125 				    "the default config file instead.\n",
3126 				    s, sc->cfg_file);
3127 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3128 				    "%s", DEFAULT_CF);
3129 			} else {
3130 				device_printf(sc->dev,
3131 				    "unable to load module \"%s\" for "
3132 				    "configuration profile \"%s\", will use "
3133 				    "the config file on the card's flash "
3134 				    "instead.\n", s, sc->cfg_file);
3135 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3136 				    "%s", FLASH_CF);
3137 			}
3138 		}
3139 	}
3140 
3141 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3142 	    default_cfg == NULL) {
3143 		device_printf(sc->dev,
3144 		    "default config file not available, will use the config "
3145 		    "file on the card's flash instead.\n");
3146 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3147 	}
3148 
3149 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3150 		u_int cflen;
3151 		const uint32_t *cfdata;
3152 		uint32_t param, val, addr;
3153 
3154 		KASSERT(cfg != NULL || default_cfg != NULL,
3155 		    ("%s: no config to upload", __func__));
3156 
3157 		/*
3158 		 * Ask the firmware where it wants us to upload the config file.
3159 		 */
3160 		param = FW_PARAM_DEV(CF);
3161 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3162 		if (rc != 0) {
3163 			/* No support for config file?  Shouldn't happen. */
3164 			device_printf(sc->dev,
3165 			    "failed to query config file location: %d.\n", rc);
3166 			goto done;
3167 		}
3168 		mtype = G_FW_PARAMS_PARAM_Y(val);
3169 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3170 
3171 		/*
3172 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
3173 		 * useless stuffing/comments at the end of the config file so
3174 		 * it's ok to simply throw away the last remaining bytes when
3175 		 * the config file is not an exact multiple of 4.  This also
3176 		 * helps with the validate_mt_off_len check.
3177 		 */
3178 		if (cfg != NULL) {
3179 			cflen = cfg->datasize & ~3;
3180 			cfdata = cfg->data;
3181 		} else {
3182 			cflen = default_cfg->datasize & ~3;
3183 			cfdata = default_cfg->data;
3184 		}
3185 
3186 		if (cflen > FLASH_CFG_MAX_SIZE) {
3187 			device_printf(sc->dev,
3188 			    "config file too long (%d, max allowed is %d).  "
3189 			    "Will try to use the config on the card, if any.\n",
3190 			    cflen, FLASH_CFG_MAX_SIZE);
3191 			goto use_config_on_flash;
3192 		}
3193 
3194 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3195 		if (rc != 0) {
3196 			device_printf(sc->dev,
3197 			    "%s: mtype %d, offset 0x%x, or len %d is not valid: %d.  "
3198 			    "Will try to use the config on the card, if any.\n",
3199 			    __func__, mtype, moff, cflen, rc);
3200 			goto use_config_on_flash;
3201 		}
3202 		write_via_memwin(sc, 2, addr, cfdata, cflen);
3203 	} else {
3204 use_config_on_flash:
3205 		mtype = FW_MEMTYPE_FLASH;
3206 		moff = t4_flash_cfg_addr(sc);
3207 	}
3208 
3209 	bzero(&caps, sizeof(caps));
3210 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3211 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
3212 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3213 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3214 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3215 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3216 	if (rc != 0) {
3217 		device_printf(sc->dev,
3218 		    "failed to pre-process config file: %d "
3219 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
3220 		goto done;
3221 	}
3222 
3223 	finicsum = be32toh(caps.finicsum);
3224 	cfcsum = be32toh(caps.cfcsum);
3225 	if (finicsum != cfcsum) {
3226 		device_printf(sc->dev,
3227 		    "WARNING: config file checksum mismatch: %08x %08x\n",
3228 		    finicsum, cfcsum);
3229 	}
3230 	sc->cfcsum = cfcsum;
3231 
3232 #define LIMIT_CAPS(x) do { \
3233 	caps.x &= htobe16(t4_##x##_allowed); \
3234 } while (0)
3235 
3236 	/*
3237 	 * Let the firmware know what features will (not) be used so it can tune
3238 	 * things accordingly.
3239 	 */
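	/*
	 * E.g. if t4_toecaps_allowed has been set to 0 (typically via a loader
	 * tunable), LIMIT_CAPS(toecaps) below clears caps.toecaps so the
	 * firmware knows TOE will not be used.
	 */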
3240 	LIMIT_CAPS(nbmcaps);
3241 	LIMIT_CAPS(linkcaps);
3242 	LIMIT_CAPS(switchcaps);
3243 	LIMIT_CAPS(niccaps);
3244 	LIMIT_CAPS(toecaps);
3245 	LIMIT_CAPS(rdmacaps);
3246 	LIMIT_CAPS(cryptocaps);
3247 	LIMIT_CAPS(iscsicaps);
3248 	LIMIT_CAPS(fcoecaps);
3249 #undef LIMIT_CAPS
3250 
3251 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3252 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3253 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3254 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3255 	if (rc != 0) {
3256 		device_printf(sc->dev,
3257 		    "failed to process config file: %d.\n", rc);
3258 	}
3259 done:
3260 	if (cfg != NULL)
3261 		firmware_put(cfg, FIRMWARE_UNLOAD);
3262 	return (rc);
3263 }
3264 
3265 /*
3266  * Retrieve parameters that are needed (or nice to have) very early.
3267  */
3268 static int
3269 get_params__pre_init(struct adapter *sc)
3270 {
3271 	int rc;
3272 	uint32_t param[2], val[2];
3273 
3274 	t4_get_version_info(sc);
3275 
3276 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3277 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3278 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3279 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3280 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3281 
3282 	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3283 	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3284 	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3285 	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3286 	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3287 
3288 	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3289 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3290 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3291 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3292 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3293 
3294 	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3295 	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3296 	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3297 	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3298 	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3299 
3300 	param[0] = FW_PARAM_DEV(PORTVEC);
3301 	param[1] = FW_PARAM_DEV(CCLK);
3302 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3303 	if (rc != 0) {
3304 		device_printf(sc->dev,
3305 		    "failed to query parameters (pre_init): %d.\n", rc);
3306 		return (rc);
3307 	}
3308 
3309 	sc->params.portvec = val[0];
3310 	sc->params.nports = bitcount32(val[0]);
3311 	sc->params.vpd.cclk = val[1];
3312 
3313 	/* Read device log parameters. */
3314 	rc = -t4_init_devlog_params(sc, 1);
3315 	if (rc == 0)
3316 		fixup_devlog_params(sc);
3317 	else {
3318 		device_printf(sc->dev,
3319 		    "failed to get devlog parameters: %d.\n", rc);
3320 		rc = 0;	/* devlog isn't critical for device operation */
3321 	}
3322 
3323 	return (rc);
3324 }
3325 
3326 /*
3327  * Retrieve various parameters that are of interest to the driver.  The device
3328  * has been initialized by the firmware at this point.
3329  */
3330 static int
3331 get_params__post_init(struct adapter *sc)
3332 {
3333 	int rc;
3334 	uint32_t param[7], val[7];
3335 	struct fw_caps_config_cmd caps;
3336 
3337 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
3338 	param[1] = FW_PARAM_PFVF(EQ_START);
3339 	param[2] = FW_PARAM_PFVF(FILTER_START);
3340 	param[3] = FW_PARAM_PFVF(FILTER_END);
3341 	param[4] = FW_PARAM_PFVF(L2T_START);
3342 	param[5] = FW_PARAM_PFVF(L2T_END);
3343 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3344 	if (rc != 0) {
3345 		device_printf(sc->dev,
3346 		    "failed to query parameters (post_init): %d.\n", rc);
3347 		return (rc);
3348 	}
3349 
3350 	sc->sge.iq_start = val[0];
3351 	sc->sge.eq_start = val[1];
3352 	sc->tids.ftid_base = val[2];
3353 	sc->tids.nftids = val[3] - val[2] + 1;
3354 	sc->params.ftid_min = val[2];
3355 	sc->params.ftid_max = val[3];
3356 	sc->vres.l2t.start = val[4];
3357 	sc->vres.l2t.size = val[5] - val[4] + 1;
3358 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3359 	    ("%s: L2 table size (%u) larger than expected (%u)",
3360 	    __func__, sc->vres.l2t.size, L2T_SIZE));
3361 
3362 	/* get capabilities */
3363 	bzero(&caps, sizeof(caps));
3364 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3365 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
3366 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3367 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3368 	if (rc != 0) {
3369 		device_printf(sc->dev,
3370 		    "failed to get card capabilities: %d.\n", rc);
3371 		return (rc);
3372 	}
3373 
3374 #define READ_CAPS(x) do { \
3375 	sc->x = be16toh(caps.x); \
3376 } while (0)
3377 	READ_CAPS(nbmcaps);
3378 	READ_CAPS(linkcaps);
3379 	READ_CAPS(switchcaps);
3380 	READ_CAPS(niccaps);
3381 	READ_CAPS(toecaps);
3382 	READ_CAPS(rdmacaps);
3383 	READ_CAPS(cryptocaps);
3384 	READ_CAPS(iscsicaps);
3385 	READ_CAPS(fcoecaps);
3386 
3387 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3388 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3389 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3390 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3391 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3392 		if (rc != 0) {
3393 			device_printf(sc->dev,
3394 			    "failed to query NIC parameters: %d.\n", rc);
3395 			return (rc);
3396 		}
3397 		sc->tids.etid_base = val[0];
3398 		sc->params.etid_min = val[0];
3399 		sc->tids.netids = val[1] - val[0] + 1;
3400 		sc->params.netids = sc->tids.netids;
3401 		sc->params.eo_wr_cred = val[2];
3402 		sc->params.ethoffload = 1;
3403 	}
3404 
3405 	if (sc->toecaps) {
3406 		/* query offload-related parameters */
3407 		param[0] = FW_PARAM_DEV(NTID);
3408 		param[1] = FW_PARAM_PFVF(SERVER_START);
3409 		param[2] = FW_PARAM_PFVF(SERVER_END);
3410 		param[3] = FW_PARAM_PFVF(TDDP_START);
3411 		param[4] = FW_PARAM_PFVF(TDDP_END);
3412 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3413 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3414 		if (rc != 0) {
3415 			device_printf(sc->dev,
3416 			    "failed to query TOE parameters: %d.\n", rc);
3417 			return (rc);
3418 		}
3419 		sc->tids.ntids = val[0];
3420 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3421 		sc->tids.stid_base = val[1];
3422 		sc->tids.nstids = val[2] - val[1] + 1;
3423 		sc->vres.ddp.start = val[3];
3424 		sc->vres.ddp.size = val[4] - val[3] + 1;
3425 		sc->params.ofldq_wr_cred = val[5];
3426 		sc->params.offload = 1;
3427 	}
3428 	if (sc->rdmacaps) {
3429 		param[0] = FW_PARAM_PFVF(STAG_START);
3430 		param[1] = FW_PARAM_PFVF(STAG_END);
3431 		param[2] = FW_PARAM_PFVF(RQ_START);
3432 		param[3] = FW_PARAM_PFVF(RQ_END);
3433 		param[4] = FW_PARAM_PFVF(PBL_START);
3434 		param[5] = FW_PARAM_PFVF(PBL_END);
3435 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3436 		if (rc != 0) {
3437 			device_printf(sc->dev,
3438 			    "failed to query RDMA parameters(1): %d.\n", rc);
3439 			return (rc);
3440 		}
3441 		sc->vres.stag.start = val[0];
3442 		sc->vres.stag.size = val[1] - val[0] + 1;
3443 		sc->vres.rq.start = val[2];
3444 		sc->vres.rq.size = val[3] - val[2] + 1;
3445 		sc->vres.pbl.start = val[4];
3446 		sc->vres.pbl.size = val[5] - val[4] + 1;
3447 
3448 		param[0] = FW_PARAM_PFVF(SQRQ_START);
3449 		param[1] = FW_PARAM_PFVF(SQRQ_END);
3450 		param[2] = FW_PARAM_PFVF(CQ_START);
3451 		param[3] = FW_PARAM_PFVF(CQ_END);
3452 		param[4] = FW_PARAM_PFVF(OCQ_START);
3453 		param[5] = FW_PARAM_PFVF(OCQ_END);
3454 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3455 		if (rc != 0) {
3456 			device_printf(sc->dev,
3457 			    "failed to query RDMA parameters(2): %d.\n", rc);
3458 			return (rc);
3459 		}
3460 		sc->vres.qp.start = val[0];
3461 		sc->vres.qp.size = val[1] - val[0] + 1;
3462 		sc->vres.cq.start = val[2];
3463 		sc->vres.cq.size = val[3] - val[2] + 1;
3464 		sc->vres.ocq.start = val[4];
3465 		sc->vres.ocq.size = val[5] - val[4] + 1;
3466 	}
3467 	if (sc->iscsicaps) {
3468 		param[0] = FW_PARAM_PFVF(ISCSI_START);
3469 		param[1] = FW_PARAM_PFVF(ISCSI_END);
3470 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3471 		if (rc != 0) {
3472 			device_printf(sc->dev,
3473 			    "failed to query iSCSI parameters: %d.\n", rc);
3474 			return (rc);
3475 		}
3476 		sc->vres.iscsi.start = val[0];
3477 		sc->vres.iscsi.size = val[1] - val[0] + 1;
3478 	}
3479 
3480 	t4_init_sge_params(sc);
3481 
3482 	/*
3483 	 * We've got the params we wanted to query via the firmware.  Now grab
3484 	 * some others directly from the chip.
3485 	 */
3486 	rc = t4_read_chip_settings(sc);
3487 
3488 	return (rc);
3489 }
3490 
3491 static int
3492 set_params__post_init(struct adapter *sc)
3493 {
3494 	uint32_t param, val;
3495 
3496 	/* ask for encapsulated CPLs */
3497 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3498 	val = 1;
3499 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3500 
3501 	return (0);
3502 }
3503 
3504 #undef FW_PARAM_PFVF
3505 #undef FW_PARAM_DEV
3506 
3507 static void
3508 t4_set_desc(struct adapter *sc)
3509 {
3510 	char buf[128];
3511 	struct adapter_params *p = &sc->params;
3512 
3513 	snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
3514 
3515 	device_set_desc_copy(sc->dev, buf);
3516 }
3517 
3518 static void
3519 build_medialist(struct port_info *pi, struct ifmedia *media)
3520 {
3521 	int m;
3522 
3523 	PORT_LOCK(pi);
3524 
3525 	ifmedia_removeall(media);
3526 
3527 	m = IFM_ETHER | IFM_FDX;
3528 
3529 	switch(pi->port_type) {
3530 	case FW_PORT_TYPE_BT_XFI:
3531 	case FW_PORT_TYPE_BT_XAUI:
3532 		ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3533 		/* fall through */
3534 
3535 	case FW_PORT_TYPE_BT_SGMII:
3536 		ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3537 		ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3538 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3539 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3540 		break;
3541 
3542 	case FW_PORT_TYPE_CX4:
3543 		ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3544 		ifmedia_set(media, m | IFM_10G_CX4);
3545 		break;
3546 
3547 	case FW_PORT_TYPE_QSFP_10G:
3548 	case FW_PORT_TYPE_SFP:
3549 	case FW_PORT_TYPE_FIBER_XFI:
3550 	case FW_PORT_TYPE_FIBER_XAUI:
3551 		switch (pi->mod_type) {
3552 
3553 		case FW_PORT_MOD_TYPE_LR:
3554 			ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3555 			ifmedia_set(media, m | IFM_10G_LR);
3556 			break;
3557 
3558 		case FW_PORT_MOD_TYPE_SR:
3559 			ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3560 			ifmedia_set(media, m | IFM_10G_SR);
3561 			break;
3562 
3563 		case FW_PORT_MOD_TYPE_LRM:
3564 			ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
3565 			ifmedia_set(media, m | IFM_10G_LRM);
3566 			break;
3567 
3568 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3569 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3570 			ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
3571 			ifmedia_set(media, m | IFM_10G_TWINAX);
3572 			break;
3573 
3574 		case FW_PORT_MOD_TYPE_NONE:
3575 			m &= ~IFM_FDX;
3576 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3577 			ifmedia_set(media, m | IFM_NONE);
3578 			break;
3579 
3580 		case FW_PORT_MOD_TYPE_NA:
3581 		case FW_PORT_MOD_TYPE_ER:
3582 		default:
3583 			device_printf(pi->dev,
3584 			    "unknown port_type (%d), mod_type (%d)\n",
3585 			    pi->port_type, pi->mod_type);
3586 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3587 			ifmedia_set(media, m | IFM_UNKNOWN);
3588 			break;
3589 		}
3590 		break;
3591 
3592 	case FW_PORT_TYPE_CR_QSFP:
3593 	case FW_PORT_TYPE_CR_SFP28:
3594 	case FW_PORT_TYPE_SFP28:
3595 	case FW_PORT_TYPE_KR_SFP28:
3596 		switch (pi->mod_type) {
3597 
3598 		case FW_PORT_MOD_TYPE_SR:
3599 			ifmedia_add(media, m | IFM_25G_SR, 0, NULL);
3600 			ifmedia_set(media, m | IFM_25G_SR);
3601 			break;
3602 
3603 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3604 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3605 			ifmedia_add(media, m | IFM_25G_CR, 0, NULL);
3606 			ifmedia_set(media, m | IFM_25G_CR);
3607 			break;
3608 
3609 		case FW_PORT_MOD_TYPE_NONE:
3610 			m &= ~IFM_FDX;
3611 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3612 			ifmedia_set(media, m | IFM_NONE);
3613 			break;
3614 
3615 		default:
3616 			device_printf(pi->dev,
3617 			    "unknown port_type (%d), mod_type (%d)\n",
3618 			    pi->port_type, pi->mod_type);
3619 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3620 			ifmedia_set(media, m | IFM_UNKNOWN);
3621 			break;
3622 		}
3623 		break;
3624 
3625 	case FW_PORT_TYPE_QSFP:
3626 		switch (pi->mod_type) {
3627 
3628 		case FW_PORT_MOD_TYPE_LR:
3629 			ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
3630 			ifmedia_set(media, m | IFM_40G_LR4);
3631 			break;
3632 
3633 		case FW_PORT_MOD_TYPE_SR:
3634 			ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
3635 			ifmedia_set(media, m | IFM_40G_SR4);
3636 			break;
3637 
3638 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3639 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3640 			ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
3641 			ifmedia_set(media, m | IFM_40G_CR4);
3642 			break;
3643 
3644 		case FW_PORT_MOD_TYPE_NONE:
3645 			m &= ~IFM_FDX;
3646 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3647 			ifmedia_set(media, m | IFM_NONE);
3648 			break;
3649 
3650 		default:
3651 			device_printf(pi->dev,
3652 			    "unknown port_type (%d), mod_type (%d)\n",
3653 			    pi->port_type, pi->mod_type);
3654 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3655 			ifmedia_set(media, m | IFM_UNKNOWN);
3656 			break;
3657 		}
3658 		break;
3659 
3660 	case FW_PORT_TYPE_KR4_100G:
3661 	case FW_PORT_TYPE_CR4_QSFP:
3662 		switch (pi->mod_type) {
3663 
3664 		case FW_PORT_MOD_TYPE_LR:
3665 			ifmedia_add(media, m | IFM_100G_LR4, 0, NULL);
3666 			ifmedia_set(media, m | IFM_100G_LR4);
3667 			break;
3668 
3669 		case FW_PORT_MOD_TYPE_SR:
3670 			ifmedia_add(media, m | IFM_100G_SR4, 0, NULL);
3671 			ifmedia_set(media, m | IFM_100G_SR4);
3672 			break;
3673 
3674 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3675 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3676 			ifmedia_add(media, m | IFM_100G_CR4, 0, NULL);
3677 			ifmedia_set(media, m | IFM_100G_CR4);
3678 			break;
3679 
3680 		case FW_PORT_MOD_TYPE_NONE:
3681 			m &= ~IFM_FDX;
3682 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3683 			ifmedia_set(media, m | IFM_NONE);
3684 			break;
3685 
3686 		default:
3687 			device_printf(pi->dev,
3688 			    "unknown port_type (%d), mod_type (%d)\n",
3689 			    pi->port_type, pi->mod_type);
3690 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3691 			ifmedia_set(media, m | IFM_UNKNOWN);
3692 			break;
3693 		}
3694 		break;
3695 
3696 	default:
3697 		device_printf(pi->dev,
3698 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
3699 		    pi->mod_type);
3700 		ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3701 		ifmedia_set(media, m | IFM_UNKNOWN);
3702 		break;
3703 	}
3704 
3705 	PORT_UNLOCK(pi);
3706 }
3707 
3708 #define FW_MAC_EXACT_CHUNK	7
3709 
3710 /*
3711  * Program the port's XGMAC based on parameters in ifnet.  The caller also
3712  * indicates which parameters should be programmed (the rest are left alone).
3713  */
3714 int
3715 update_mac_settings(struct ifnet *ifp, int flags)
3716 {
3717 	int rc = 0;
3718 	struct vi_info *vi = ifp->if_softc;
3719 	struct port_info *pi = vi->pi;
3720 	struct adapter *sc = pi->adapter;
3721 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3722 
3723 	ASSERT_SYNCHRONIZED_OP(sc);
3724 	KASSERT(flags, ("%s: not told what to update.", __func__));
3725 
3726 	if (flags & XGMAC_MTU)
3727 		mtu = ifp->if_mtu;
3728 
3729 	if (flags & XGMAC_PROMISC)
3730 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3731 
3732 	if (flags & XGMAC_ALLMULTI)
3733 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3734 
3735 	if (flags & XGMAC_VLANEX)
3736 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3737 
3738 	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3739 		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
3740 		    allmulti, 1, vlanex, false);
3741 		if (rc) {
3742 			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3743 			    rc);
3744 			return (rc);
3745 		}
3746 	}
3747 
3748 	if (flags & XGMAC_UCADDR) {
3749 		uint8_t ucaddr[ETHER_ADDR_LEN];
3750 
3751 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3752 		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
3753 		    ucaddr, true, true);
3754 		if (rc < 0) {
3755 			rc = -rc;
3756 			if_printf(ifp, "change_mac failed: %d\n", rc);
3757 			return (rc);
3758 		} else {
3759 			vi->xact_addr_filt = rc;
3760 			rc = 0;
3761 		}
3762 	}
3763 
3764 	if (flags & XGMAC_MCADDRS) {
3765 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3766 		int del = 1;
3767 		uint64_t hash = 0;
3768 		struct ifmultiaddr *ifma;
3769 		int i = 0, j;
3770 
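		/*
		 * Program the multicast list in chunks of FW_MAC_EXACT_CHUNK
		 * addresses.  The first chunk is written with del = 1 so that
		 * the firmware first discards the old exact-match filters; the
		 * remaining chunks (del = 0) only append.  Addresses that do
		 * not fit in the exact filters are picked up by the hash that
		 * is programmed at the end.
		 */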
3771 		if_maddr_rlock(ifp);
3772 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3773 			if (ifma->ifma_addr->sa_family != AF_LINK)
3774 				continue;
3775 			mcaddr[i] =
3776 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3777 			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3778 			i++;
3779 
3780 			if (i == FW_MAC_EXACT_CHUNK) {
3781 				rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
3782 				    del, i, mcaddr, NULL, &hash, 0);
3783 				if (rc < 0) {
3784 					rc = -rc;
3785 					for (j = 0; j < i; j++) {
3786 						if_printf(ifp,
3787 						    "failed to add mc address"
3788 						    " %02x:%02x:%02x:"
3789 						    "%02x:%02x:%02x rc=%d\n",
3790 						    mcaddr[j][0], mcaddr[j][1],
3791 						    mcaddr[j][2], mcaddr[j][3],
3792 						    mcaddr[j][4], mcaddr[j][5],
3793 						    rc);
3794 					}
3795 					goto mcfail;
3796 				}
3797 				del = 0;
3798 				i = 0;
3799 			}
3800 		}
3801 		if (i > 0) {
3802 			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
3803 			    mcaddr, NULL, &hash, 0);
3804 			if (rc < 0) {
3805 				rc = -rc;
3806 				for (j = 0; j < i; j++) {
3807 					if_printf(ifp,
3808 					    "failed to add mc address"
3809 					    " %02x:%02x:%02x:"
3810 					    "%02x:%02x:%02x rc=%d\n",
3811 					    mcaddr[j][0], mcaddr[j][1],
3812 					    mcaddr[j][2], mcaddr[j][3],
3813 					    mcaddr[j][4], mcaddr[j][5],
3814 					    rc);
3815 				}
3816 				goto mcfail;
3817 			}
3818 		}
3819 
3820 		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
3821 		if (rc != 0)
3822 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
3823 mcfail:
3824 		if_maddr_runlock(ifp);
3825 	}
3826 
3827 	return (rc);
3828 }
3829 
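/*
 * Example (sketch, hypothetical caller): an ioctl handler that only changed
 * IFF_PROMISC could reprogram just that one setting with
 *
 *	rc = update_mac_settings(ifp, XGMAC_PROMISC);
 *
 * while cxgbe_init_synchronized() below passes XGMAC_ALL to program the MTU,
 * rx modes, VLAN extraction, and MAC addresses in one go.
 */
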
3830 /*
3831  * {begin|end}_synchronized_op must be called from the same thread.
3832  */
3833 int
3834 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
3835     char *wmesg)
3836 {
3837 	int rc, pri;
3838 
3839 #ifdef WITNESS
3840 	/* the caller thinks it's ok to sleep, but is it really? */
3841 	if (flags & SLEEP_OK)
3842 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3843 		    "begin_synchronized_op");
3844 #endif
3845 
3846 	if (INTR_OK)
3847 		pri = PCATCH;
3848 	else
3849 		pri = 0;
3850 
3851 	ADAPTER_LOCK(sc);
3852 	for (;;) {
3853 
3854 		if (vi && IS_DOOMED(vi)) {
3855 			rc = ENXIO;
3856 			goto done;
3857 		}
3858 
3859 		if (!IS_BUSY(sc)) {
3860 			rc = 0;
3861 			break;
3862 		}
3863 
3864 		if (!(flags & SLEEP_OK)) {
3865 			rc = EBUSY;
3866 			goto done;
3867 		}
3868 
3869 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3870 			rc = EINTR;
3871 			goto done;
3872 		}
3873 	}
3874 
3875 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3876 	SET_BUSY(sc);
3877 #ifdef INVARIANTS
3878 	sc->last_op = wmesg;
3879 	sc->last_op_thr = curthread;
3880 	sc->last_op_flags = flags;
3881 #endif
3882 
3883 done:
3884 	if (!(flags & HOLD_LOCK) || rc)
3885 		ADAPTER_UNLOCK(sc);
3886 
3887 	return (rc);
3888 }
3889 
3890 /*
3891  * Tell if_ioctl and if_init that the VI is going away.  This is
3892  * Tell if_ioctl and if_init that the VI is going away.  This is a
3893  * special variant of begin_synchronized_op and must be paired with a
3894  */
3895 void
3896 doom_vi(struct adapter *sc, struct vi_info *vi)
3897 {
3898 
3899 	ADAPTER_LOCK(sc);
3900 	SET_DOOMED(vi);
3901 	wakeup(&sc->flags);
3902 	while (IS_BUSY(sc))
3903 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
3904 	SET_BUSY(sc);
3905 #ifdef INVARIANTS
3906 	sc->last_op = "t4detach";
3907 	sc->last_op_thr = curthread;
3908 	sc->last_op_flags = 0;
3909 #endif
3910 	ADAPTER_UNLOCK(sc);
3911 }
3912 
3913 /*
3914  * {begin|end}_synchronized_op must be called from the same thread.
3915  */
3916 void
3917 end_synchronized_op(struct adapter *sc, int flags)
3918 {
3919 
3920 	if (flags & LOCK_HELD)
3921 		ADAPTER_LOCK_ASSERT_OWNED(sc);
3922 	else
3923 		ADAPTER_LOCK(sc);
3924 
3925 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3926 	CLR_BUSY(sc);
3927 	wakeup(&sc->flags);
3928 	ADAPTER_UNLOCK(sc);
3929 }
3930 
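/*
 * Example (sketch): a typical synchronized operation in this driver looks
 * like
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xxxx");
 *	if (rc)
 *		return (rc);
 *	... work that needs exclusive use of the adapter ...
 *	end_synchronized_op(sc, 0);
 *
 * where "t4xxxx" stands in for whatever wmesg the caller wants visible while
 * the thread sleeps waiting for the adapter to become un-busy.
 */
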
3931 static int
3932 cxgbe_init_synchronized(struct vi_info *vi)
3933 {
3934 	struct port_info *pi = vi->pi;
3935 	struct adapter *sc = pi->adapter;
3936 	struct ifnet *ifp = vi->ifp;
3937 	int rc = 0, i;
3938 	struct sge_txq *txq;
3939 
3940 	ASSERT_SYNCHRONIZED_OP(sc);
3941 
3942 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3943 		return (0);	/* already running */
3944 
3945 	if (!(sc->flags & FULL_INIT_DONE) &&
3946 	    ((rc = adapter_full_init(sc)) != 0))
3947 		return (rc);	/* error message displayed already */
3948 
3949 	if (!(vi->flags & VI_INIT_DONE) &&
3950 	    ((rc = vi_full_init(vi)) != 0))
3951 		return (rc); /* error message displayed already */
3952 
3953 	rc = update_mac_settings(ifp, XGMAC_ALL);
3954 	if (rc)
3955 		goto done;	/* error message displayed already */
3956 
3957 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
3958 	if (rc != 0) {
3959 		if_printf(ifp, "enable_vi failed: %d\n", rc);
3960 		goto done;
3961 	}
3962 
3963 	/*
3964 	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
3965 	 * if this changes.
3966 	 */
3967 
3968 	for_each_txq(vi, i, txq) {
3969 		TXQ_LOCK(txq);
3970 		txq->eq.flags |= EQ_ENABLED;
3971 		TXQ_UNLOCK(txq);
3972 	}
3973 
3974 	/*
3975 	 * The first iq of the first port to come up is used for tracing.
3976 	 */
3977 	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
3978 		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
3979 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
3980 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
3981 		    V_QUEUENUMBER(sc->traceq));
3982 		pi->flags |= HAS_TRACEQ;
3983 	}
3984 
3985 	/* all ok */
3986 	PORT_LOCK(pi);
3987 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3988 	pi->up_vis++;
3989 
3990 	if (pi->nvi > 1 || sc->flags & IS_VF)
3991 		callout_reset(&vi->tick, hz, vi_tick, vi);
3992 	else
3993 		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
3994 	PORT_UNLOCK(pi);
3995 done:
3996 	if (rc != 0)
3997 		cxgbe_uninit_synchronized(vi);
3998 
3999 	return (rc);
4000 }
4001 
4002 /*
4003  * Idempotent.
4004  */
4005 static int
4006 cxgbe_uninit_synchronized(struct vi_info *vi)
4007 {
4008 	struct port_info *pi = vi->pi;
4009 	struct adapter *sc = pi->adapter;
4010 	struct ifnet *ifp = vi->ifp;
4011 	int rc, i;
4012 	struct sge_txq *txq;
4013 
4014 	ASSERT_SYNCHRONIZED_OP(sc);
4015 
4016 	if (!(vi->flags & VI_INIT_DONE)) {
4017 		KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
4018 		    ("uninited VI is running"));
4019 		return (0);
4020 	}
4021 
4022 	/*
4023 	 * Disable the VI so that all its data in either direction is discarded
4024 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
4025 	 * tick) intact as the TP can deliver negative advice or data that it's
4026 	 * holding in its RAM (for an offloaded connection) even after the VI is
4027 	 * disabled.
4028 	 */
4029 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4030 	if (rc) {
4031 		if_printf(ifp, "disable_vi failed: %d\n", rc);
4032 		return (rc);
4033 	}
4034 
4035 	for_each_txq(vi, i, txq) {
4036 		TXQ_LOCK(txq);
4037 		txq->eq.flags &= ~EQ_ENABLED;
4038 		TXQ_UNLOCK(txq);
4039 	}
4040 
4041 	PORT_LOCK(pi);
4042 	if (pi->nvi > 1 || sc->flags & IS_VF)
4043 		callout_stop(&vi->tick);
4044 	else
4045 		callout_stop(&pi->tick);
4046 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4047 		PORT_UNLOCK(pi);
4048 		return (0);
4049 	}
4050 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4051 	pi->up_vis--;
4052 	if (pi->up_vis > 0) {
4053 		PORT_UNLOCK(pi);
4054 		return (0);
4055 	}
4056 	PORT_UNLOCK(pi);
4057 
4058 	pi->link_cfg.link_ok = 0;
4059 	pi->link_cfg.speed = 0;
4060 	pi->linkdnrc = -1;
4061 	t4_os_link_changed(sc, pi->port_id, 0, -1);
4062 
4063 	return (0);
4064 }
4065 
4066 /*
4067  * It is ok for this function to fail midway and return right away.  t4_detach
4068  * will walk the entire sc->irq list and clean up whatever is valid.
4069  */
4070 int
4071 t4_setup_intr_handlers(struct adapter *sc)
4072 {
4073 	int rc, rid, p, q, v;
4074 	char s[8];
4075 	struct irq *irq;
4076 	struct port_info *pi;
4077 	struct vi_info *vi;
4078 	struct sge *sge = &sc->sge;
4079 	struct sge_rxq *rxq;
4080 #ifdef TCP_OFFLOAD
4081 	struct sge_ofld_rxq *ofld_rxq;
4082 #endif
4083 #ifdef DEV_NETMAP
4084 	struct sge_nm_rxq *nm_rxq;
4085 #endif
4086 #ifdef RSS
4087 	int nbuckets = rss_getnumbuckets();
4088 #endif
4089 
4090 	/*
4091 	 * Set up interrupts.
4092 	 */
4093 	irq = &sc->irq[0];
4094 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
4095 	if (sc->intr_count == 1)
4096 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4097 
4098 	/* Multiple interrupts. */
4099 	if (sc->flags & IS_VF)
4100 		KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4101 		    ("%s: too few intr.", __func__));
4102 	else
4103 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4104 		    ("%s: too few intr.", __func__));
4105 
4106 	/* The first one is always the error interrupt on PFs */
4107 	if (!(sc->flags & IS_VF)) {
4108 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4109 		if (rc != 0)
4110 			return (rc);
4111 		irq++;
4112 		rid++;
4113 	}
4114 
4115 	/* The second one is always the firmware event queue (first on VFs) */
4116 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4117 	if (rc != 0)
4118 		return (rc);
4119 	irq++;
4120 	rid++;
4121 
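	/*
	 * The remaining vectors go to the rx queues (NIC, netmap, and TOE).
	 * Each one is described with a short port/VI/queue tag, e.g. "0a2"
	 * for port 0, first VI, rx queue 2; offload rx queues use an
	 * uppercase VI letter ("0A2").
	 */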
4122 	for_each_port(sc, p) {
4123 		pi = sc->port[p];
4124 		for_each_vi(pi, v, vi) {
4125 			vi->first_intr = rid - 1;
4126 
4127 			if (vi->nnmrxq > 0) {
4128 				int n = max(vi->nrxq, vi->nnmrxq);
4129 
4130 				MPASS(vi->flags & INTR_RXQ);
4131 
4132 				rxq = &sge->rxq[vi->first_rxq];
4133 #ifdef DEV_NETMAP
4134 				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4135 #endif
4136 				for (q = 0; q < n; q++) {
4137 					snprintf(s, sizeof(s), "%x%c%x", p,
4138 					    'a' + v, q);
4139 					if (q < vi->nrxq)
4140 						irq->rxq = rxq++;
4141 #ifdef DEV_NETMAP
4142 					if (q < vi->nnmrxq)
4143 						irq->nm_rxq = nm_rxq++;
4144 #endif
4145 					rc = t4_alloc_irq(sc, irq, rid,
4146 					    t4_vi_intr, irq, s);
4147 					if (rc != 0)
4148 						return (rc);
4149 					irq++;
4150 					rid++;
4151 					vi->nintr++;
4152 				}
4153 			} else if (vi->flags & INTR_RXQ) {
4154 				for_each_rxq(vi, q, rxq) {
4155 					snprintf(s, sizeof(s), "%x%c%x", p,
4156 					    'a' + v, q);
4157 					rc = t4_alloc_irq(sc, irq, rid,
4158 					    t4_intr, rxq, s);
4159 					if (rc != 0)
4160 						return (rc);
4161 #ifdef RSS
4162 					bus_bind_intr(sc->dev, irq->res,
4163 					    rss_getcpu(q % nbuckets));
4164 #endif
4165 					irq++;
4166 					rid++;
4167 					vi->nintr++;
4168 				}
4169 			}
4170 #ifdef TCP_OFFLOAD
4171 			if (vi->flags & INTR_OFLD_RXQ) {
4172 				for_each_ofld_rxq(vi, q, ofld_rxq) {
4173 					snprintf(s, sizeof(s), "%x%c%x", p,
4174 					    'A' + v, q);
4175 					rc = t4_alloc_irq(sc, irq, rid,
4176 					    t4_intr, ofld_rxq, s);
4177 					if (rc != 0)
4178 						return (rc);
4179 					irq++;
4180 					rid++;
4181 					vi->nintr++;
4182 				}
4183 			}
4184 #endif
4185 		}
4186 	}
4187 	MPASS(irq == &sc->irq[sc->intr_count]);
4188 
4189 	return (0);
4190 }
4191 
4192 int
4193 adapter_full_init(struct adapter *sc)
4194 {
4195 	int rc, i;
4196 
4197 	ASSERT_SYNCHRONIZED_OP(sc);
4198 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4199 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
4200 	    ("%s: FULL_INIT_DONE already", __func__));
4201 
4202 	/*
4203 	 * Queues that belong to the adapter (not to any particular port).
4204 	 */
4205 	rc = t4_setup_adapter_queues(sc);
4206 	if (rc != 0)
4207 		goto done;
4208 
4209 	for (i = 0; i < nitems(sc->tq); i++) {
4210 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
4211 		    taskqueue_thread_enqueue, &sc->tq[i]);
4212 		if (sc->tq[i] == NULL) {
4213 			device_printf(sc->dev,
4214 			    "failed to allocate task queue %d\n", i);
4215 			rc = ENOMEM;
4216 			goto done;
4217 		}
4218 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
4219 		    device_get_nameunit(sc->dev), i);
4220 	}
4221 
4222 	if (!(sc->flags & IS_VF))
4223 		t4_intr_enable(sc);
4224 	sc->flags |= FULL_INIT_DONE;
4225 done:
4226 	if (rc != 0)
4227 		adapter_full_uninit(sc);
4228 
4229 	return (rc);
4230 }
4231 
4232 int
4233 adapter_full_uninit(struct adapter *sc)
4234 {
4235 	int i;
4236 
4237 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4238 
4239 	t4_teardown_adapter_queues(sc);
4240 
4241 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4242 		taskqueue_free(sc->tq[i]);
4243 		sc->tq[i] = NULL;
4244 	}
4245 
4246 	sc->flags &= ~FULL_INIT_DONE;
4247 
4248 	return (0);
4249 }
4250 
4251 #ifdef RSS
4252 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4253     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4254     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4255     RSS_HASHTYPE_RSS_UDP_IPV6)
4256 
4257 /* Translates kernel hash types to hardware. */
4258 static int
4259 hashconfig_to_hashen(int hashconfig)
4260 {
4261 	int hashen = 0;
4262 
4263 	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4264 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4265 	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4266 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
4267 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4268 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4269 		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4270 	}
4271 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4272 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4273 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4274 	}
4275 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4276 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4277 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4278 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4279 
4280 	return (hashen);
4281 }
4282 
4283 /* Translates hardware hash types to kernel. */
4284 static int
4285 hashen_to_hashconfig(int hashen)
4286 {
4287 	int hashconfig = 0;
4288 
4289 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4290 		/*
4291 		 * If UDP hashing was enabled it must have been enabled for
4292 		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
4293 		 * enabling any 4-tuple hash is a nonsense configuration.
4294 		 */
4295 		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4296 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4297 
4298 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4299 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4300 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4301 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4302 	}
4303 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4304 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4305 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4306 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4307 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4308 		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4309 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4310 		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4311 
4312 	return (hashconfig);
4313 }
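
/*
 * Example: a kernel hashconfig of just RSS_HASHTYPE_RSS_UDP_IPV4 translates
 * to UDPEN | IP4FOURTUPEN, and translating that back yields both
 * RSS_HASHTYPE_RSS_UDP_IPV4 and RSS_HASHTYPE_RSS_TCP_IPV4, because the
 * hardware cannot hash UDP/IPv4 4-tuples without also hashing TCP/IPv4
 * ones.  This is exactly the kind of "extra" hash that vi_full_init()
 * reports to the user.
 */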
4314 #endif
4315 
4316 int
4317 vi_full_init(struct vi_info *vi)
4318 {
4319 	struct adapter *sc = vi->pi->adapter;
4320 	struct ifnet *ifp = vi->ifp;
4321 	uint16_t *rss;
4322 	struct sge_rxq *rxq;
4323 	int rc, i, j, hashen;
4324 #ifdef RSS
4325 	int nbuckets = rss_getnumbuckets();
4326 	int hashconfig = rss_gethashconfig();
4327 	int extra;
4328 	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4329 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4330 #endif
4331 
4332 	ASSERT_SYNCHRONIZED_OP(sc);
4333 	KASSERT((vi->flags & VI_INIT_DONE) == 0,
4334 	    ("%s: VI_INIT_DONE already", __func__));
4335 
4336 	sysctl_ctx_init(&vi->ctx);
4337 	vi->flags |= VI_SYSCTL_CTX;
4338 
4339 	/*
4340 	 * Allocate tx/rx/fl queues for this VI.
4341 	 */
4342 	rc = t4_setup_vi_queues(vi);
4343 	if (rc != 0)
4344 		goto done;	/* error message displayed already */
4345 
4346 	/*
4347 	 * Set up RSS for this VI.  Save a copy of the RSS table for later use.
4348 	 */
4349 	if (vi->nrxq > vi->rss_size) {
4350 		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4351 		    "some queues will never receive traffic.\n", vi->nrxq,
4352 		    vi->rss_size);
4353 	} else if (vi->rss_size % vi->nrxq) {
4354 		if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4355 		    "expect uneven traffic distribution.\n", vi->nrxq,
4356 		    vi->rss_size);
4357 	}
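	/*
	 * Example: with nrxq = 4 and a 128-entry RSS table each queue appears
	 * 32 times; with nrxq = 3 the 128 slots cannot be split evenly, so
	 * some queues occupy more slots than others and will see
	 * proportionally more traffic (hence the warning above).
	 */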
4358 #ifdef RSS
4359 	MPASS(RSS_KEYSIZE == 40);
4360 	if (vi->nrxq != nbuckets) {
4361 		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
4362 		    "performance will be impacted.\n", vi->nrxq, nbuckets);
4363 	}
4364 
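	/*
	 * The loop below hands t4_write_rss_key() the kernel's RSS key with
	 * the 32-bit words in reverse order and each word converted to
	 * big-endian, which appears to be the layout the hardware wants.
	 */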
4365 	rss_getkey((void *)&raw_rss_key[0]);
4366 	for (i = 0; i < nitems(rss_key); i++) {
4367 		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
4368 	}
4369 	t4_write_rss_key(sc, &rss_key[0], -1);
4370 #endif
4371 	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
4372 	for (i = 0; i < vi->rss_size;) {
4373 #ifdef RSS
4374 		j = rss_get_indirection_to_bucket(i);
4375 		j %= vi->nrxq;
4376 		rxq = &sc->sge.rxq[vi->first_rxq + j];
4377 		rss[i++] = rxq->iq.abs_id;
4378 #else
4379 		for_each_rxq(vi, j, rxq) {
4380 			rss[i++] = rxq->iq.abs_id;
4381 			if (i == vi->rss_size)
4382 				break;
4383 		}
4384 #endif
4385 	}
4386 
4387 	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
4388 	    vi->rss_size);
4389 	if (rc != 0) {
4390 		if_printf(ifp, "rss_config failed: %d\n", rc);
4391 		goto done;
4392 	}
4393 
4394 #ifdef RSS
4395 	hashen = hashconfig_to_hashen(hashconfig);
4396 
4397 	/*
4398 	 * We may have had to enable some hashes even though the global config
4399 	 * wants them disabled.  This is a potential problem that must be
4400 	 * reported to the user.
4401 	 */
4402 	extra = hashen_to_hashconfig(hashen) ^ hashconfig;
4403 
4404 	/*
4405 	 * If we consider only the supported hash types, then the enabled hashes
4406 	 * are a superset of the requested hashes.  In other words, there cannot
4407 	 * be any supported hash that was requested but not enabled, but there
4408 	 * can be hashes that were not requested but had to be enabled.
4409 	 */
4410 	extra &= SUPPORTED_RSS_HASHTYPES;
4411 	MPASS((extra & hashconfig) == 0);
4412 
4413 	if (extra) {
4414 		if_printf(ifp,
4415 		    "global RSS config (0x%x) cannot be accommodated.\n",
4416 		    hashconfig);
4417 	}
4418 	if (extra & RSS_HASHTYPE_RSS_IPV4)
4419 		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
4420 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
4421 		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
4422 	if (extra & RSS_HASHTYPE_RSS_IPV6)
4423 		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
4424 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
4425 		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
4426 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
4427 		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
4428 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
4429 		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
4430 #else
4431 	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
4432 	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
4433 	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4434 	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
4435 #endif
4436 	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
4437 	if (rc != 0) {
4438 		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
4439 		goto done;
4440 	}
4441 
4442 	vi->rss = rss;
4443 	vi->flags |= VI_INIT_DONE;
4444 done:
4445 	if (rc != 0)
4446 		vi_full_uninit(vi);
4447 
4448 	return (rc);
4449 }
4450 
4451 /*
4452  * Idempotent.
4453  */
4454 int
4455 vi_full_uninit(struct vi_info *vi)
4456 {
4457 	struct port_info *pi = vi->pi;
4458 	struct adapter *sc = pi->adapter;
4459 	int i;
4460 	struct sge_rxq *rxq;
4461 	struct sge_txq *txq;
4462 #ifdef TCP_OFFLOAD
4463 	struct sge_ofld_rxq *ofld_rxq;
4464 	struct sge_wrq *ofld_txq;
4465 #endif
4466 
4467 	if (vi->flags & VI_INIT_DONE) {
4468 
4469 		/* Need to quiesce queues.  */
4470 
4471 		/* XXX: Only for the first VI? */
4472 		if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
4473 			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
4474 
4475 		for_each_txq(vi, i, txq) {
4476 			quiesce_txq(sc, txq);
4477 		}
4478 
4479 #ifdef TCP_OFFLOAD
4480 		for_each_ofld_txq(vi, i, ofld_txq) {
4481 			quiesce_wrq(sc, ofld_txq);
4482 		}
4483 #endif
4484 
4485 		for_each_rxq(vi, i, rxq) {
4486 			quiesce_iq(sc, &rxq->iq);
4487 			quiesce_fl(sc, &rxq->fl);
4488 		}
4489 
4490 #ifdef TCP_OFFLOAD
4491 		for_each_ofld_rxq(vi, i, ofld_rxq) {
4492 			quiesce_iq(sc, &ofld_rxq->iq);
4493 			quiesce_fl(sc, &ofld_rxq->fl);
4494 		}
4495 #endif
4496 		free(vi->rss, M_CXGBE);
4497 		free(vi->nm_rss, M_CXGBE);
4498 	}
4499 
4500 	t4_teardown_vi_queues(vi);
4501 	vi->flags &= ~VI_INIT_DONE;
4502 
4503 	return (0);
4504 }
4505 
4506 static void
4507 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
4508 {
4509 	struct sge_eq *eq = &txq->eq;
4510 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
4511 
4512 	(void) sc;	/* unused */
4513 
4514 #ifdef INVARIANTS
4515 	TXQ_LOCK(txq);
4516 	MPASS((eq->flags & EQ_ENABLED) == 0);
4517 	TXQ_UNLOCK(txq);
4518 #endif
4519 
4520 	/* Wait for the mp_ring to empty. */
4521 	while (!mp_ring_is_idle(txq->r)) {
4522 		mp_ring_check_drainage(txq->r, 0);
4523 		pause("rquiesce", 1);
4524 	}
4525 
4526 	/* Then wait for the hardware to finish. */
4527 	while (spg->cidx != htobe16(eq->pidx))
4528 		pause("equiesce", 1);
4529 
4530 	/* Finally, wait for the driver to reclaim all descriptors. */
4531 	while (eq->cidx != eq->pidx)
4532 		pause("dquiesce", 1);
4533 }
4534 
4535 static void
4536 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
4537 {
4538 
4539 	/* XXXTX */
4540 }
4541 
4542 static void
4543 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
4544 {
4545 	(void) sc;	/* unused */
4546 
4547 	/* Synchronize with the interrupt handler */
4548 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
4549 		pause("iqfree", 1);
4550 }
4551 
4552 static void
4553 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
4554 {
4555 	mtx_lock(&sc->sfl_lock);
4556 	FL_LOCK(fl);
4557 	fl->flags |= FL_DOOMED;
4558 	FL_UNLOCK(fl);
4559 	callout_stop(&sc->sfl_callout);
4560 	mtx_unlock(&sc->sfl_lock);
4561 
4562 	KASSERT((fl->flags & FL_STARVING) == 0,
4563 	    ("%s: still starving", __func__));
4564 }
4565 
4566 static int
4567 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
4568     driver_intr_t *handler, void *arg, char *name)
4569 {
4570 	int rc;
4571 
4572 	irq->rid = rid;
4573 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
4574 	    RF_SHAREABLE | RF_ACTIVE);
4575 	if (irq->res == NULL) {
4576 		device_printf(sc->dev,
4577 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
4578 		return (ENOMEM);
4579 	}
4580 
4581 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
4582 	    NULL, handler, arg, &irq->tag);
4583 	if (rc != 0) {
4584 		device_printf(sc->dev,
4585 		    "failed to set up interrupt for rid %d, name %s: %d\n",
4586 		    rid, name, rc);
4587 	} else if (name)
4588 		bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
4589 
4590 	return (rc);
4591 }
4592 
4593 static int
4594 t4_free_irq(struct adapter *sc, struct irq *irq)
4595 {
4596 	if (irq->tag)
4597 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
4598 	if (irq->res)
4599 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4600 
4601 	bzero(irq, sizeof(*irq));
4602 
4603 	return (0);
4604 }
4605 
4606 static void
4607 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
4608 {
4609 
4610 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4611 	t4_get_regs(sc, buf, regs->len);
4612 }
4613 
4614 #define	A_PL_INDIR_CMD	0x1f8
4615 
4616 #define	S_PL_AUTOINC	31
4617 #define	M_PL_AUTOINC	0x1U
4618 #define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
4619 #define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
4620 
4621 #define	S_PL_VFID	20
4622 #define	M_PL_VFID	0xffU
4623 #define	V_PL_VFID(x)	((x) << S_PL_VFID)
4624 #define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)
4625 
4626 #define	S_PL_ADDR	0
4627 #define	M_PL_ADDR	0xfffffU
4628 #define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
4629 #define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)
4630 
4631 #define	A_PL_INDIR_DATA	0x1fc
4632 
4633 static uint64_t
4634 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
4635 {
4636 	u32 stats[2];
4637 
4638 	mtx_assert(&sc->reg_lock, MA_OWNED);
4639 	if (sc->flags & IS_VF) {
4640 		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
4641 		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
4642 	} else {
4643 		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4644 		    V_PL_VFID(G_FW_VIID_VIN(viid)) |
4645 		    V_PL_ADDR(VF_MPS_REG(reg)));
4646 		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
4647 		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
4648 	}
4649 	return (((uint64_t)stats[1]) << 32 | stats[0]);
4650 }
4651 
4652 static void
4653 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
4654     struct fw_vi_stats_vf *stats)
4655 {
4656 
4657 #define GET_STAT(name) \
4658 	read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
4659 
4660 	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
4661 	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
4662 	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
4663 	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
4664 	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
4665 	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
4666 	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
4667 	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
4668 	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
4669 	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
4670 	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
4671 	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
4672 	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
4673 	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
4674 	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
4675 	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);
4676 
4677 #undef GET_STAT
4678 }
4679 
4680 static void
4681 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
4682 {
4683 	int reg;
4684 
4685 	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4686 	    V_PL_VFID(G_FW_VIID_VIN(viid)) |
4687 	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
4688 	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
4689 	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
4690 		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
4691 }
4692 
4693 static void
4694 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
4695 {
4696 	struct timeval tv;
4697 	const struct timeval interval = {0, 250000};	/* 250ms */
4698 
4699 	if (!(vi->flags & VI_INIT_DONE))
4700 		return;
4701 
4702 	getmicrotime(&tv);
4703 	timevalsub(&tv, &interval);
4704 	if (timevalcmp(&tv, &vi->last_refreshed, <))
4705 		return;
4706 
4707 	mtx_lock(&sc->reg_lock);
4708 	t4_get_vi_stats(sc, vi->viid, &vi->stats);
4709 	getmicrotime(&vi->last_refreshed);
4710 	mtx_unlock(&sc->reg_lock);
4711 }
4712 
4713 static void
4714 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4715 {
4716 	int i;
4717 	u_int v, tnl_cong_drops;
4718 	struct timeval tv;
4719 	const struct timeval interval = {0, 250000};	/* 250ms */
4720 
4721 	getmicrotime(&tv);
4722 	timevalsub(&tv, &interval);
4723 	if (timevalcmp(&tv, &pi->last_refreshed, <))
4724 		return;
4725 
4726 	tnl_cong_drops = 0;
4727 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4728 	for (i = 0; i < sc->chip_params->nchan; i++) {
4729 		if (pi->rx_chan_map & (1 << i)) {
4730 			mtx_lock(&sc->reg_lock);
4731 			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4732 			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4733 			mtx_unlock(&sc->reg_lock);
4734 			tnl_cong_drops += v;
4735 		}
4736 	}
4737 	pi->tnl_cong_drops = tnl_cong_drops;
4738 	getmicrotime(&pi->last_refreshed);
4739 }
4740 
4741 static void
4742 cxgbe_tick(void *arg)
4743 {
4744 	struct port_info *pi = arg;
4745 	struct adapter *sc = pi->adapter;
4746 
4747 	PORT_LOCK_ASSERT_OWNED(pi);
4748 	cxgbe_refresh_stats(sc, pi);
4749 
4750 	callout_schedule(&pi->tick, hz);
4751 }
4752 
4753 void
4754 vi_tick(void *arg)
4755 {
4756 	struct vi_info *vi = arg;
4757 	struct adapter *sc = vi->pi->adapter;
4758 
4759 	vi_refresh_stats(sc, vi);
4760 
4761 	callout_schedule(&vi->tick, hz);
4762 }
4763 
4764 static void
4765 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4766 {
4767 	struct ifnet *vlan;
4768 
4769 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4770 		return;
4771 
4772 	vlan = VLAN_DEVAT(ifp, vid);
4773 	VLAN_SETCOOKIE(vlan, ifp);
4774 }
4775 
4776 /*
4777  * Should match fw_caps_config_<foo> enums in t4fw_interface.h
4778  */
4779 static char *caps_decoder[] = {
4780 	"\20\001IPMI\002NCSI",				/* 0: NBM */
4781 	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
4782 	"\20\001INGRESS\002EGRESS",			/* 2: switch */
4783 	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
4784 	    "\006HASHFILTER\007ETHOFLD",
4785 	"\20\001TOE",					/* 4: TOE */
4786 	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
4787 	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
4788 	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
4789 	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
4790 	    "\007T10DIF"
4791 	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
4792 	"\20\001LOOKASIDE\002TLSKEYS",			/* 7: Crypto */
4793 	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
4794 		    "\004PO_INITIATOR\005PO_TARGET",
4795 };
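
/*
 * Each string above is in the kernel's %b/sysctl_bitfield format: the
 * leading \20 selects hexadecimal output and every \<n>NAME pair names
 * bit n (bit 1 is the LSB).  For example, a niccaps value of 0x21 would be
 * rendered roughly as "0x21<NIC,HASHFILTER>".
 */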
4796 
4797 void
4798 t4_sysctls(struct adapter *sc)
4799 {
4800 	struct sysctl_ctx_list *ctx;
4801 	struct sysctl_oid *oid;
4802 	struct sysctl_oid_list *children, *c0;
4803 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4804 
4805 	ctx = device_get_sysctl_ctx(sc->dev);
4806 
4807 	/*
4808 	 * dev.t4nex.X.
4809 	 */
4810 	oid = device_get_sysctl_tree(sc->dev);
4811 	c0 = children = SYSCTL_CHILDREN(oid);
4812 
4813 	sc->sc_do_rxcopy = 1;
4814 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4815 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4816 
4817 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4818 	    sc->params.nports, "# of ports");
4819 
4820 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4821 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4822 	    sysctl_bitfield, "A", "available doorbells");
4823 
4824 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4825 	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4826 
4827 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4828 	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
4829 	    sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
4830 	    "interrupt holdoff timer values (us)");
4831 
4832 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4833 	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
4834 	    sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
4835 	    "interrupt holdoff packet counter values");
4836 
4837 	t4_sge_sysctls(sc, ctx, children);
4838 
4839 	sc->lro_timeout = 100;
4840 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4841 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4842 
4843 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
4844 	    &sc->debug_flags, 0, "flags to enable runtime debugging");
4845 
4846 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
4847 	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
4848 
4849 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4850 	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
4851 
4852 	if (sc->flags & IS_VF)
4853 		return;
4854 
4855 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4856 	    NULL, chip_rev(sc), "chip hardware revision");
4857 
4858 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
4859 	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
4860 
4861 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
4862 	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
4863 
4864 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
4865 	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
4866 
4867 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
4868 	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
4869 
4870 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
4871 	    sc->er_version, 0, "expansion ROM version");
4872 
4873 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
4874 	    sc->bs_version, 0, "bootstrap firmware version");
4875 
4876 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
4877 	    NULL, sc->params.scfg_vers, "serial config version");
4878 
4879 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
4880 	    NULL, sc->params.vpd_vers, "VPD version");
4881 
4882 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4883 	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
4884 
4885 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4886 	    sc->cfcsum, "config file checksum");
4887 
4888 #define SYSCTL_CAP(name, n, text) \
4889 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
4890 	    CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
4891 	    sysctl_bitfield, "A", "available " text " capabilities")
4892 
4893 	SYSCTL_CAP(nbmcaps, 0, "NBM");
4894 	SYSCTL_CAP(linkcaps, 1, "link");
4895 	SYSCTL_CAP(switchcaps, 2, "switch");
4896 	SYSCTL_CAP(niccaps, 3, "NIC");
4897 	SYSCTL_CAP(toecaps, 4, "TCP offload");
4898 	SYSCTL_CAP(rdmacaps, 5, "RDMA");
4899 	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
4900 	SYSCTL_CAP(cryptocaps, 7, "crypto");
4901 	SYSCTL_CAP(fcoecaps, 8, "FCoE");
4902 #undef SYSCTL_CAP
4903 
4904 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4905 	    NULL, sc->tids.nftids, "number of filters");
4906 
4907 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4908 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4909 	    "chip temperature (in Celsius)");
4910 
4911 #ifdef SBUF_DRAIN
4912 	/*
4913 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4914 	 */
4915 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4916 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4917 	    "logs and miscellaneous information");
4918 	children = SYSCTL_CHILDREN(oid);
4919 
4920 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4921 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4922 	    sysctl_cctrl, "A", "congestion control");
4923 
4924 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4925 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4926 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4927 
4928 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4929 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4930 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4931 
4932 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4933 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4934 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4935 
4936 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4937 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4938 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4939 
4940 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4941 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4942 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4943 
4944 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4945 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4946 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4947 
4948 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4949 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4950 	    chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
4951 	    "A", "CIM logic analyzer");
4952 
4953 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4954 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4955 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4956 
4957 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4958 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4959 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4960 
4961 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4962 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4963 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4964 
4965 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4966 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4967 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4968 
4969 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4970 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4971 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4972 
4973 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4974 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4975 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4976 
4977 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4978 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4979 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4980 
4981 	if (chip_id(sc) > CHELSIO_T4) {
4982 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4983 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4984 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4985 
4986 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4987 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4988 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4989 	}
4990 
4991 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4992 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4993 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4994 
4995 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4996 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4997 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4998 
4999 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5000 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5001 	    sysctl_cpl_stats, "A", "CPL statistics");
5002 
5003 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5004 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5005 	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5006 
5007 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5008 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5009 	    sysctl_devlog, "A", "firmware's device log");
5010 
5011 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5012 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5013 	    sysctl_fcoe_stats, "A", "FCoE statistics");
5014 
5015 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5016 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5017 	    sysctl_hw_sched, "A", "hardware scheduler");
5018 
5019 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5020 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5021 	    sysctl_l2t, "A", "hardware L2 table");
5022 
5023 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5024 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5025 	    sysctl_lb_stats, "A", "loopback statistics");
5026 
5027 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5028 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5029 	    sysctl_meminfo, "A", "memory regions");
5030 
5031 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5032 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5033 	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5034 	    "A", "MPS TCAM entries");
5035 
5036 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5037 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5038 	    sysctl_path_mtus, "A", "path MTUs");
5039 
5040 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5041 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5042 	    sysctl_pm_stats, "A", "PM statistics");
5043 
5044 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5045 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5046 	    sysctl_rdma_stats, "A", "RDMA statistics");
5047 
5048 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5049 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5050 	    sysctl_tcp_stats, "A", "TCP statistics");
5051 
5052 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5053 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5054 	    sysctl_tids, "A", "TID information");
5055 
5056 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5057 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5058 	    sysctl_tp_err_stats, "A", "TP error statistics");
5059 
5060 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5061 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5062 	    "TP logic analyzer event capture mask");
5063 
5064 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5065 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5066 	    sysctl_tp_la, "A", "TP logic analyzer");
5067 
5068 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5069 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5070 	    sysctl_tx_rate, "A", "Tx rate");
5071 
5072 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5073 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5074 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5075 
5076 	if (chip_id(sc) >= CHELSIO_T5) {
5077 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5078 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5079 		    sysctl_wcwr_stats, "A", "write combined work requests");
5080 	}
5081 #endif
5082 
5083 #ifdef TCP_OFFLOAD
5084 	if (is_offload(sc)) {
5085 		/*
5086 		 * dev.t4nex.X.toe.
5087 		 */
5088 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5089 		    NULL, "TOE parameters");
5090 		children = SYSCTL_CHILDREN(oid);
5091 
5092 		sc->tt.sndbuf = 256 * 1024;
5093 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5094 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
5095 
5096 		sc->tt.ddp = 0;
5097 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5098 		    &sc->tt.ddp, 0, "DDP allowed");
5099 
5100 		sc->tt.rx_coalesce = 1;
5101 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5102 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5103 
5104 		sc->tt.tx_align = 1;
5105 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5106 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5107 
5108 		sc->tt.tx_zcopy = 0;
5109 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5110 		    CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5111 		    "Enable zero-copy aio_write(2)");
5112 
5113 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5114 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5115 		    "TP timer tick (us)");
5116 
5117 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5118 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5119 		    "TCP timestamp tick (us)");
5120 
5121 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5122 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5123 		    "DACK tick (us)");
5124 
5125 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5126 		    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5127 		    "IU", "DACK timer (us)");
5128 
5129 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5130 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5131 		    sysctl_tp_timer, "LU", "Retransmit min (us)");
5132 
5133 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5134 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5135 		    sysctl_tp_timer, "LU", "Retransmit max (us)");
5136 
5137 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5138 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5139 		    sysctl_tp_timer, "LU", "Persist timer min (us)");
5140 
5141 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5142 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5143 		    sysctl_tp_timer, "LU", "Persist timer max (us)");
5144 
5145 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5146 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5147 		    sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
5148 
5149 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl",
5150 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
5151 		    sysctl_tp_timer, "LU", "Keepalive interval (us)");
5152 
5153 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
5154 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
5155 		    sysctl_tp_timer, "LU", "Initial SRTT (us)");
5156 
5157 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
5158 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
5159 		    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
5160 	}
5161 #endif
5162 }
5163 
5164 void
5165 vi_sysctls(struct vi_info *vi)
5166 {
5167 	struct sysctl_ctx_list *ctx;
5168 	struct sysctl_oid *oid;
5169 	struct sysctl_oid_list *children;
5170 
5171 	ctx = device_get_sysctl_ctx(vi->dev);
5172 
5173 	/*
5174 	 * dev.v?(cxgbe|cxl).X.
5175 	 */
5176 	oid = device_get_sysctl_tree(vi->dev);
5177 	children = SYSCTL_CHILDREN(oid);
5178 
5179 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5180 	    vi->viid, "VI identifier");
5181 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5182 	    &vi->nrxq, 0, "# of rx queues");
5183 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5184 	    &vi->ntxq, 0, "# of tx queues");
5185 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5186 	    &vi->first_rxq, 0, "index of first rx queue");
5187 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5188 	    &vi->first_txq, 0, "index of first tx queue");
5189 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
5190 	    vi->rss_size, "size of RSS indirection table");
5191 
5192 	if (IS_MAIN_VI(vi)) {
5193 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
5194 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5195 		    "Reserve queue 0 for non-flowid packets");
5196 	}
5197 
5198 #ifdef TCP_OFFLOAD
5199 	if (vi->nofldrxq != 0) {
5200 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5201 		    &vi->nofldrxq, 0,
5202 		    "# of rx queues for offloaded TCP connections");
5203 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5204 		    &vi->nofldtxq, 0,
5205 		    "# of tx queues for offloaded TCP connections");
5206 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5207 		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5208 		    "index of first TOE rx queue");
5209 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5210 		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
5211 		    "index of first TOE tx queue");
5212 	}
5213 #endif
5214 #ifdef DEV_NETMAP
5215 	if (vi->nnmrxq != 0) {
5216 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5217 		    &vi->nnmrxq, 0, "# of netmap rx queues");
5218 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5219 		    &vi->nnmtxq, 0, "# of netmap tx queues");
5220 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5221 		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
5222 		    "index of first netmap rx queue");
5223 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5224 		    CTLFLAG_RD, &vi->first_nm_txq, 0,
5225 		    "index of first netmap tx queue");
5226 	}
5227 #endif
5228 
5229 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5230 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5231 	    "holdoff timer index");
5232 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5233 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5234 	    "holdoff packet counter index");
5235 
5236 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5237 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5238 	    "rx queue size");
5239 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5240 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
5241 	    "tx queue size");
5242 }
5243 
5244 static void
5245 cxgbe_sysctls(struct port_info *pi)
5246 {
5247 	struct sysctl_ctx_list *ctx;
5248 	struct sysctl_oid *oid;
5249 	struct sysctl_oid_list *children, *children2;
5250 	struct adapter *sc = pi->adapter;
5251 	int i;
5252 	char name[16];
5253 
5254 	ctx = device_get_sysctl_ctx(pi->dev);
5255 
5256 	/*
5257 	 * dev.cxgbe.X.
5258 	 */
5259 	oid = device_get_sysctl_tree(pi->dev);
5260 	children = SYSCTL_CHILDREN(oid);
5261 
5262 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5263 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5264 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5265 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5266 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5267 		    "PHY temperature (in Celsius)");
5268 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5269 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5270 		    "PHY firmware version");
5271 	}
5272 
5273 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5274 	    CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
5275 	    "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5276 
5277 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5278 	    port_top_speed(pi), "max speed (in Gbps)");
5279 
5280 	if (sc->flags & IS_VF)
5281 		return;
5282 
5283 	/*
5284 	 * dev.(cxgbe|cxl).X.tc.
5285 	 */
5286 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
5287 	    "Tx scheduler traffic classes");
5288 	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
5289 		struct tx_sched_class *tc = &pi->tc[i];
5290 
5291 		snprintf(name, sizeof(name), "%d", i);
5292 		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
5293 		    SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
5294 		    "traffic class"));
5295 		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
5296 		    &tc->flags, 0, "flags");
5297 		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
5298 		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
5299 #ifdef SBUF_DRAIN
5300 		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
5301 		    CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
5302 		    sysctl_tc_params, "A", "traffic class parameters");
5303 #endif
5304 	}
5305 
5306 	/*
5307 	 * dev.cxgbe.X.stats.
5308 	 */
5309 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5310 	    NULL, "port statistics");
5311 	children = SYSCTL_CHILDREN(oid);
5312 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5313 	    &pi->tx_parse_error, 0,
5314 	    "# of tx packets with invalid length or # of segments");
5315 
5316 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5317 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5318 	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5319 	    sysctl_handle_t4_reg64, "QU", desc)
5320 
5321 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5322 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5323 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5324 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5325 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5326 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5327 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5328 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5329 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5330 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5331 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5332 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5333 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5334 	    "# of tx frames in this range",
5335 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5336 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5337 	    "# of tx frames in this range",
5338 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5339 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5340 	    "# of tx frames in this range",
5341 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5342 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5343 	    "# of tx frames in this range",
5344 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5345 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5346 	    "# of tx frames in this range",
5347 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5348 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5349 	    "# of tx frames in this range",
5350 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5351 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5352 	    "# of tx frames in this range",
5353 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5354 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5355 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5356 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5357 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5358 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5359 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5360 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5361 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5362 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5363 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5364 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5365 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5366 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5367 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5368 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5369 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5370 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5371 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5372 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5373 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5374 
5375 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5376 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5377 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5378 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5379 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5380 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5381 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5382 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5383 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5384 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5385 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5386 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5387 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5388 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5389 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5390 	    "# of frames received with bad FCS",
5391 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5392 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5393 	    "# of frames received with length error",
5394 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5395 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5396 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5397 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5398 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5399 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5400 	    "# of rx frames in this range",
5401 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5402 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5403 	    "# of rx frames in this range",
5404 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5405 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5406 	    "# of rx frames in this range",
5407 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5408 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5409 	    "# of rx frames in this range",
5410 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5411 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5412 	    "# of rx frames in this range",
5413 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5414 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5415 	    "# of rx frames in this range",
5416 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5417 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5418 	    "# of rx frames in this range",
5419 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5420 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5421 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5422 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5423 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5424 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5425 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5426 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5427 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5428 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5429 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5430 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5431 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5432 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5433 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5434 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5435 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5436 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5437 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5438 
5439 #undef SYSCTL_ADD_T4_REG64
5440 
5441 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5442 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5443 	    &pi->stats.name, desc)
5444 
5445 	/* We get these from port_stats and they may be stale by up to 1s */
5446 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5447 	    "# drops due to buffer-group 0 overflows");
5448 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5449 	    "# drops due to buffer-group 1 overflows");
5450 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5451 	    "# drops due to buffer-group 2 overflows");
5452 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5453 	    "# drops due to buffer-group 3 overflows");
5454 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5455 	    "# of buffer-group 0 truncated packets");
5456 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5457 	    "# of buffer-group 1 truncated packets");
5458 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5459 	    "# of buffer-group 2 truncated packets");
5460 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5461 	    "# of buffer-group 3 truncated packets");
5462 
5463 #undef SYSCTL_ADD_T4_PORTSTAT
5464 }
5465 
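/*
 * Report the array of ints at arg1 (arg2 bytes long) as a space-separated
 * list of values.
 */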
5466 static int
5467 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5468 {
5469 	int rc, *i, space = 0;
5470 	struct sbuf sb;
5471 
5472 	sbuf_new_for_sysctl(&sb, NULL, 64, req);
5473 	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5474 		if (space)
5475 			sbuf_printf(&sb, " ");
5476 		sbuf_printf(&sb, "%d", *i);
5477 		space = 1;
5478 	}
5479 	rc = sbuf_finish(&sb);
5480 	sbuf_delete(&sb);
5481 	return (rc);
5482 }
5483 
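/*
 * Display arg2 as a bit field, using the "%b" format description string
 * passed in arg1.
 */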
5484 static int
5485 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5486 {
5487 	int rc;
5488 	struct sbuf *sb;
5489 
5490 	rc = sysctl_wire_old_buffer(req, 0);
5491 	if (rc != 0)
5492 		return (rc);
5493 
5494 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5495 	if (sb == NULL)
5496 		return (ENOMEM);
5497 
5498 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5499 	rc = sbuf_finish(sb);
5500 	sbuf_delete(sb);
5501 
5502 	return (rc);
5503 }
5504 
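/*
 * Read the BASE-T PHY's temperature (arg2 == 0) or firmware version
 * (arg2 == 1) over MDIO.
 */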
5505 static int
5506 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5507 {
5508 	struct port_info *pi = arg1;
5509 	int op = arg2;
5510 	struct adapter *sc = pi->adapter;
5511 	u_int v;
5512 	int rc;
5513 
5514 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
5515 	if (rc)
5516 		return (rc);
5517 	/* XXX: magic numbers */
5518 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5519 	    &v);
5520 	end_synchronized_op(sc, 0);
5521 	if (rc)
5522 		return (rc);
5523 	if (op == 0)
5524 		v /= 256;
5525 
5526 	rc = sysctl_handle_int(oidp, &v, 0, req);
5527 	return (rc);
5528 }
5529 
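/*
 * Control whether the first tx queue is reserved for packets without a
 * flowid.  Any value >= 1 enables the reservation, provided the VI has more
 * than one tx queue.
 */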
5530 static int
5531 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5532 {
5533 	struct vi_info *vi = arg1;
5534 	int rc, val;
5535 
5536 	val = vi->rsrv_noflowq;
5537 	rc = sysctl_handle_int(oidp, &val, 0, req);
5538 	if (rc != 0 || req->newptr == NULL)
5539 		return (rc);
5540 
5541 	if ((val >= 1) && (vi->ntxq > 1))
5542 		vi->rsrv_noflowq = 1;
5543 	else
5544 		vi->rsrv_noflowq = 0;
5545 
5546 	return (rc);
5547 }
5548 
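/*
 * Change the holdoff timer index of all rx queues (including offload rx
 * queues, if any) of the VI.  The new value takes effect immediately.
 */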
5549 static int
5550 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5551 {
5552 	struct vi_info *vi = arg1;
5553 	struct adapter *sc = vi->pi->adapter;
5554 	int idx, rc, i;
5555 	struct sge_rxq *rxq;
5556 #ifdef TCP_OFFLOAD
5557 	struct sge_ofld_rxq *ofld_rxq;
5558 #endif
5559 	uint8_t v;
5560 
5561 	idx = vi->tmr_idx;
5562 
5563 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5564 	if (rc != 0 || req->newptr == NULL)
5565 		return (rc);
5566 
5567 	if (idx < 0 || idx >= SGE_NTIMERS)
5568 		return (EINVAL);
5569 
5570 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5571 	    "t4tmr");
5572 	if (rc)
5573 		return (rc);
5574 
5575 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
5576 	for_each_rxq(vi, i, rxq) {
5577 #ifdef atomic_store_rel_8
5578 		atomic_store_rel_8(&rxq->iq.intr_params, v);
5579 #else
5580 		rxq->iq.intr_params = v;
5581 #endif
5582 	}
5583 #ifdef TCP_OFFLOAD
5584 	for_each_ofld_rxq(vi, i, ofld_rxq) {
5585 #ifdef atomic_store_rel_8
5586 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5587 #else
5588 		ofld_rxq->iq.intr_params = v;
5589 #endif
5590 	}
5591 #endif
5592 	vi->tmr_idx = idx;
5593 
5594 	end_synchronized_op(sc, LOCK_HELD);
5595 	return (0);
5596 }
5597 
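/*
 * Change the holdoff packet-count index of the VI.  Unlike the timer index,
 * this can be modified only before the VI's queues are created.
 */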
5598 static int
5599 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5600 {
5601 	struct vi_info *vi = arg1;
5602 	struct adapter *sc = vi->pi->adapter;
5603 	int idx, rc;
5604 
5605 	idx = vi->pktc_idx;
5606 
5607 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5608 	if (rc != 0 || req->newptr == NULL)
5609 		return (rc);
5610 
5611 	if (idx < -1 || idx >= SGE_NCOUNTERS)
5612 		return (EINVAL);
5613 
5614 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5615 	    "t4pktc");
5616 	if (rc)
5617 		return (rc);
5618 
5619 	if (vi->flags & VI_INIT_DONE)
5620 		rc = EBUSY; /* cannot be changed once the queues are created */
5621 	else
5622 		vi->pktc_idx = idx;
5623 
5624 	end_synchronized_op(sc, LOCK_HELD);
5625 	return (rc);
5626 }
5627 
5628 static int
5629 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5630 {
5631 	struct vi_info *vi = arg1;
5632 	struct adapter *sc = vi->pi->adapter;
5633 	int qsize, rc;
5634 
5635 	qsize = vi->qsize_rxq;
5636 
5637 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5638 	if (rc != 0 || req->newptr == NULL)
5639 		return (rc);
5640 
5641 	if (qsize < 128 || (qsize & 7))
5642 		return (EINVAL);
5643 
5644 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5645 	    "t4rxqs");
5646 	if (rc)
5647 		return (rc);
5648 
5649 	if (vi->flags & VI_INIT_DONE)
5650 		rc = EBUSY; /* cannot be changed once the queues are created */
5651 	else
5652 		vi->qsize_rxq = qsize;
5653 
5654 	end_synchronized_op(sc, LOCK_HELD);
5655 	return (rc);
5656 }
5657 
5658 static int
5659 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5660 {
5661 	struct vi_info *vi = arg1;
5662 	struct adapter *sc = vi->pi->adapter;
5663 	int qsize, rc;
5664 
5665 	qsize = vi->qsize_txq;
5666 
5667 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5668 	if (rc != 0 || req->newptr == NULL)
5669 		return (rc);
5670 
5671 	if (qsize < 128 || qsize > 65536)
5672 		return (EINVAL);
5673 
5674 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5675 	    "t4txqs");
5676 	if (rc)
5677 		return (rc);
5678 
5679 	if (vi->flags & VI_INIT_DONE)
5680 		rc = EBUSY; /* cannot be changed once the queues are created */
5681 	else
5682 		vi->qsize_txq = qsize;
5683 
5684 	end_synchronized_op(sc, LOCK_HELD);
5685 	return (rc);
5686 }
5687 
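/*
 * Display or modify the port's PAUSE settings.  On write the value must be a
 * single digit with bit 0 = rx_pause and bit 1 = tx_pause; the link is
 * reconfigured only if the requested flow control actually changes.
 */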
5688 static int
5689 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5690 {
5691 	struct port_info *pi = arg1;
5692 	struct adapter *sc = pi->adapter;
5693 	struct link_config *lc = &pi->link_cfg;
5694 	int rc;
5695 
5696 	if (req->newptr == NULL) {
5697 		struct sbuf *sb;
5698 		static const char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5699 
5700 		rc = sysctl_wire_old_buffer(req, 0);
5701 		if (rc != 0)
5702 			return (rc);
5703 
5704 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5705 		if (sb == NULL)
5706 			return (ENOMEM);
5707 
5708 		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5709 		rc = sbuf_finish(sb);
5710 		sbuf_delete(sb);
5711 	} else {
5712 		char s[2];
5713 		int n;
5714 
5715 		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5716 		s[1] = 0;
5717 
5718 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5719 		if (rc != 0)
5720 			return (rc);
5721 
5722 		if (s[1] != 0)
5723 			return (EINVAL);
5724 		if (s[0] < '0' || s[0] > '9')
5725 			return (EINVAL);	/* not a number */
5726 		n = s[0] - '0';
5727 		if (n & ~(PAUSE_TX | PAUSE_RX))
5728 			return (EINVAL);	/* some other bit is set too */
5729 
5730 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5731 		    "t4PAUSE");
5732 		if (rc)
5733 			return (rc);
5734 		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5735 			int link_ok = lc->link_ok;
5736 
5737 			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5738 			lc->requested_fc |= n;
5739 			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5740 			lc->link_ok = link_ok;	/* restore */
5741 		}
5742 		end_synchronized_op(sc, 0);
5743 	}
5744 
5745 	return (rc);
5746 }
5747 
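/*
 * Return the current value of the 64-bit hardware register whose offset is
 * in arg2.
 */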
5748 static int
5749 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5750 {
5751 	struct adapter *sc = arg1;
5752 	int reg = arg2;
5753 	uint64_t val;
5754 
5755 	val = t4_read_reg64(sc, reg);
5756 
5757 	return (sysctl_handle_64(oidp, &val, 0, req));
5758 }
5759 
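/*
 * Ask the firmware for the chip temperature.  The firmware reports 0 when
 * the temperature is unknown; that is displayed as -1 here.
 */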
5760 static int
5761 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5762 {
5763 	struct adapter *sc = arg1;
5764 	int rc, t;
5765 	uint32_t param, val;
5766 
5767 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5768 	if (rc)
5769 		return (rc);
5770 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5771 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5772 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5773 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5774 	end_synchronized_op(sc, 0);
5775 	if (rc)
5776 		return (rc);
5777 
5778 	/* unknown is returned as 0 but we display -1 in that case */
5779 	t = val == 0 ? -1 : val;
5780 
5781 	rc = sysctl_handle_int(oidp, &t, 0, req);
5782 	return (rc);
5783 }
5784 
5785 #ifdef SBUF_DRAIN
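/*
 * Display the congestion control table: for each window, the 16 per-MTU
 * additive increments followed by the window's parameters and decrement
 * factor.
 */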
5786 static int
5787 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5788 {
5789 	struct adapter *sc = arg1;
5790 	struct sbuf *sb;
5791 	int rc, i;
5792 	uint16_t incr[NMTUS][NCCTRL_WIN];
5793 	static const char *dec_fac[] = {
5794 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5795 		"0.9375"
5796 	};
5797 
5798 	rc = sysctl_wire_old_buffer(req, 0);
5799 	if (rc != 0)
5800 		return (rc);
5801 
5802 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5803 	if (sb == NULL)
5804 		return (ENOMEM);
5805 
5806 	t4_read_cong_tbl(sc, incr);
5807 
5808 	for (i = 0; i < NCCTRL_WIN; ++i) {
5809 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5810 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
5811 		    incr[5][i], incr[6][i], incr[7][i]);
5812 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
5813 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
5814 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
5815 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
5816 	}
5817 
5818 	rc = sbuf_finish(sb);
5819 	sbuf_delete(sb);
5820 
5821 	return (rc);
5822 }
5823 
5824 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5825 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
5826 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
5827 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
5828 };
5829 
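/*
 * Dump the contents of a CIM queue.  arg2 selects the queue: values below
 * CIM_NUM_IBQ are inbound queues, the rest are outbound queues.
 */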
5830 static int
5831 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5832 {
5833 	struct adapter *sc = arg1;
5834 	struct sbuf *sb;
5835 	int rc, i, n, qid = arg2;
5836 	uint32_t *buf, *p;
5837 	char *qtype;
5838 	u_int cim_num_obq = sc->chip_params->cim_num_obq;
5839 
5840 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5841 	    ("%s: bad qid %d\n", __func__, qid));
5842 
5843 	if (qid < CIM_NUM_IBQ) {
5844 		/* inbound queue */
5845 		qtype = "IBQ";
5846 		n = 4 * CIM_IBQ_SIZE;
5847 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5848 		rc = t4_read_cim_ibq(sc, qid, buf, n);
5849 	} else {
5850 		/* outbound queue */
5851 		qtype = "OBQ";
5852 		qid -= CIM_NUM_IBQ;
5853 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5854 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5855 		rc = t4_read_cim_obq(sc, qid, buf, n);
5856 	}
5857 
5858 	if (rc < 0) {
5859 		rc = -rc;
5860 		goto done;
5861 	}
5862 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5863 
5864 	rc = sysctl_wire_old_buffer(req, 0);
5865 	if (rc != 0)
5866 		goto done;
5867 
5868 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5869 	if (sb == NULL) {
5870 		rc = ENOMEM;
5871 		goto done;
5872 	}
5873 
5874 	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
5875 	for (i = 0, p = buf; i < n; i += 16, p += 4)
5876 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5877 		    p[2], p[3]);
5878 
5879 	rc = sbuf_finish(sb);
5880 	sbuf_delete(sb);
5881 done:
5882 	free(buf, M_CXGBE);
5883 	return (rc);
5884 }
5885 
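/*
 * Display the CIM logic analyzer capture for T4/T5 chips.  T6 uses the wider
 * format handled by sysctl_cim_la_t6 below.
 */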
5886 static int
5887 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5888 {
5889 	struct adapter *sc = arg1;
5890 	u_int cfg;
5891 	struct sbuf *sb;
5892 	uint32_t *buf, *p;
5893 	int rc;
5894 
5895 	MPASS(chip_id(sc) <= CHELSIO_T5);
5896 
5897 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5898 	if (rc != 0)
5899 		return (rc);
5900 
5901 	rc = sysctl_wire_old_buffer(req, 0);
5902 	if (rc != 0)
5903 		return (rc);
5904 
5905 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5906 	if (sb == NULL)
5907 		return (ENOMEM);
5908 
5909 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5910 	    M_ZERO | M_WAITOK);
5911 
5912 	rc = -t4_cim_read_la(sc, buf, NULL);
5913 	if (rc != 0)
5914 		goto done;
5915 
5916 	sbuf_printf(sb, "Status   Data      PC%s",
5917 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5918 	    "     LS0Stat  LS0Addr             LS0Data");
5919 
5920 	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
5921 		if (cfg & F_UPDBGLACAPTPCONLY) {
5922 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5923 			    p[6], p[7]);
5924 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5925 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5926 			    p[4] & 0xff, p[5] >> 8);
5927 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5928 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5929 			    p[1] & 0xf, p[2] >> 4);
5930 		} else {
5931 			sbuf_printf(sb,
5932 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5933 			    "%08x%08x%08x%08x",
5934 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5935 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5936 			    p[6], p[7]);
5937 		}
5938 	}
5939 
5940 	rc = sbuf_finish(sb);
5941 	sbuf_delete(sb);
5942 done:
5943 	free(buf, M_CXGBE);
5944 	return (rc);
5945 }
5946 
5947 static int
5948 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
5949 {
5950 	struct adapter *sc = arg1;
5951 	u_int cfg;
5952 	struct sbuf *sb;
5953 	uint32_t *buf, *p;
5954 	int rc;
5955 
5956 	MPASS(chip_id(sc) > CHELSIO_T5);
5957 
5958 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5959 	if (rc != 0)
5960 		return (rc);
5961 
5962 	rc = sysctl_wire_old_buffer(req, 0);
5963 	if (rc != 0)
5964 		return (rc);
5965 
5966 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5967 	if (sb == NULL)
5968 		return (ENOMEM);
5969 
5970 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5971 	    M_ZERO | M_WAITOK);
5972 
5973 	rc = -t4_cim_read_la(sc, buf, NULL);
5974 	if (rc != 0)
5975 		goto done;
5976 
5977 	sbuf_printf(sb, "Status   Inst    Data      PC%s",
5978 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5979 	    "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");
5980 
5981 	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
5982 		if (cfg & F_UPDBGLACAPTPCONLY) {
5983 			sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
5984 			    p[3] & 0xff, p[2], p[1], p[0]);
5985 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
5986 			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
5987 			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
5988 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
5989 			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
5990 			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
5991 			    p[6] >> 16);
5992 		} else {
5993 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
5994 			    "%08x %08x %08x %08x %08x %08x",
5995 			    (p[9] >> 16) & 0xff,
5996 			    p[9] & 0xffff, p[8] >> 16,
5997 			    p[8] & 0xffff, p[7] >> 16,
5998 			    p[7] & 0xffff, p[6] >> 16,
5999 			    p[2], p[1], p[0], p[5], p[4], p[3]);
6000 		}
6001 	}
6002 
6003 	rc = sbuf_finish(sb);
6004 	sbuf_delete(sb);
6005 done:
6006 	free(buf, M_CXGBE);
6007 	return (rc);
6008 }
6009 
6010 static int
6011 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6012 {
6013 	struct adapter *sc = arg1;
6014 	u_int i;
6015 	struct sbuf *sb;
6016 	uint32_t *buf, *p;
6017 	int rc;
6018 
6019 	rc = sysctl_wire_old_buffer(req, 0);
6020 	if (rc != 0)
6021 		return (rc);
6022 
6023 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6024 	if (sb == NULL)
6025 		return (ENOMEM);
6026 
6027 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6028 	    M_ZERO | M_WAITOK);
6029 
6030 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6031 	p = buf;
6032 
6033 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6034 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6035 		    p[1], p[0]);
6036 	}
6037 
6038 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
6039 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6040 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
6041 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6042 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6043 		    (p[1] >> 2) | ((p[2] & 3) << 30),
6044 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6045 		    p[0] & 1);
6046 	}
6047 
6048 	rc = sbuf_finish(sb);
6049 	sbuf_delete(sb);
6050 	free(buf, M_CXGBE);
6051 	return (rc);
6052 }
6053 
6054 static int
6055 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6056 {
6057 	struct adapter *sc = arg1;
6058 	u_int i;
6059 	struct sbuf *sb;
6060 	uint32_t *buf, *p;
6061 	int rc;
6062 
6063 	rc = sysctl_wire_old_buffer(req, 0);
6064 	if (rc != 0)
6065 		return (rc);
6066 
6067 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6068 	if (sb == NULL)
6069 		return (ENOMEM);
6070 
6071 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6072 	    M_ZERO | M_WAITOK);
6073 
6074 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6075 	p = buf;
6076 
6077 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
6078 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6079 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
6080 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6081 		    p[4], p[3], p[2], p[1], p[0]);
6082 	}
6083 
6084 	sbuf_printf(sb, "\n\nCntl ID               Data");
6085 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6086 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
6087 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6088 	}
6089 
6090 	rc = sbuf_finish(sb);
6091 	sbuf_delete(sb);
6092 	free(buf, M_CXGBE);
6093 	return (rc);
6094 }
6095 
6096 static int
6097 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6098 {
6099 	struct adapter *sc = arg1;
6100 	struct sbuf *sb;
6101 	int rc, i;
6102 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6103 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6104 	uint16_t thres[CIM_NUM_IBQ];
6105 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6106 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6107 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6108 
6109 	cim_num_obq = sc->chip_params->cim_num_obq;
6110 	if (is_t4(sc)) {
6111 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6112 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
6113 	} else {
6114 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6115 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6116 	}
6117 	nq = CIM_NUM_IBQ + cim_num_obq;
6118 
6119 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6120 	if (rc == 0)
6121 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6122 	if (rc != 0)
6123 		return (rc);
6124 
6125 	t4_read_cimq_cfg(sc, base, size, thres);
6126 
6127 	rc = sysctl_wire_old_buffer(req, 0);
6128 	if (rc != 0)
6129 		return (rc);
6130 
6131 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6132 	if (sb == NULL)
6133 		return (ENOMEM);
6134 
6135 	sbuf_printf(sb,
6136 	    "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail");
6137 
6138 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
6139 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
6140 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
6141 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6142 		    G_QUEREMFLITS(p[2]) * 16);
6143 	for ( ; i < nq; i++, p += 4, wr += 2)
6144 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
6145 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
6146 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6147 		    G_QUEREMFLITS(p[2]) * 16);
6148 
6149 	rc = sbuf_finish(sb);
6150 	sbuf_delete(sb);
6151 
6152 	return (rc);
6153 }
6154 
6155 static int
6156 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6157 {
6158 	struct adapter *sc = arg1;
6159 	struct sbuf *sb;
6160 	int rc;
6161 	struct tp_cpl_stats stats;
6162 
6163 	rc = sysctl_wire_old_buffer(req, 0);
6164 	if (rc != 0)
6165 		return (rc);
6166 
6167 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6168 	if (sb == NULL)
6169 		return (ENOMEM);
6170 
6171 	mtx_lock(&sc->reg_lock);
6172 	t4_tp_get_cpl_stats(sc, &stats);
6173 	mtx_unlock(&sc->reg_lock);
6174 
6175 	if (sc->chip_params->nchan > 2) {
6176 		sbuf_printf(sb, "                 channel 0  channel 1"
6177 		    "  channel 2  channel 3");
6178 		sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
6179 		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6180 		sbuf_printf(sb, "\nCPL responses:   %10u %10u %10u %10u",
6181 		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6182 	} else {
6183 		sbuf_printf(sb, "                 channel 0  channel 1");
6184 		sbuf_printf(sb, "\nCPL requests:   %10u %10u",
6185 		    stats.req[0], stats.req[1]);
6186 		sbuf_printf(sb, "\nCPL responses:   %10u %10u",
6187 		    stats.rsp[0], stats.rsp[1]);
6188 	}
6189 
6190 	rc = sbuf_finish(sb);
6191 	sbuf_delete(sb);
6192 
6193 	return (rc);
6194 }
6195 
6196 static int
6197 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6198 {
6199 	struct adapter *sc = arg1;
6200 	struct sbuf *sb;
6201 	int rc;
6202 	struct tp_usm_stats stats;
6203 
6204 	rc = sysctl_wire_old_buffer(req, 0);
6205 	if (rc != 0)
6206 		return (rc);
6207 
6208 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6209 	if (sb == NULL)
6210 		return (ENOMEM);
6211 
6212 	t4_get_usm_stats(sc, &stats);
6213 
6214 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
6215 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6216 	sbuf_printf(sb, "Drops:  %u", stats.drops);
6217 
6218 	rc = sbuf_finish(sb);
6219 	sbuf_delete(sb);
6220 
6221 	return (rc);
6222 }
6223 
6224 static const char * const devlog_level_strings[] = {
6225 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
6226 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
6227 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
6228 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
6229 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
6230 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
6231 };
6232 
6233 static const char * const devlog_facility_strings[] = {
6234 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
6235 	[FW_DEVLOG_FACILITY_CF]		= "CF",
6236 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
6237 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
6238 	[FW_DEVLOG_FACILITY_RES]	= "RES",
6239 	[FW_DEVLOG_FACILITY_HW]		= "HW",
6240 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
6241 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
6242 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
6243 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
6244 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
6245 	[FW_DEVLOG_FACILITY_VI]		= "VI",
6246 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
6247 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
6248 	[FW_DEVLOG_FACILITY_TM]		= "TM",
6249 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
6250 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
6251 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
6252 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
6253 	[FW_DEVLOG_FACILITY_RI]		= "RI",
6254 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
6255 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
6256 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
6257 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
6258 	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
6259 };
6260 
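/*
 * Read the firmware's device log out of adapter memory and display it in
 * order, oldest entry first (the log is a circular buffer, so the starting
 * point is found by looking for the smallest timestamp).
 */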
6261 static int
6262 sysctl_devlog(SYSCTL_HANDLER_ARGS)
6263 {
6264 	struct adapter *sc = arg1;
6265 	struct devlog_params *dparams = &sc->params.devlog;
6266 	struct fw_devlog_e *buf, *e;
6267 	int i, j, rc, nentries, first = 0;
6268 	struct sbuf *sb;
6269 	uint64_t ftstamp = UINT64_MAX;
6270 
6271 	if (dparams->addr == 0)
6272 		return (ENXIO);
6273 
6274 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
6275 	if (buf == NULL)
6276 		return (ENOMEM);
6277 
6278 	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
6279 	if (rc != 0)
6280 		goto done;
6281 
6282 	nentries = dparams->size / sizeof(struct fw_devlog_e);
6283 	for (i = 0; i < nentries; i++) {
6284 		e = &buf[i];
6285 
6286 		if (e->timestamp == 0)
6287 			break;	/* end */
6288 
6289 		e->timestamp = be64toh(e->timestamp);
6290 		e->seqno = be32toh(e->seqno);
6291 		for (j = 0; j < 8; j++)
6292 			e->params[j] = be32toh(e->params[j]);
6293 
6294 		if (e->timestamp < ftstamp) {
6295 			ftstamp = e->timestamp;
6296 			first = i;
6297 		}
6298 	}
6299 
6300 	if (buf[first].timestamp == 0)
6301 		goto done;	/* nothing in the log */
6302 
6303 	rc = sysctl_wire_old_buffer(req, 0);
6304 	if (rc != 0)
6305 		goto done;
6306 
6307 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6308 	if (sb == NULL) {
6309 		rc = ENOMEM;
6310 		goto done;
6311 	}
6312 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
6313 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
6314 
6315 	i = first;
6316 	do {
6317 		e = &buf[i];
6318 		if (e->timestamp == 0)
6319 			break;	/* end */
6320 
6321 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
6322 		    e->seqno, e->timestamp,
6323 		    (e->level < nitems(devlog_level_strings) ?
6324 			devlog_level_strings[e->level] : "UNKNOWN"),
6325 		    (e->facility < nitems(devlog_facility_strings) ?
6326 			devlog_facility_strings[e->facility] : "UNKNOWN"));
6327 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6328 		    e->params[2], e->params[3], e->params[4],
6329 		    e->params[5], e->params[6], e->params[7]);
6330 
6331 		if (++i == nentries)
6332 			i = 0;
6333 	} while (i != first);
6334 
6335 	rc = sbuf_finish(sb);
6336 	sbuf_delete(sb);
6337 done:
6338 	free(buf, M_CXGBE);
6339 	return (rc);
6340 }
6341 
6342 static int
6343 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6344 {
6345 	struct adapter *sc = arg1;
6346 	struct sbuf *sb;
6347 	int rc;
6348 	struct tp_fcoe_stats stats[MAX_NCHAN];
6349 	int i, nchan = sc->chip_params->nchan;
6350 
6351 	rc = sysctl_wire_old_buffer(req, 0);
6352 	if (rc != 0)
6353 		return (rc);
6354 
6355 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6356 	if (sb == NULL)
6357 		return (ENOMEM);
6358 
6359 	for (i = 0; i < nchan; i++)
6360 		t4_get_fcoe_stats(sc, i, &stats[i]);
6361 
6362 	if (nchan > 2) {
6363 		sbuf_printf(sb, "                   channel 0        channel 1"
6364 		    "        channel 2        channel 3");
6365 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
6366 		    stats[0].octets_ddp, stats[1].octets_ddp,
6367 		    stats[2].octets_ddp, stats[3].octets_ddp);
6368 		sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
6369 		    stats[0].frames_ddp, stats[1].frames_ddp,
6370 		    stats[2].frames_ddp, stats[3].frames_ddp);
6371 		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
6372 		    stats[0].frames_drop, stats[1].frames_drop,
6373 		    stats[2].frames_drop, stats[3].frames_drop);
6374 	} else {
6375 		sbuf_printf(sb, "                   channel 0        channel 1");
6376 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
6377 		    stats[0].octets_ddp, stats[1].octets_ddp);
6378 		sbuf_printf(sb, "\nframesDDP:  %16u %16u",
6379 		    stats[0].frames_ddp, stats[1].frames_ddp);
6380 		sbuf_printf(sb, "\nframesDrop: %16u %16u",
6381 		    stats[0].frames_drop, stats[1].frames_drop);
6382 	}
6383 
6384 	rc = sbuf_finish(sb);
6385 	sbuf_delete(sb);
6386 
6387 	return (rc);
6388 }
6389 
6390 static int
6391 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6392 {
6393 	struct adapter *sc = arg1;
6394 	struct sbuf *sb;
6395 	int rc, i;
6396 	unsigned int map, kbps, ipg, mode;
6397 	unsigned int pace_tab[NTX_SCHED];
6398 
6399 	rc = sysctl_wire_old_buffer(req, 0);
6400 	if (rc != 0)
6401 		return (rc);
6402 
6403 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6404 	if (sb == NULL)
6405 		return (ENOMEM);
6406 
6407 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6408 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6409 	t4_read_pace_tbl(sc, pace_tab);
6410 
6411 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
6412 	    "Class IPG (0.1 ns)   Flow IPG (us)");
6413 
6414 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6415 		t4_get_tx_sched(sc, i, &kbps, &ipg);
6416 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
6417 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
6418 		if (kbps)
6419 			sbuf_printf(sb, "%9u     ", kbps);
6420 		else
6421 			sbuf_printf(sb, " disabled     ");
6422 
6423 		if (ipg)
6424 			sbuf_printf(sb, "%13u        ", ipg);
6425 		else
6426 			sbuf_printf(sb, "     disabled        ");
6427 
6428 		if (pace_tab[i])
6429 			sbuf_printf(sb, "%10u", pace_tab[i]);
6430 		else
6431 			sbuf_printf(sb, "  disabled");
6432 	}
6433 
6434 	rc = sbuf_finish(sb);
6435 	sbuf_delete(sb);
6436 
6437 	return (rc);
6438 }
6439 
6440 static int
6441 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6442 {
6443 	struct adapter *sc = arg1;
6444 	struct sbuf *sb;
6445 	int rc, i, j;
6446 	uint64_t *p0, *p1;
6447 	struct lb_port_stats s[2];
6448 	static const char *stat_name[] = {
6449 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6450 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6451 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
6452 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6453 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6454 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6455 		"BG2FramesTrunc:", "BG3FramesTrunc:"
6456 	};
6457 
6458 	rc = sysctl_wire_old_buffer(req, 0);
6459 	if (rc != 0)
6460 		return (rc);
6461 
6462 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6463 	if (sb == NULL)
6464 		return (ENOMEM);
6465 
6466 	memset(s, 0, sizeof(s));
6467 
6468 	for (i = 0; i < sc->chip_params->nchan; i += 2) {
6469 		t4_get_lb_stats(sc, i, &s[0]);
6470 		t4_get_lb_stats(sc, i + 1, &s[1]);
6471 
6472 		p0 = &s[0].octets;
6473 		p1 = &s[1].octets;
6474 		sbuf_printf(sb, "%s                       Loopback %u"
6475 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6476 
6477 		for (j = 0; j < nitems(stat_name); j++)
6478 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6479 				   *p0++, *p1++);
6480 	}
6481 
6482 	rc = sbuf_finish(sb);
6483 	sbuf_delete(sb);
6484 
6485 	return (rc);
6486 }
6487 
6488 static int
6489 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6490 {
6491 	int rc = 0;
6492 	struct port_info *pi = arg1;
6493 	struct sbuf *sb;
6494 
6495 	rc = sysctl_wire_old_buffer(req, 0);
6496 	if (rc != 0)
6497 		return (rc);
6498 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6499 	if (sb == NULL)
6500 		return (ENOMEM);
6501 
6502 	if (pi->linkdnrc < 0)
6503 		sbuf_printf(sb, "n/a");
6504 	else
6505 		sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc));
6506 
6507 	rc = sbuf_finish(sb);
6508 	sbuf_delete(sb);
6509 
6510 	return (rc);
6511 }
6512 
6513 struct mem_desc {
6514 	unsigned int base;
6515 	unsigned int limit;
6516 	unsigned int idx;
6517 };
6518 
6519 static int
6520 mem_desc_cmp(const void *a, const void *b)
6521 {
6522 	return ((const struct mem_desc *)a)->base -
6523 	       ((const struct mem_desc *)b)->base;
6524 }
6525 
6526 static void
6527 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6528     unsigned int to)
6529 {
6530 	unsigned int size;
6531 
6532 	if (from == to)
6533 		return;
6534 
6535 	size = to - from + 1;
6536 	if (size == 0)
6537 		return;
6538 
6539 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6540 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
6541 }
6542 
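/*
 * Show how the adapter's memories (EDC0, EDC1, MC/MC0, MC1) are carved up
 * into hardware regions, along with per-port and per-loopback-channel page
 * usage.
 */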
6543 static int
6544 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6545 {
6546 	struct adapter *sc = arg1;
6547 	struct sbuf *sb;
6548 	int rc, i, n;
6549 	uint32_t lo, hi, used, alloc;
6550 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6551 	static const char *region[] = {
6552 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6553 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6554 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6555 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6556 		"RQUDP region:", "PBL region:", "TXPBL region:",
6557 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6558 		"On-chip queues:"
6559 	};
6560 	struct mem_desc avail[4];
6561 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
6562 	struct mem_desc *md = mem;
6563 
6564 	rc = sysctl_wire_old_buffer(req, 0);
6565 	if (rc != 0)
6566 		return (rc);
6567 
6568 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6569 	if (sb == NULL)
6570 		return (ENOMEM);
6571 
6572 	for (i = 0; i < nitems(mem); i++) {
6573 		mem[i].limit = 0;
6574 		mem[i].idx = i;
6575 	}
6576 
6577 	/* Find and sort the populated memory ranges */
6578 	i = 0;
6579 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6580 	if (lo & F_EDRAM0_ENABLE) {
6581 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6582 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
6583 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6584 		avail[i].idx = 0;
6585 		i++;
6586 	}
6587 	if (lo & F_EDRAM1_ENABLE) {
6588 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6589 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
6590 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6591 		avail[i].idx = 1;
6592 		i++;
6593 	}
6594 	if (lo & F_EXT_MEM_ENABLE) {
6595 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6596 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6597 		avail[i].limit = avail[i].base +
6598 		    (G_EXT_MEM_SIZE(hi) << 20);
6599 		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
6600 		i++;
6601 	}
6602 	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
6603 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6604 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6605 		avail[i].limit = avail[i].base +
6606 		    (G_EXT_MEM1_SIZE(hi) << 20);
6607 		avail[i].idx = 4;
6608 		i++;
6609 	}
	if (!i) {                                  /* no memory available */
		sbuf_delete(sb);
		return (0);
	}
6612 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6613 
6614 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6615 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6616 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6617 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6618 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6619 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6620 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6621 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6622 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6623 
6624 	/* the next few have explicit upper bounds */
6625 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6626 	md->limit = md->base - 1 +
6627 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6628 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6629 	md++;
6630 
6631 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6632 	md->limit = md->base - 1 +
6633 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6634 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6635 	md++;
6636 
6637 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6638 		if (chip_id(sc) <= CHELSIO_T5)
6639 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6640 		else
6641 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
6642 		md->limit = 0;
6643 	} else {
6644 		md->base = 0;
6645 		md->idx = nitems(region);  /* hide it */
6646 	}
6647 	md++;
6648 
6649 #define ulp_region(reg) \
6650 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6651 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6652 
6653 	ulp_region(RX_ISCSI);
6654 	ulp_region(RX_TDDP);
6655 	ulp_region(TX_TPT);
6656 	ulp_region(RX_STAG);
6657 	ulp_region(RX_RQ);
6658 	ulp_region(RX_RQUDP);
6659 	ulp_region(RX_PBL);
6660 	ulp_region(TX_PBL);
6661 #undef ulp_region
6662 
6663 	md->base = 0;
6664 	md->idx = nitems(region);
6665 	if (!is_t4(sc)) {
6666 		uint32_t size = 0;
6667 		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
6668 		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
6669 
6670 		if (is_t5(sc)) {
6671 			if (sge_ctrl & F_VFIFO_ENABLE)
6672 				size = G_DBVFIFO_SIZE(fifo_size);
6673 		} else
6674 			size = G_T6_DBVFIFO_SIZE(fifo_size);
6675 
6676 		if (size) {
6677 			md->base = G_BASEADDR(t4_read_reg(sc,
6678 			    A_SGE_DBVFIFO_BADDR));
6679 			md->limit = md->base + (size << 2) - 1;
6680 		}
6681 	}
6682 	md++;
6683 
6684 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6685 	md->limit = 0;
6686 	md++;
6687 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6688 	md->limit = 0;
6689 	md++;
6690 
6691 	md->base = sc->vres.ocq.start;
6692 	if (sc->vres.ocq.size)
6693 		md->limit = md->base + sc->vres.ocq.size - 1;
6694 	else
6695 		md->idx = nitems(region);  /* hide it */
6696 	md++;
6697 
6698 	/* add any address-space holes, there can be up to 3 */
6699 	for (n = 0; n < i - 1; n++)
6700 		if (avail[n].limit < avail[n + 1].base)
6701 			(md++)->base = avail[n].limit;
6702 	if (avail[n].limit)
6703 		(md++)->base = avail[n].limit;
6704 
6705 	n = md - mem;
6706 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6707 
6708 	for (lo = 0; lo < i; lo++)
6709 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6710 				avail[lo].limit - 1);
6711 
6712 	sbuf_printf(sb, "\n");
6713 	for (i = 0; i < n; i++) {
6714 		if (mem[i].idx >= nitems(region))
6715 			continue;                        /* skip holes */
6716 		if (!mem[i].limit)
6717 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6718 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
6719 				mem[i].limit);
6720 	}
6721 
6722 	sbuf_printf(sb, "\n");
6723 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6724 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6725 	mem_region_show(sb, "uP RAM:", lo, hi);
6726 
6727 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6728 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6729 	mem_region_show(sb, "uP Extmem2:", lo, hi);
6730 
6731 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6732 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6733 		   G_PMRXMAXPAGE(lo),
6734 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6735 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
6736 
6737 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6738 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6739 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6740 		   G_PMTXMAXPAGE(lo),
6741 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6742 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6743 	sbuf_printf(sb, "%u p-structs\n",
6744 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6745 
6746 	for (i = 0; i < 4; i++) {
6747 		if (chip_id(sc) > CHELSIO_T5)
6748 			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
6749 		else
6750 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6751 		if (is_t5(sc)) {
6752 			used = G_T5_USED(lo);
6753 			alloc = G_T5_ALLOC(lo);
6754 		} else {
6755 			used = G_USED(lo);
6756 			alloc = G_ALLOC(lo);
6757 		}
6758 		/* For T6 these are MAC buffer groups */
6759 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6760 		    i, used, alloc);
6761 	}
6762 	for (i = 0; i < sc->chip_params->nchan; i++) {
6763 		if (chip_id(sc) > CHELSIO_T5)
6764 			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
6765 		else
6766 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6767 		if (is_t5(sc)) {
6768 			used = G_T5_USED(lo);
6769 			alloc = G_T5_ALLOC(lo);
6770 		} else {
6771 			used = G_USED(lo);
6772 			alloc = G_ALLOC(lo);
6773 		}
6774 		/* For T6 these are MAC buffer groups */
6775 		sbuf_printf(sb,
6776 		    "\nLoopback %d using %u pages out of %u allocated",
6777 		    i, used, alloc);
6778 	}
6779 
6780 	rc = sbuf_finish(sb);
6781 	sbuf_delete(sb);
6782 
6783 	return (rc);
6784 }
6785 
6786 static inline void
6787 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6788 {
6789 	*mask = x | y;
6790 	y = htobe64(y);
6791 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
6792 }
6793 
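/*
 * Dump the MPS TCAM for T4/T5 chips: one line per entry with the Ethernet
 * address, mask, and classification information.
 */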
6794 static int
6795 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6796 {
6797 	struct adapter *sc = arg1;
6798 	struct sbuf *sb;
6799 	int rc, i;
6800 
6801 	MPASS(chip_id(sc) <= CHELSIO_T5);
6802 
6803 	rc = sysctl_wire_old_buffer(req, 0);
6804 	if (rc != 0)
6805 		return (rc);
6806 
6807 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6808 	if (sb == NULL)
6809 		return (ENOMEM);
6810 
6811 	sbuf_printf(sb,
6812 	    "Idx  Ethernet address     Mask     Vld Ports PF"
6813 	    "  VF              Replication             P0 P1 P2 P3  ML");
6814 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
6815 		uint64_t tcamx, tcamy, mask;
6816 		uint32_t cls_lo, cls_hi;
6817 		uint8_t addr[ETHER_ADDR_LEN];
6818 
6819 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6820 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6821 		if (tcamx & tcamy)
6822 			continue;
6823 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
6824 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6825 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6826 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6827 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
6828 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
6829 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6830 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
6831 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6832 
6833 		if (cls_lo & F_REPLICATE) {
6834 			struct fw_ldst_cmd ldst_cmd;
6835 
6836 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6837 			ldst_cmd.op_to_addrspace =
6838 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6839 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
6840 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6841 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6842 			ldst_cmd.u.mps.rplc.fid_idx =
6843 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6844 				V_FW_LDST_CMD_IDX(i));
6845 
6846 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6847 			    "t4mps");
6848 			if (rc)
6849 				break;
6850 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6851 			    sizeof(ldst_cmd), &ldst_cmd);
6852 			end_synchronized_op(sc, 0);
6853 
6854 			if (rc != 0) {
6855 				sbuf_printf(sb, "%36d", rc);
6856 				rc = 0;
6857 			} else {
6858 				sbuf_printf(sb, " %08x %08x %08x %08x",
6859 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
6860 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
6861 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
6862 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
6863 			}
6864 		} else
6865 			sbuf_printf(sb, "%36s", "");
6866 
6867 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
6868 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
6869 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
6870 	}
6871 
6872 	if (rc)
6873 		(void) sbuf_finish(sb);
6874 	else
6875 		rc = sbuf_finish(sb);
6876 	sbuf_delete(sb);
6877 
6878 	return (rc);
6879 }
6880 
6881 static int
6882 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
6883 {
6884 	struct adapter *sc = arg1;
6885 	struct sbuf *sb;
6886 	int rc, i;
6887 
6888 	MPASS(chip_id(sc) > CHELSIO_T5);
6889 
6890 	rc = sysctl_wire_old_buffer(req, 0);
6891 	if (rc != 0)
6892 		return (rc);
6893 
6894 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6895 	if (sb == NULL)
6896 		return (ENOMEM);
6897 
6898 	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
6899 	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
6900 	    "                           Replication"
6901 	    "                                    P0 P1 P2 P3  ML\n");
6902 
6903 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
6904 		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
6905 		uint16_t ivlan;
6906 		uint64_t tcamx, tcamy, val, mask;
6907 		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
6908 		uint8_t addr[ETHER_ADDR_LEN];
6909 
6910 		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
6911 		if (i < 256)
6912 			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
6913 		else
6914 			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
6915 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
6916 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
6917 		tcamy = G_DMACH(val) << 32;
6918 		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
6919 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
6920 		lookup_type = G_DATALKPTYPE(data2);
6921 		port_num = G_DATAPORTNUM(data2);
6922 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
6923 			/* Inner header VNI */
6924 			vniy = ((data2 & F_DATAVIDH2) << 23) |
6925 				       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
6926 			dip_hit = data2 & F_DATADIPHIT;
6927 			vlan_vld = 0;
6928 		} else {
6929 			vniy = 0;
6930 			dip_hit = 0;
6931 			vlan_vld = data2 & F_DATAVIDH2;
6932 			ivlan = G_VIDL(val);
6933 		}
6934 
6935 		ctl |= V_CTLXYBITSEL(1);
6936 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
6937 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
6938 		tcamx = G_DMACH(val) << 32;
6939 		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
6940 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
6941 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
6942 			/* Inner header VNI mask */
6943 			vnix = ((data2 & F_DATAVIDH2) << 23) |
6944 			       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
6945 		} else
6946 			vnix = 0;
6947 
6948 		if (tcamx & tcamy)
6949 			continue;
6950 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
6951 
6952 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6953 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6954 
6955 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
6956 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
6957 			    "%012jx %06x %06x    -    -   %3c"
6958 			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
6959 			    addr[1], addr[2], addr[3], addr[4], addr[5],
6960 			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
6961 			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
6962 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
6963 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
6964 		} else {
6965 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
6966 			    "%012jx    -       -   ", i, addr[0], addr[1],
6967 			    addr[2], addr[3], addr[4], addr[5],
6968 			    (uintmax_t)mask);
6969 
6970 			if (vlan_vld)
6971 				sbuf_printf(sb, "%4u   Y     ", ivlan);
6972 			else
6973 				sbuf_printf(sb, "  -    N     ");
6974 
6975 			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
6976 			    lookup_type ? 'I' : 'O', port_num,
6977 			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
6978 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
6979 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
6980 		}
6981 
6982 
6983 		if (cls_lo & F_T6_REPLICATE) {
6984 			struct fw_ldst_cmd ldst_cmd;
6985 
6986 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6987 			ldst_cmd.op_to_addrspace =
6988 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6989 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
6990 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6991 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6992 			ldst_cmd.u.mps.rplc.fid_idx =
6993 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6994 				V_FW_LDST_CMD_IDX(i));
6995 
6996 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6997 			    "t6mps");
6998 			if (rc)
6999 				break;
7000 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7001 			    sizeof(ldst_cmd), &ldst_cmd);
7002 			end_synchronized_op(sc, 0);
7003 
7004 			if (rc != 0) {
7005 				sbuf_printf(sb, "%72d", rc);
7006 				rc = 0;
7007 			} else {
7008 				sbuf_printf(sb, " %08x %08x %08x %08x"
7009 				    " %08x %08x %08x %08x",
7010 				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7011 				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7012 				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7013 				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7014 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7015 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7016 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7017 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7018 			}
7019 		} else
7020 			sbuf_printf(sb, "%72s", "");
7021 
7022 		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7023 		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
7024 		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
7025 		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
7026 	}
7027 
7028 	if (rc)
7029 		(void) sbuf_finish(sb);
7030 	else
7031 		rc = sbuf_finish(sb);
7032 	sbuf_delete(sb);
7033 
7034 	return (rc);
7035 }
7036 
7037 static int
7038 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7039 {
7040 	struct adapter *sc = arg1;
7041 	struct sbuf *sb;
7042 	int rc;
7043 	uint16_t mtus[NMTUS];
7044 
7045 	rc = sysctl_wire_old_buffer(req, 0);
7046 	if (rc != 0)
7047 		return (rc);
7048 
7049 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7050 	if (sb == NULL)
7051 		return (ENOMEM);
7052 
7053 	t4_read_mtu_tbl(sc, mtus, NULL);
7054 
7055 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7056 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7057 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7058 	    mtus[14], mtus[15]);
7059 
7060 	rc = sbuf_finish(sb);
7061 	sbuf_delete(sb);
7062 
7063 	return (rc);
7064 }
7065 
7066 static int
7067 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7068 {
7069 	struct adapter *sc = arg1;
7070 	struct sbuf *sb;
7071 	int rc, i;
7072 	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
7073 	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
7074 	static const char *tx_stats[MAX_PM_NSTATS] = {
7075 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
7076 		"Tx FIFO wait", NULL, "Tx latency"
7077 	};
7078 	static const char *rx_stats[MAX_PM_NSTATS] = {
7079 		"Read:", "Write bypass:", "Write mem:", "Flush:",
7080 		"Rx FIFO wait", NULL, "Rx latency"
7081 	};
7082 
7083 	rc = sysctl_wire_old_buffer(req, 0);
7084 	if (rc != 0)
7085 		return (rc);
7086 
7087 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7088 	if (sb == NULL)
7089 		return (ENOMEM);
7090 
7091 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
7092 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
7093 
7094 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
7095 	for (i = 0; i < 4; i++) {
7096 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7097 		    tx_cyc[i]);
7098 	}
7099 
7100 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
7101 	for (i = 0; i < 4; i++) {
7102 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7103 		    rx_cyc[i]);
7104 	}
7105 
7106 	if (chip_id(sc) > CHELSIO_T5) {
7107 		sbuf_printf(sb,
7108 		    "\n              Total wait      Total occupancy");
7109 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7110 		    tx_cyc[i]);
7111 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7112 		    rx_cyc[i]);
7113 
7114 		i += 2;
7115 		MPASS(i < nitems(tx_stats));
7116 
7117 		sbuf_printf(sb,
7118 		    "\n                   Reads           Total wait");
7119 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7120 		    tx_cyc[i]);
7121 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7122 		    rx_cyc[i]);
7123 	}
7124 
7125 	rc = sbuf_finish(sb);
7126 	sbuf_delete(sb);
7127 
7128 	return (rc);
7129 }
7130 
7131 static int
7132 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7133 {
7134 	struct adapter *sc = arg1;
7135 	struct sbuf *sb;
7136 	int rc;
7137 	struct tp_rdma_stats stats;
7138 
7139 	rc = sysctl_wire_old_buffer(req, 0);
7140 	if (rc != 0)
7141 		return (rc);
7142 
7143 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7144 	if (sb == NULL)
7145 		return (ENOMEM);
7146 
7147 	mtx_lock(&sc->reg_lock);
7148 	t4_tp_get_rdma_stats(sc, &stats);
7149 	mtx_unlock(&sc->reg_lock);
7150 
7151 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
7152 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
7153 
7154 	rc = sbuf_finish(sb);
7155 	sbuf_delete(sb);
7156 
7157 	return (rc);
7158 }
7159 
7160 static int
7161 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7162 {
7163 	struct adapter *sc = arg1;
7164 	struct sbuf *sb;
7165 	int rc;
7166 	struct tp_tcp_stats v4, v6;
7167 
7168 	rc = sysctl_wire_old_buffer(req, 0);
7169 	if (rc != 0)
7170 		return (rc);
7171 
7172 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7173 	if (sb == NULL)
7174 		return (ENOMEM);
7175 
7176 	mtx_lock(&sc->reg_lock);
7177 	t4_tp_get_tcp_stats(sc, &v4, &v6);
7178 	mtx_unlock(&sc->reg_lock);
7179 
7180 	sbuf_printf(sb,
7181 	    "                                IP                 IPv6\n");
7182 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
7183 	    v4.tcp_out_rsts, v6.tcp_out_rsts);
7184 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
7185 	    v4.tcp_in_segs, v6.tcp_in_segs);
7186 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
7187 	    v4.tcp_out_segs, v6.tcp_out_segs);
7188 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
7189 	    v4.tcp_retrans_segs, v6.tcp_retrans_segs);
7190 
7191 	rc = sbuf_finish(sb);
7192 	sbuf_delete(sb);
7193 
7194 	return (rc);
7195 }
7196 
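/*
 * Summarize TID allocation: the ATID, TID, STID, FTID, and ETID ranges and
 * how much of each is in use.  When the LE hash is enabled the TID range is
 * reported in two pieces, split at the hash base.  The last line reports the
 * active IPv4/IPv6 user counts read from the lookup engine.
 */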
7197 static int
7198 sysctl_tids(SYSCTL_HANDLER_ARGS)
7199 {
7200 	struct adapter *sc = arg1;
7201 	struct sbuf *sb;
7202 	int rc;
7203 	struct tid_info *t = &sc->tids;
7204 
7205 	rc = sysctl_wire_old_buffer(req, 0);
7206 	if (rc != 0)
7207 		return (rc);
7208 
7209 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7210 	if (sb == NULL)
7211 		return (ENOMEM);
7212 
7213 	if (t->natids) {
7214 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
7215 		    t->atids_in_use);
7216 	}
7217 
7218 	if (t->ntids) {
7219 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7220 			uint32_t b;
7221 
7222 			if (chip_id(sc) <= CHELSIO_T5)
7223 				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
7224 			else
7225 				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
7226 
7227 			if (b) {
7228 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
7229 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7230 				    t->ntids - 1);
7231 			} else {
7232 				sbuf_printf(sb, "TID range: %u-%u",
7233 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7234 				    t->ntids - 1);
7235 			}
7236 		} else
7237 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
7238 		sbuf_printf(sb, ", in use: %u\n",
7239 		    atomic_load_acq_int(&t->tids_in_use));
7240 	}
7241 
7242 	if (t->nstids) {
7243 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
7244 		    t->stid_base + t->nstids - 1, t->stids_in_use);
7245 	}
7246 
7247 	if (t->nftids) {
7248 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
7249 		    t->ftid_base + t->nftids - 1);
7250 	}
7251 
7252 	if (t->netids) {
7253 		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
7254 		    t->etid_base + t->netids - 1);
7255 	}
7256 
7257 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
7258 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
7259 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
7260 
7261 	rc = sbuf_finish(sb);
7262 	sbuf_delete(sb);
7263 
7264 	return (rc);
7265 }
7266 
7267 static int
7268 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
7269 {
7270 	struct adapter *sc = arg1;
7271 	struct sbuf *sb;
7272 	int rc;
7273 	struct tp_err_stats stats;
7274 
7275 	rc = sysctl_wire_old_buffer(req, 0);
7276 	if (rc != 0)
7277 		return (rc);
7278 
7279 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7280 	if (sb == NULL)
7281 		return (ENOMEM);
7282 
7283 	mtx_lock(&sc->reg_lock);
7284 	t4_tp_get_err_stats(sc, &stats);
7285 	mtx_unlock(&sc->reg_lock);
7286 
7287 	if (sc->chip_params->nchan > 2) {
7288 		sbuf_printf(sb, "                 channel 0  channel 1"
7289 		    "  channel 2  channel 3\n");
7290 		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
7291 		    stats.mac_in_errs[0], stats.mac_in_errs[1],
7292 		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
7293 		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
7294 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
7295 		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
7296 		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
7297 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
7298 		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
7299 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
7300 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
7301 		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
7302 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
7303 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
7304 		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
7305 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
7306 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
7307 		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
7308 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
7309 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
7310 		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
7311 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
7312 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
7313 		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
7314 	} else {
7315 		sbuf_printf(sb, "                 channel 0  channel 1\n");
7316 		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
7317 		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
7318 		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
7319 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
7320 		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
7321 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
7322 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
7323 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
7324 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
7325 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
7326 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
7327 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
7328 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
7329 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
7330 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
7331 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
7332 	}
7333 
7334 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
7335 	    stats.ofld_no_neigh, stats.ofld_cong_defer);
7336 
7337 	rc = sbuf_finish(sb);
7338 	sbuf_delete(sb);
7339 
7340 	return (rc);
7341 }
7342 
7343 static int
7344 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
7345 {
7346 	struct adapter *sc = arg1;
7347 	struct tp_params *tpp = &sc->params.tp;
7348 	u_int mask;
7349 	int rc;
7350 
7351 	mask = tpp->la_mask >> 16;
7352 	rc = sysctl_handle_int(oidp, &mask, 0, req);
7353 	if (rc != 0 || req->newptr == NULL)
7354 		return (rc);
7355 	if (mask > 0xffff)
7356 		return (EINVAL);
7357 	tpp->la_mask = mask << 16;
7358 	t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
7359 
7360 	return (0);
7361 }
7362 
7363 struct field_desc {
7364 	const char *name;
7365 	u_int start;
7366 	u_int width;
7367 };
7368 
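/*
 * A field_desc names one bit-field inside a 64-bit TP logic-analyzer word:
 * 'width' bits starting at bit 'start'.  field_desc_show() walks a
 * NULL-terminated array of these, printing "name: value" pairs and wrapping
 * the output near 79 columns with an 8-space continuation indent.
 */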
7369 static void
7370 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7371 {
7372 	char buf[32];
7373 	int line_size = 0;
7374 
7375 	while (f->name) {
7376 		uint64_t mask = (1ULL << f->width) - 1;
7377 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7378 		    ((uintmax_t)v >> f->start) & mask);
7379 
7380 		if (line_size + len >= 79) {
7381 			line_size = 8;
7382 			sbuf_printf(sb, "\n        ");
7383 		}
7384 		sbuf_printf(sb, "%s ", buf);
7385 		line_size += len + 1;
7386 		f++;
7387 	}
7388 	sbuf_printf(sb, "\n");
7389 }
7390 
7391 static const struct field_desc tp_la0[] = {
7392 	{ "RcfOpCodeOut", 60, 4 },
7393 	{ "State", 56, 4 },
7394 	{ "WcfState", 52, 4 },
7395 	{ "RcfOpcSrcOut", 50, 2 },
7396 	{ "CRxError", 49, 1 },
7397 	{ "ERxError", 48, 1 },
7398 	{ "SanityFailed", 47, 1 },
7399 	{ "SpuriousMsg", 46, 1 },
7400 	{ "FlushInputMsg", 45, 1 },
7401 	{ "FlushInputCpl", 44, 1 },
7402 	{ "RssUpBit", 43, 1 },
7403 	{ "RssFilterHit", 42, 1 },
7404 	{ "Tid", 32, 10 },
7405 	{ "InitTcb", 31, 1 },
7406 	{ "LineNumber", 24, 7 },
7407 	{ "Emsg", 23, 1 },
7408 	{ "EdataOut", 22, 1 },
7409 	{ "Cmsg", 21, 1 },
7410 	{ "CdataOut", 20, 1 },
7411 	{ "EreadPdu", 19, 1 },
7412 	{ "CreadPdu", 18, 1 },
7413 	{ "TunnelPkt", 17, 1 },
7414 	{ "RcfPeerFin", 16, 1 },
7415 	{ "RcfReasonOut", 12, 4 },
7416 	{ "TxCchannel", 10, 2 },
7417 	{ "RcfTxChannel", 8, 2 },
7418 	{ "RxEchannel", 6, 2 },
7419 	{ "RcfRxChannel", 5, 1 },
7420 	{ "RcfDataOutSrdy", 4, 1 },
7421 	{ "RxDvld", 3, 1 },
7422 	{ "RxOoDvld", 2, 1 },
7423 	{ "RxCongestion", 1, 1 },
7424 	{ "TxCongestion", 0, 1 },
7425 	{ NULL }
7426 };
7427 
7428 static const struct field_desc tp_la1[] = {
7429 	{ "CplCmdIn", 56, 8 },
7430 	{ "CplCmdOut", 48, 8 },
7431 	{ "ESynOut", 47, 1 },
7432 	{ "EAckOut", 46, 1 },
7433 	{ "EFinOut", 45, 1 },
7434 	{ "ERstOut", 44, 1 },
7435 	{ "SynIn", 43, 1 },
7436 	{ "AckIn", 42, 1 },
7437 	{ "FinIn", 41, 1 },
7438 	{ "RstIn", 40, 1 },
7439 	{ "DataIn", 39, 1 },
7440 	{ "DataInVld", 38, 1 },
7441 	{ "PadIn", 37, 1 },
7442 	{ "RxBufEmpty", 36, 1 },
7443 	{ "RxDdp", 35, 1 },
7444 	{ "RxFbCongestion", 34, 1 },
7445 	{ "TxFbCongestion", 33, 1 },
7446 	{ "TxPktSumSrdy", 32, 1 },
7447 	{ "RcfUlpType", 28, 4 },
7448 	{ "Eread", 27, 1 },
7449 	{ "Ebypass", 26, 1 },
7450 	{ "Esave", 25, 1 },
7451 	{ "Static0", 24, 1 },
7452 	{ "Cread", 23, 1 },
7453 	{ "Cbypass", 22, 1 },
7454 	{ "Csave", 21, 1 },
7455 	{ "CPktOut", 20, 1 },
7456 	{ "RxPagePoolFull", 18, 2 },
7457 	{ "RxLpbkPkt", 17, 1 },
7458 	{ "TxLpbkPkt", 16, 1 },
7459 	{ "RxVfValid", 15, 1 },
7460 	{ "SynLearned", 14, 1 },
7461 	{ "SetDelEntry", 13, 1 },
7462 	{ "SetInvEntry", 12, 1 },
7463 	{ "CpcmdDvld", 11, 1 },
7464 	{ "CpcmdSave", 10, 1 },
7465 	{ "RxPstructsFull", 8, 2 },
7466 	{ "EpcmdDvld", 7, 1 },
7467 	{ "EpcmdFlush", 6, 1 },
7468 	{ "EpcmdTrimPrefix", 5, 1 },
7469 	{ "EpcmdTrimPostfix", 4, 1 },
7470 	{ "ERssIp4Pkt", 3, 1 },
7471 	{ "ERssIp6Pkt", 2, 1 },
7472 	{ "ERssTcpUdpPkt", 1, 1 },
7473 	{ "ERssFceFipPkt", 0, 1 },
7474 	{ NULL }
7475 };
7476 
7477 static const struct field_desc tp_la2[] = {
7478 	{ "CplCmdIn", 56, 8 },
7479 	{ "MpsVfVld", 55, 1 },
7480 	{ "MpsPf", 52, 3 },
7481 	{ "MpsVf", 44, 8 },
7482 	{ "SynIn", 43, 1 },
7483 	{ "AckIn", 42, 1 },
7484 	{ "FinIn", 41, 1 },
7485 	{ "RstIn", 40, 1 },
7486 	{ "DataIn", 39, 1 },
7487 	{ "DataInVld", 38, 1 },
7488 	{ "PadIn", 37, 1 },
7489 	{ "RxBufEmpty", 36, 1 },
7490 	{ "RxDdp", 35, 1 },
7491 	{ "RxFbCongestion", 34, 1 },
7492 	{ "TxFbCongestion", 33, 1 },
7493 	{ "TxPktSumSrdy", 32, 1 },
7494 	{ "RcfUlpType", 28, 4 },
7495 	{ "Eread", 27, 1 },
7496 	{ "Ebypass", 26, 1 },
7497 	{ "Esave", 25, 1 },
7498 	{ "Static0", 24, 1 },
7499 	{ "Cread", 23, 1 },
7500 	{ "Cbypass", 22, 1 },
7501 	{ "Csave", 21, 1 },
7502 	{ "CPktOut", 20, 1 },
7503 	{ "RxPagePoolFull", 18, 2 },
7504 	{ "RxLpbkPkt", 17, 1 },
7505 	{ "TxLpbkPkt", 16, 1 },
7506 	{ "RxVfValid", 15, 1 },
7507 	{ "SynLearned", 14, 1 },
7508 	{ "SetDelEntry", 13, 1 },
7509 	{ "SetInvEntry", 12, 1 },
7510 	{ "CpcmdDvld", 11, 1 },
7511 	{ "CpcmdSave", 10, 1 },
7512 	{ "RxPstructsFull", 8, 2 },
7513 	{ "EpcmdDvld", 7, 1 },
7514 	{ "EpcmdFlush", 6, 1 },
7515 	{ "EpcmdTrimPrefix", 5, 1 },
7516 	{ "EpcmdTrimPostfix", 4, 1 },
7517 	{ "ERssIp4Pkt", 3, 1 },
7518 	{ "ERssIp6Pkt", 2, 1 },
7519 	{ "ERssTcpUdpPkt", 1, 1 },
7520 	{ "ERssFceFipPkt", 0, 1 },
7521 	{ NULL }
7522 };
7523 
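/*
 * The TP debug LA captures one or two 64-bit words per entry depending on
 * the DBGLAMODE setting.  tp_la_show() decodes a single word with tp_la0;
 * tp_la_show2() and tp_la_show3() decode word pairs, the latter picking
 * tp_la2 or tp_la1 for the second word based on bit 17 of the first.
 */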
7524 static void
7525 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
7526 {
7527 
7528 	field_desc_show(sb, *p, tp_la0);
7529 }
7530 
7531 static void
7532 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
7533 {
7534 
7535 	if (idx)
7536 		sbuf_printf(sb, "\n");
7537 	field_desc_show(sb, p[0], tp_la0);
7538 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7539 		field_desc_show(sb, p[1], tp_la0);
7540 }
7541 
7542 static void
7543 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
7544 {
7545 
7546 	if (idx)
7547 		sbuf_printf(sb, "\n");
7548 	field_desc_show(sb, p[0], tp_la0);
7549 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7550 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
7551 }
7552 
7553 static int
7554 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
7555 {
7556 	struct adapter *sc = arg1;
7557 	struct sbuf *sb;
7558 	uint64_t *buf, *p;
7559 	int rc;
7560 	u_int i, inc;
7561 	void (*show_func)(struct sbuf *, uint64_t *, int);
7562 
7563 	rc = sysctl_wire_old_buffer(req, 0);
7564 	if (rc != 0)
7565 		return (rc);
7566 
7567 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7568 	if (sb == NULL)
7569 		return (ENOMEM);
7570 
7571 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
7572 
7573 	t4_tp_read_la(sc, buf, NULL);
7574 	p = buf;
7575 
7576 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
7577 	case 2:
7578 		inc = 2;
7579 		show_func = tp_la_show2;
7580 		break;
7581 	case 3:
7582 		inc = 2;
7583 		show_func = tp_la_show3;
7584 		break;
7585 	default:
7586 		inc = 1;
7587 		show_func = tp_la_show;
7588 	}
7589 
7590 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7591 		(*show_func)(sb, p, i);
7592 
7593 	rc = sbuf_finish(sb);
7594 	sbuf_delete(sb);
7595 	free(buf, M_CXGBE);
7596 	return (rc);
7597 }
7598 
7599 static int
7600 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7601 {
7602 	struct adapter *sc = arg1;
7603 	struct sbuf *sb;
7604 	int rc;
7605 	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
7606 
7607 	rc = sysctl_wire_old_buffer(req, 0);
7608 	if (rc != 0)
7609 		return (rc);
7610 
7611 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7612 	if (sb == NULL)
7613 		return (ENOMEM);
7614 
7615 	t4_get_chan_txrate(sc, nrate, orate);
7616 
7617 	if (sc->chip_params->nchan > 2) {
7618 		sbuf_printf(sb, "              channel 0   channel 1"
7619 		    "   channel 2   channel 3\n");
7620 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
7621 		    nrate[0], nrate[1], nrate[2], nrate[3]);
7622 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
7623 		    orate[0], orate[1], orate[2], orate[3]);
7624 	} else {
7625 		sbuf_printf(sb, "              channel 0   channel 1\n");
7626 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
7627 		    nrate[0], nrate[1]);
7628 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
7629 		    orate[0], orate[1]);
7630 	}
7631 
7632 	rc = sbuf_finish(sb);
7633 	sbuf_delete(sb);
7634 
7635 	return (rc);
7636 }
7637 
7638 static int
7639 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7640 {
7641 	struct adapter *sc = arg1;
7642 	struct sbuf *sb;
7643 	uint32_t *buf, *p;
7644 	int rc, i;
7645 
7646 	rc = sysctl_wire_old_buffer(req, 0);
7647 	if (rc != 0)
7648 		return (rc);
7649 
7650 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7651 	if (sb == NULL)
7652 		return (ENOMEM);
7653 
7654 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7655 	    M_ZERO | M_WAITOK);
7656 
7657 	t4_ulprx_read_la(sc, buf);
7658 	p = buf;
7659 
7660 	sbuf_printf(sb, "      Pcmd        Type   Message"
7661 	    "                Data");
7662 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7663 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
7664 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7665 	}
7666 
7667 	rc = sbuf_finish(sb);
7668 	sbuf_delete(sb);
7669 	free(buf, M_CXGBE);
7670 	return (rc);
7671 }
7672 
7673 static int
7674 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7675 {
7676 	struct adapter *sc = arg1;
7677 	struct sbuf *sb;
7678 	int rc, v;
7679 
7680 	MPASS(chip_id(sc) >= CHELSIO_T5);
7681 
7682 	rc = sysctl_wire_old_buffer(req, 0);
7683 	if (rc != 0)
7684 		return (rc);
7685 
7686 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7687 	if (sb == NULL)
7688 		return (ENOMEM);
7689 
7690 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
7691 	if (G_STATSOURCE_T5(v) == 7) {
7692 		int mode;
7693 
7694 		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
7695 		if (mode == 0) {
7696 			sbuf_printf(sb, "total %d, incomplete %d",
7697 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7698 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7699 		} else if (mode == 1) {
7700 			sbuf_printf(sb, "total %d, data overflow %d",
7701 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7702 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7703 		} else {
7704 			sbuf_printf(sb, "unknown mode %d", mode);
7705 		}
7706 	}
7707 	rc = sbuf_finish(sb);
7708 	sbuf_delete(sb);
7709 
7710 	return (rc);
7711 }
7712 
7713 static int
7714 sysctl_tc_params(SYSCTL_HANDLER_ARGS)
7715 {
7716 	struct adapter *sc = arg1;
7717 	struct tx_sched_class *tc;
7718 	struct t4_sched_class_params p;
7719 	struct sbuf *sb;
7720 	int i, rc, port_id, flags, mbps, gbps;
7721 
7722 	rc = sysctl_wire_old_buffer(req, 0);
7723 	if (rc != 0)
7724 		return (rc);
7725 
7726 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7727 	if (sb == NULL)
7728 		return (ENOMEM);
7729 
7730 	port_id = arg2 >> 16;
7731 	MPASS(port_id < sc->params.nports);
7732 	MPASS(sc->port[port_id] != NULL);
7733 	i = arg2 & 0xffff;
7734 	MPASS(i < sc->chip_params->nsched_cls);
7735 	tc = &sc->port[port_id]->tc[i];
7736 
7737 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7738 	    "t4tc_p");
7739 	if (rc)
7740 		goto done;
7741 	flags = tc->flags;
7742 	p = tc->params;
7743 	end_synchronized_op(sc, LOCK_HELD);
7744 
7745 	if ((flags & TX_SC_OK) == 0) {
7746 		sbuf_printf(sb, "none");
7747 		goto done;
7748 	}
7749 
7750 	if (p.level == SCHED_CLASS_LEVEL_CL_WRR) {
7751 		sbuf_printf(sb, "cl-wrr weight %u", p.weight);
7752 		goto done;
7753 	} else if (p.level == SCHED_CLASS_LEVEL_CL_RL)
7754 		sbuf_printf(sb, "cl-rl");
7755 	else if (p.level == SCHED_CLASS_LEVEL_CH_RL)
7756 		sbuf_printf(sb, "ch-rl");
7757 	else {
7758 		rc = ENXIO;
7759 		goto done;
7760 	}
7761 
7762 	if (p.ratemode == SCHED_CLASS_RATEMODE_REL) {
7763 		/* XXX: top speed or actual link speed? */
7764 		gbps = port_top_speed(sc->port[port_id]);
7765 		sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps);
7766 	}
7767 	else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) {
7768 		switch (p.rateunit) {
7769 		case SCHED_CLASS_RATEUNIT_BITS:
7770 			mbps = p.maxrate / 1000;
7771 			gbps = p.maxrate / 1000000;
7772 			if (p.maxrate == gbps * 1000000)
7773 				sbuf_printf(sb, " %uGbps", gbps);
7774 			else if (p.maxrate == mbps * 1000)
7775 				sbuf_printf(sb, " %uMbps", mbps);
7776 			else
7777 				sbuf_printf(sb, " %uKbps", p.maxrate);
7778 			break;
7779 		case SCHED_CLASS_RATEUNIT_PKTS:
7780 			sbuf_printf(sb, " %upps", p.maxrate);
7781 			break;
7782 		default:
7783 			rc = ENXIO;
7784 			goto done;
7785 		}
7786 	}
7787 
7788 	switch (p.mode) {
7789 	case SCHED_CLASS_MODE_CLASS:
7790 		sbuf_printf(sb, " aggregate");
7791 		break;
7792 	case SCHED_CLASS_MODE_FLOW:
7793 		sbuf_printf(sb, " per-flow");
7794 		break;
7795 	default:
7796 		rc = ENXIO;
7797 		goto done;
7798 	}
7799 
7800 done:
7801 	if (rc == 0)
7802 		rc = sbuf_finish(sb);
7803 	sbuf_delete(sb);
7804 
7805 	return (rc);
7806 }
7807 #endif
7808 
7809 #ifdef TCP_OFFLOAD
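/*
 * Format val/factor as a decimal string, keeping any non-zero remainder with
 * its trailing zeroes stripped.  For example, unit_conv(buf, len, 2500, 1000)
 * produces "2.5".
 */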
7810 static void
7811 unit_conv(char *buf, size_t len, u_int val, u_int factor)
7812 {
7813 	u_int rem = val % factor;
7814 
7815 	if (rem == 0)
7816 		snprintf(buf, len, "%u", val / factor);
7817 	else {
7818 		while (rem % 10 == 0)
7819 			rem /= 10;
7820 		snprintf(buf, len, "%u.%u", val / factor, rem);
7821 	}
7822 }
7823 
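/*
 * Report a TP timer tick in microseconds.  cclk_ps is the core-clock period
 * in picoseconds (vpd.cclk is the core clock in kHz), one tick lasts
 * cclk_ps << re picoseconds, and unit_conv() scales that by 10^6 to get
 * microseconds.
 */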
7824 static int
7825 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
7826 {
7827 	struct adapter *sc = arg1;
7828 	char buf[16];
7829 	u_int res, re;
7830 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7831 
7832 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
7833 	switch (arg2) {
7834 	case 0:
7835 		/* timer_tick */
7836 		re = G_TIMERRESOLUTION(res);
7837 		break;
7838 	case 1:
7839 		/* TCP timestamp tick */
7840 		re = G_TIMESTAMPRESOLUTION(res);
7841 		break;
7842 	case 2:
7843 		/* DACK tick */
7844 		re = G_DELAYEDACKRESOLUTION(res);
7845 		break;
7846 	default:
7847 		return (EDOOFUS);
7848 	}
7849 
7850 	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
7851 
7852 	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
7853 }
7854 
7855 static int
7856 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
7857 {
7858 	struct adapter *sc = arg1;
7859 	u_int res, dack_re, v;
7860 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7861 
7862 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
7863 	dack_re = G_DELAYEDACKRESOLUTION(res);
7864 	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
7865 
7866 	return (sysctl_handle_int(oidp, &v, 0, req));
7867 }
7868 
7869 static int
7870 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
7871 {
7872 	struct adapter *sc = arg1;
7873 	int reg = arg2;
7874 	u_int tre;
7875 	u_long tp_tick_us, v;
7876 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7877 
7878 	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
7879 	    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
7880 	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT ||
7881 	    reg == A_TP_FINWAIT2_TIMER);
7882 
7883 	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
7884 	tp_tick_us = (cclk_ps << tre) / 1000000;
7885 
7886 	if (reg == A_TP_INIT_SRTT)
7887 		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
7888 	else
7889 		v = tp_tick_us * t4_read_reg(sc, reg);
7890 
7891 	return (sysctl_handle_long(oidp, &v, 0, req));
7892 }
7893 #endif
7894 
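/*
 * Conversions between the driver's T4_FILTER_* mode bits and the hardware's
 * view of the same information: fconf mirrors tpp->vlan_pri_map (the
 * compressed filter tuple) and iconf mirrors tpp->ingress_config, whose
 * F_VNIC bit selects whether the VNIC slot matches a PF/VF or an outer VLAN.
 * mode_to_fconf()/mode_to_iconf() below perform the reverse mapping.
 */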
7895 static uint32_t
7896 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
7897 {
7898 	uint32_t mode;
7899 
7900 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7901 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7902 
7903 	if (fconf & F_FRAGMENTATION)
7904 		mode |= T4_FILTER_IP_FRAGMENT;
7905 
7906 	if (fconf & F_MPSHITTYPE)
7907 		mode |= T4_FILTER_MPS_HIT_TYPE;
7908 
7909 	if (fconf & F_MACMATCH)
7910 		mode |= T4_FILTER_MAC_IDX;
7911 
7912 	if (fconf & F_ETHERTYPE)
7913 		mode |= T4_FILTER_ETH_TYPE;
7914 
7915 	if (fconf & F_PROTOCOL)
7916 		mode |= T4_FILTER_IP_PROTO;
7917 
7918 	if (fconf & F_TOS)
7919 		mode |= T4_FILTER_IP_TOS;
7920 
7921 	if (fconf & F_VLAN)
7922 		mode |= T4_FILTER_VLAN;
7923 
7924 	if (fconf & F_VNIC_ID) {
7925 		mode |= T4_FILTER_VNIC;
7926 		if (iconf & F_VNIC)
7927 			mode |= T4_FILTER_IC_VNIC;
7928 	}
7929 
7930 	if (fconf & F_PORT)
7931 		mode |= T4_FILTER_PORT;
7932 
7933 	if (fconf & F_FCOE)
7934 		mode |= T4_FILTER_FCoE;
7935 
7936 	return (mode);
7937 }
7938 
7939 static uint32_t
7940 mode_to_fconf(uint32_t mode)
7941 {
7942 	uint32_t fconf = 0;
7943 
7944 	if (mode & T4_FILTER_IP_FRAGMENT)
7945 		fconf |= F_FRAGMENTATION;
7946 
7947 	if (mode & T4_FILTER_MPS_HIT_TYPE)
7948 		fconf |= F_MPSHITTYPE;
7949 
7950 	if (mode & T4_FILTER_MAC_IDX)
7951 		fconf |= F_MACMATCH;
7952 
7953 	if (mode & T4_FILTER_ETH_TYPE)
7954 		fconf |= F_ETHERTYPE;
7955 
7956 	if (mode & T4_FILTER_IP_PROTO)
7957 		fconf |= F_PROTOCOL;
7958 
7959 	if (mode & T4_FILTER_IP_TOS)
7960 		fconf |= F_TOS;
7961 
7962 	if (mode & T4_FILTER_VLAN)
7963 		fconf |= F_VLAN;
7964 
7965 	if (mode & T4_FILTER_VNIC)
7966 		fconf |= F_VNIC_ID;
7967 
7968 	if (mode & T4_FILTER_PORT)
7969 		fconf |= F_PORT;
7970 
7971 	if (mode & T4_FILTER_FCoE)
7972 		fconf |= F_FCOE;
7973 
7974 	return (fconf);
7975 }
7976 
7977 static uint32_t
7978 mode_to_iconf(uint32_t mode)
7979 {
7980 
7981 	if (mode & T4_FILTER_IC_VNIC)
7982 		return (F_VNIC);
7983 	return (0);
7984 }
7985 
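/*
 * Verify that every field a filter specification wants to match on is
 * enabled in the compressed filter tuple (vlan_pri_map), and that an
 * outer-VLAN vs. PF/VF match agrees with the F_VNIC setting in the ingress
 * config.  Returns 0, EINVAL, or E2BIG.
 */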
7986 static int check_fspec_against_fconf_iconf(struct adapter *sc,
7987     struct t4_filter_specification *fs)
7988 {
7989 	struct tp_params *tpp = &sc->params.tp;
7990 	uint32_t fconf = 0;
7991 
7992 	if (fs->val.frag || fs->mask.frag)
7993 		fconf |= F_FRAGMENTATION;
7994 
7995 	if (fs->val.matchtype || fs->mask.matchtype)
7996 		fconf |= F_MPSHITTYPE;
7997 
7998 	if (fs->val.macidx || fs->mask.macidx)
7999 		fconf |= F_MACMATCH;
8000 
8001 	if (fs->val.ethtype || fs->mask.ethtype)
8002 		fconf |= F_ETHERTYPE;
8003 
8004 	if (fs->val.proto || fs->mask.proto)
8005 		fconf |= F_PROTOCOL;
8006 
8007 	if (fs->val.tos || fs->mask.tos)
8008 		fconf |= F_TOS;
8009 
8010 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
8011 		fconf |= F_VLAN;
8012 
8013 	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
8014 		fconf |= F_VNIC_ID;
8015 		if (tpp->ingress_config & F_VNIC)
8016 			return (EINVAL);
8017 	}
8018 
8019 	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
8020 		fconf |= F_VNIC_ID;
8021 		if ((tpp->ingress_config & F_VNIC) == 0)
8022 			return (EINVAL);
8023 	}
8024 
8025 	if (fs->val.iport || fs->mask.iport)
8026 		fconf |= F_PORT;
8027 
8028 	if (fs->val.fcoe || fs->mask.fcoe)
8029 		fconf |= F_FCOE;
8030 
8031 	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
8032 		return (E2BIG);
8033 
8034 	return (0);
8035 }
8036 
8037 static int
8038 get_filter_mode(struct adapter *sc, uint32_t *mode)
8039 {
8040 	struct tp_params *tpp = &sc->params.tp;
8041 
8042 	/*
8043 	 * We trust the cached values of the relevant TP registers.  This means
8044 	 * things work reliably only if writes to those registers are always via
8045 	 * t4_set_filter_mode.
8046 	 */
8047 	*mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
8048 
8049 	return (0);
8050 }
8051 
8052 static int
8053 set_filter_mode(struct adapter *sc, uint32_t mode)
8054 {
8055 	struct tp_params *tpp = &sc->params.tp;
8056 	uint32_t fconf, iconf;
8057 	int rc;
8058 
8059 	iconf = mode_to_iconf(mode);
8060 	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
8061 		/*
8062 		 * For now we just complain if A_TP_INGRESS_CONFIG is not
8063 		 * already set to the correct value for the requested filter
8064 		 * mode.  It's not clear if it's safe to write to this register
8065 		 * on the fly.  (And we trust the cached value of the register).
8066 		 */
8067 		return (EBUSY);
8068 	}
8069 
8070 	fconf = mode_to_fconf(mode);
8071 
8072 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8073 	    "t4setfm");
8074 	if (rc)
8075 		return (rc);
8076 
8077 	if (sc->tids.ftids_in_use > 0) {
8078 		rc = EBUSY;
8079 		goto done;
8080 	}
8081 
8082 #ifdef TCP_OFFLOAD
8083 	if (uld_active(sc, ULD_TOM)) {
8084 		rc = EBUSY;
8085 		goto done;
8086 	}
8087 #endif
8088 
8089 	rc = -t4_set_filter_mode(sc, fconf);
8090 done:
8091 	end_synchronized_op(sc, LOCK_HELD);
8092 	return (rc);
8093 }
8094 
8095 static inline uint64_t
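/*
 * Read a filter's hit count straight out of its TCB via memory window 0.
 * T4 keeps a 64-bit count at TCB offset 16; T5 and later keep a 32-bit count
 * at offset 24.
 */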
8096 get_filter_hits(struct adapter *sc, uint32_t fid)
8097 {
8098 	uint32_t tcb_addr;
8099 
8100 	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
8101 	    (fid + sc->tids.ftid_base) * TCB_SIZE;
8102 
8103 	if (is_t4(sc)) {
8104 		uint64_t hits;
8105 
8106 		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
8107 		return (be64toh(hits));
8108 	} else {
8109 		uint32_t hits;
8110 
8111 		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
8112 		return (be32toh(hits));
8113 	}
8114 }
8115 
8116 static int
8117 get_filter(struct adapter *sc, struct t4_filter *t)
8118 {
8119 	int i, rc, nfilters = sc->tids.nftids;
8120 	struct filter_entry *f;
8121 
8122 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8123 	    "t4getf");
8124 	if (rc)
8125 		return (rc);
8126 
8127 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
8128 	    t->idx >= nfilters) {
8129 		t->idx = 0xffffffff;
8130 		goto done;
8131 	}
8132 
8133 	f = &sc->tids.ftid_tab[t->idx];
8134 	for (i = t->idx; i < nfilters; i++, f++) {
8135 		if (f->valid) {
8136 			t->idx = i;
8137 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
8138 			t->smtidx = f->smtidx;
8139 			if (f->fs.hitcnts)
8140 				t->hits = get_filter_hits(sc, t->idx);
8141 			else
8142 				t->hits = UINT64_MAX;
8143 			t->fs = f->fs;
8144 
8145 			goto done;
8146 		}
8147 	}
8148 
8149 	t->idx = 0xffffffff;
8150 done:
8151 	end_synchronized_op(sc, LOCK_HELD);
8152 	return (0);
8153 }
8154 
8155 static int
8156 set_filter(struct adapter *sc, struct t4_filter *t)
8157 {
8158 	unsigned int nfilters, nports;
8159 	struct filter_entry *f;
8160 	int i, rc;
8161 
8162 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
8163 	if (rc)
8164 		return (rc);
8165 
8166 	nfilters = sc->tids.nftids;
8167 	nports = sc->params.nports;
8168 
8169 	if (nfilters == 0) {
8170 		rc = ENOTSUP;
8171 		goto done;
8172 	}
8173 
8174 	if (t->idx >= nfilters) {
8175 		rc = EINVAL;
8176 		goto done;
8177 	}
8178 
8179 	/* Validate against the global filter mode and ingress config */
8180 	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
8181 	if (rc != 0)
8182 		goto done;
8183 
8184 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
8185 		rc = EINVAL;
8186 		goto done;
8187 	}
8188 
8189 	if (t->fs.val.iport >= nports) {
8190 		rc = EINVAL;
8191 		goto done;
8192 	}
8193 
8194 	/* Can't specify an iq if not steering to it */
8195 	if (!t->fs.dirsteer && t->fs.iq) {
8196 		rc = EINVAL;
8197 		goto done;
8198 	}
8199 
8200 	/* IPv6 filter idx must be 4 aligned */
8201 	if (t->fs.type == 1 &&
8202 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
8203 		rc = EINVAL;
8204 		goto done;
8205 	}
8206 
8207 	if (!(sc->flags & FULL_INIT_DONE) &&
8208 	    ((rc = adapter_full_init(sc)) != 0))
8209 		goto done;
8210 
8211 	if (sc->tids.ftid_tab == NULL) {
8212 		KASSERT(sc->tids.ftids_in_use == 0,
8213 		    ("%s: no memory allocated but ftids_in_use > 0",
8214 		    __func__));
8215 
8216 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
8217 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
8218 		if (sc->tids.ftid_tab == NULL) {
8219 			rc = ENOMEM;
8220 			goto done;
8221 		}
8222 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
8223 	}
8224 
8225 	for (i = 0; i < 4; i++) {
8226 		f = &sc->tids.ftid_tab[t->idx + i];
8227 
8228 		if (f->pending || f->valid) {
8229 			rc = EBUSY;
8230 			goto done;
8231 		}
8232 		if (f->locked) {
8233 			rc = EPERM;
8234 			goto done;
8235 		}
8236 
8237 		if (t->fs.type == 0)
8238 			break;
8239 	}
8240 
8241 	f = &sc->tids.ftid_tab[t->idx];
8242 	f->fs = t->fs;
8243 
8244 	rc = set_filter_wr(sc, t->idx);
8245 done:
8246 	end_synchronized_op(sc, 0);
8247 
8248 	if (rc == 0) {
8249 		mtx_lock(&sc->tids.ftid_lock);
8250 		for (;;) {
8251 			if (f->pending == 0) {
8252 				rc = f->valid ? 0 : EIO;
8253 				break;
8254 			}
8255 
8256 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8257 			    PCATCH, "t4setfw", 0)) {
8258 				rc = EINPROGRESS;
8259 				break;
8260 			}
8261 		}
8262 		mtx_unlock(&sc->tids.ftid_lock);
8263 	}
8264 	return (rc);
8265 }
8266 
8267 static int
8268 del_filter(struct adapter *sc, struct t4_filter *t)
8269 {
8270 	unsigned int nfilters;
8271 	struct filter_entry *f;
8272 	int rc;
8273 
8274 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
8275 	if (rc)
8276 		return (rc);
8277 
8278 	nfilters = sc->tids.nftids;
8279 
8280 	if (nfilters == 0) {
8281 		rc = ENOTSUP;
8282 		goto done;
8283 	}
8284 
8285 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
8286 	    t->idx >= nfilters) {
8287 		rc = EINVAL;
8288 		goto done;
8289 	}
8290 
8291 	if (!(sc->flags & FULL_INIT_DONE)) {
8292 		rc = EAGAIN;
8293 		goto done;
8294 	}
8295 
8296 	f = &sc->tids.ftid_tab[t->idx];
8297 
8298 	if (f->pending) {
8299 		rc = EBUSY;
8300 		goto done;
8301 	}
8302 	if (f->locked) {
8303 		rc = EPERM;
8304 		goto done;
8305 	}
8306 
8307 	if (f->valid) {
8308 		t->fs = f->fs;	/* extra info for the caller */
8309 		rc = del_filter_wr(sc, t->idx);
8310 	}
8311 
8312 done:
8313 	end_synchronized_op(sc, 0);
8314 
8315 	if (rc == 0) {
8316 		mtx_lock(&sc->tids.ftid_lock);
8317 		for (;;) {
8318 			if (f->pending == 0) {
8319 				rc = f->valid ? EIO : 0;
8320 				break;
8321 			}
8322 
8323 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8324 			    PCATCH, "t4delfw", 0)) {
8325 				rc = EINPROGRESS;
8326 				break;
8327 			}
8328 		}
8329 		mtx_unlock(&sc->tids.ftid_lock);
8330 	}
8331 
8332 	return (rc);
8333 }
8334 
8335 static void
8336 clear_filter(struct filter_entry *f)
8337 {
8338 	if (f->l2t)
8339 		t4_l2t_release(f->l2t);
8340 
8341 	bzero(f, sizeof (*f));
8342 }
8343 
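/*
 * Build and post a FW_FILTER_WR on the management queue, allocating an L2T
 * switching entry first if the filter rewrites the DMAC or VLAN.  The work
 * request only starts the operation: the filter is marked pending here and
 * becomes valid (or is torn down) in t4_filter_rpl() when the firmware's
 * CPL_SET_TCB_RPL arrives; set_filter() and del_filter() sleep on ftid_tab
 * until that happens.
 */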
8344 static int
8345 set_filter_wr(struct adapter *sc, int fidx)
8346 {
8347 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8348 	struct fw_filter_wr *fwr;
8349 	unsigned int ftid, vnic_vld, vnic_vld_mask;
8350 	struct wrq_cookie cookie;
8351 
8352 	ASSERT_SYNCHRONIZED_OP(sc);
8353 
8354 	if (f->fs.newdmac || f->fs.newvlan) {
8355 		/* This filter needs an L2T entry; allocate one. */
8356 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
8357 		if (f->l2t == NULL)
8358 			return (EAGAIN);
8359 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
8360 		    f->fs.dmac)) {
8361 			t4_l2t_release(f->l2t);
8362 			f->l2t = NULL;
8363 			return (ENOMEM);
8364 		}
8365 	}
8366 
8367 	/* Already validated against fconf, iconf */
8368 	MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
8369 	MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
8370 	if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
8371 		vnic_vld = 1;
8372 	else
8373 		vnic_vld = 0;
8374 	if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
8375 		vnic_vld_mask = 1;
8376 	else
8377 		vnic_vld_mask = 0;
8378 
8379 	ftid = sc->tids.ftid_base + fidx;
8380 
8381 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8382 	if (fwr == NULL)
8383 		return (ENOMEM);
8384 	bzero(fwr, sizeof(*fwr));
8385 
8386 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
8387 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
8388 	fwr->tid_to_iq =
8389 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
8390 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
8391 		V_FW_FILTER_WR_NOREPLY(0) |
8392 		V_FW_FILTER_WR_IQ(f->fs.iq));
8393 	fwr->del_filter_to_l2tix =
8394 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
8395 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
8396 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
8397 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
8398 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
8399 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
8400 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
8401 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
8402 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
8403 		    f->fs.newvlan == VLAN_REWRITE) |
8404 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
8405 		    f->fs.newvlan == VLAN_REWRITE) |
8406 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
8407 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
8408 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
8409 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
8410 	fwr->ethtype = htobe16(f->fs.val.ethtype);
8411 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
8412 	fwr->frag_to_ovlan_vldm =
8413 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
8414 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
8415 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
8416 		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
8417 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
8418 		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
8419 	fwr->smac_sel = 0;
8420 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
8421 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
8422 	fwr->maci_to_matchtypem =
8423 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
8424 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
8425 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
8426 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
8427 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
8428 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
8429 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
8430 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
8431 	fwr->ptcl = f->fs.val.proto;
8432 	fwr->ptclm = f->fs.mask.proto;
8433 	fwr->ttyp = f->fs.val.tos;
8434 	fwr->ttypm = f->fs.mask.tos;
8435 	fwr->ivlan = htobe16(f->fs.val.vlan);
8436 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
8437 	fwr->ovlan = htobe16(f->fs.val.vnic);
8438 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
8439 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
8440 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
8441 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
8442 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
8443 	fwr->lp = htobe16(f->fs.val.dport);
8444 	fwr->lpm = htobe16(f->fs.mask.dport);
8445 	fwr->fp = htobe16(f->fs.val.sport);
8446 	fwr->fpm = htobe16(f->fs.mask.sport);
8447 	if (f->fs.newsmac)
8448 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
8449 
8450 	f->pending = 1;
8451 	sc->tids.ftids_in_use++;
8452 
8453 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8454 	return (0);
8455 }
8456 
8457 static int
8458 del_filter_wr(struct adapter *sc, int fidx)
8459 {
8460 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8461 	struct fw_filter_wr *fwr;
8462 	unsigned int ftid;
8463 	struct wrq_cookie cookie;
8464 
8465 	ftid = sc->tids.ftid_base + fidx;
8466 
8467 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8468 	if (fwr == NULL)
8469 		return (ENOMEM);
8470 	bzero(fwr, sizeof (*fwr));
8471 
8472 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
8473 
8474 	f->pending = 1;
8475 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8476 	return (0);
8477 }
8478 
8479 int
8480 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8481 {
8482 	struct adapter *sc = iq->adapter;
8483 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
8484 	unsigned int idx = GET_TID(rpl);
8485 	unsigned int rc;
8486 	struct filter_entry *f;
8487 
8488 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
8489 	    rss->opcode));
8490 	MPASS(iq == &sc->sge.fwq);
8491 	MPASS(is_ftid(sc, idx));
8492 
8493 	idx -= sc->tids.ftid_base;
8494 	f = &sc->tids.ftid_tab[idx];
8495 	rc = G_COOKIE(rpl->cookie);
8496 
8497 	mtx_lock(&sc->tids.ftid_lock);
8498 	if (rc == FW_FILTER_WR_FLT_ADDED) {
8499 		KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
8500 		    __func__, idx));
8501 		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
8502 		f->pending = 0;  /* asynchronous setup completed */
8503 		f->valid = 1;
8504 	} else {
8505 		if (rc != FW_FILTER_WR_FLT_DELETED) {
8506 			/* Add or delete failed, display an error */
8507 			log(LOG_ERR,
8508 			    "filter %u setup failed with error %u\n",
8509 			    idx, rc);
8510 		}
8511 
8512 		clear_filter(f);
8513 		sc->tids.ftids_in_use--;
8514 	}
8515 	wakeup(&sc->tids.ftid_tab);
8516 	mtx_unlock(&sc->tids.ftid_lock);
8517 
8518 	return (0);
8519 }
8520 
8521 static int
8522 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8523 {
8524 
8525 	MPASS(iq->set_tcb_rpl != NULL);
8526 	return (iq->set_tcb_rpl(iq, rss, m));
8527 }
8528 
8529 static int
8530 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8531 {
8532 
8533 	MPASS(iq->l2t_write_rpl != NULL);
8534 	return (iq->l2t_write_rpl(iq, rss, m));
8535 }
8536 
8537 static int
8538 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8539 {
8540 	int rc;
8541 
8542 	if (cntxt->cid > M_CTXTQID)
8543 		return (EINVAL);
8544 
8545 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8546 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8547 		return (EINVAL);
8548 
8549 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8550 	if (rc)
8551 		return (rc);
8552 
8553 	if (sc->flags & FW_OK) {
8554 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8555 		    &cntxt->data[0]);
8556 		if (rc == 0)
8557 			goto done;
8558 	}
8559 
8560 	/*
8561 	 * Read via firmware failed or wasn't even attempted.  Read directly via
8562 	 * the backdoor.
8563 	 */
8564 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8565 done:
8566 	end_synchronized_op(sc, 0);
8567 	return (rc);
8568 }
8569 
8570 static int
8571 load_fw(struct adapter *sc, struct t4_data *fw)
8572 {
8573 	int rc;
8574 	uint8_t *fw_data;
8575 
8576 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8577 	if (rc)
8578 		return (rc);
8579 
8580 	if (sc->flags & FULL_INIT_DONE) {
8581 		rc = EBUSY;
8582 		goto done;
8583 	}
8584 
8585 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
8586 	if (fw_data == NULL) {
8587 		rc = ENOMEM;
8588 		goto done;
8589 	}
8590 
8591 	rc = copyin(fw->data, fw_data, fw->len);
8592 	if (rc == 0)
8593 		rc = -t4_load_fw(sc, fw_data, fw->len);
8594 
8595 	free(fw_data, M_CXGBE);
8596 done:
8597 	end_synchronized_op(sc, 0);
8598 	return (rc);
8599 }
8600 
8601 static int
8602 load_cfg(struct adapter *sc, struct t4_data *cfg)
8603 {
8604 	int rc;
8605 	uint8_t *cfg_data = NULL;
8606 
8607 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
8608 	if (rc)
8609 		return (rc);
8610 
8611 	if (cfg->len == 0) {
8612 		/* clear */
8613 		rc = -t4_load_cfg(sc, NULL, 0);
8614 		goto done;
8615 	}
8616 
8617 	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
8618 	if (cfg_data == NULL) {
8619 		rc = ENOMEM;
8620 		goto done;
8621 	}
8622 
8623 	rc = copyin(cfg->data, cfg_data, cfg->len);
8624 	if (rc == 0)
8625 		rc = -t4_load_cfg(sc, cfg_data, cfg->len);
8626 
8627 	free(cfg_data, M_CXGBE);
8628 done:
8629 	end_synchronized_op(sc, 0);
8630 	return (rc);
8631 }
8632 
8633 #define MAX_READ_BUF_SIZE (128 * 1024)
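/*
 * Validate the requested range of card memory, then read it through memory
 * window 2 in chunks of at most MAX_READ_BUF_SIZE bytes, copying each chunk
 * out to the caller's buffer.
 */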
8634 static int
8635 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
8636 {
8637 	uint32_t addr, remaining, n;
8638 	uint32_t *buf;
8639 	int rc;
8640 	uint8_t *dst;
8641 
8642 	rc = validate_mem_range(sc, mr->addr, mr->len);
8643 	if (rc != 0)
8644 		return (rc);
8645 
8646 	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
8647 	addr = mr->addr;
8648 	remaining = mr->len;
8649 	dst = (void *)mr->data;
8650 
8651 	while (remaining) {
8652 		n = min(remaining, MAX_READ_BUF_SIZE);
8653 		read_via_memwin(sc, 2, addr, buf, n);
8654 
8655 		rc = copyout(buf, dst, n);
8656 		if (rc != 0)
8657 			break;
8658 
8659 		dst += n;
8660 		remaining -= n;
8661 		addr += n;
8662 	}
8663 
8664 	free(buf, M_CXGBE);
8665 	return (rc);
8666 }
8667 #undef MAX_READ_BUF_SIZE
8668 
8669 static int
8670 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
8671 {
8672 	int rc;
8673 
8674 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
8675 		return (EINVAL);
8676 
8677 	if (i2cd->len > sizeof(i2cd->data))
8678 		return (EFBIG);
8679 
8680 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
8681 	if (rc)
8682 		return (rc);
8683 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
8684 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
8685 	end_synchronized_op(sc, 0);
8686 
8687 	return (rc);
8688 }
8689 
8690 static int
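/*
 * Range check used by the scheduler ioctls: a negative value means
 * "not specified" and passes the check; set_sched_class_params() fills in or
 * rejects unset values afterwards.
 */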
8691 in_range(int val, int lo, int hi)
8692 {
8693 
8694 	return (val < 0 || (val <= hi && val >= lo));
8695 }
8696 
8697 static int
8698 set_sched_class_config(struct adapter *sc, int minmax)
8699 {
8700 	int rc;
8701 
8702 	if (minmax < 0)
8703 		return (EINVAL);
8704 
8705 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
8706 	if (rc)
8707 		return (rc);
8708 	rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
8709 	end_synchronized_op(sc, 0);
8710 
8711 	return (rc);
8712 }
8713 
8714 static int
8715 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
8716     int sleep_ok)
8717 {
8718 	int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
8719 	struct port_info *pi;
8720 	struct tx_sched_class *tc;
8721 
8722 	if (p->level == SCHED_CLASS_LEVEL_CL_RL)
8723 		fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
8724 	else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
8725 		fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
8726 	else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
8727 		fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
8728 	else
8729 		return (EINVAL);
8730 
8731 	if (p->mode == SCHED_CLASS_MODE_CLASS)
8732 		fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
8733 	else if (p->mode == SCHED_CLASS_MODE_FLOW)
8734 		fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
8735 	else
8736 		return (EINVAL);
8737 
8738 	if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
8739 		fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
8740 	else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
8741 		fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
8742 	else
8743 		return (EINVAL);
8744 
8745 	if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
8746 		fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
8747 	else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
8748 		fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
8749 	else
8750 		return (EINVAL);
8751 
8752 	/* Vet our parameters ... */
8753 	if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
8754 		return (ERANGE);
8755 
8756 	pi = sc->port[sc->chan_map[p->channel]];
8757 	if (pi == NULL)
8758 		return (ENXIO);
8759 	MPASS(pi->tx_chan == p->channel);
8760 	top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
8761 
8762 	if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) ||
8763 	    !in_range(p->minrate, 0, top_speed) ||
8764 	    !in_range(p->maxrate, 0, top_speed) ||
8765 	    !in_range(p->weight, 0, 100))
8766 		return (ERANGE);
8767 
8768 	/*
8769 	 * Translate any unset parameters into the firmware's
8770 	 * nomenclature and/or fail the call if the parameters
8771 	 * are required ...
8772 	 */
8773 	if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
8774 		return (EINVAL);
8775 
8776 	if (p->minrate < 0)
8777 		p->minrate = 0;
8778 	if (p->maxrate < 0) {
8779 		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
8780 		    p->level == SCHED_CLASS_LEVEL_CH_RL)
8781 			return (EINVAL);
8782 		else
8783 			p->maxrate = 0;
8784 	}
8785 	if (p->weight < 0) {
8786 		if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
8787 			return (EINVAL);
8788 		else
8789 			p->weight = 0;
8790 	}
8791 	if (p->pktsize < 0) {
8792 		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
8793 		    p->level == SCHED_CLASS_LEVEL_CH_RL)
8794 			return (EINVAL);
8795 		else
8796 			p->pktsize = 0;
8797 	}
8798 
8799 	rc = begin_synchronized_op(sc, NULL,
8800 	    sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
8801 	if (rc)
8802 		return (rc);
8803 	tc = &pi->tc[p->cl];
8804 	tc->params = *p;
8805 	rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
8806 	    fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
8807 	    p->weight, p->pktsize, sleep_ok);
8808 	if (rc == 0)
8809 		tc->flags |= TX_SC_OK;
8810 	else {
8811 		/*
8812 		 * Unknown state at this point, see tc->params for what was
8813 		 * attempted.
8814 		 */
8815 		tc->flags &= ~TX_SC_OK;
8816 	}
8817 	end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
8818 
8819 	return (rc);
8820 }
8821 
8822 int
8823 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
8824 {
8825 
8826 	if (p->type != SCHED_CLASS_TYPE_PACKET)
8827 		return (EINVAL);
8828 
8829 	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
8830 		return (set_sched_class_config(sc, p->u.config.minmax));
8831 
8832 	if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
8833 		return (set_sched_class_params(sc, &p->u.params, 1));
8834 
8835 	return (EINVAL);
8836 }
8837 
8838 int
8839 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
8840 {
8841 	struct port_info *pi = NULL;
8842 	struct vi_info *vi;
8843 	struct sge_txq *txq;
8844 	uint32_t fw_mnem, fw_queue, fw_class;
8845 	int i, rc;
8846 
8847 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
8848 	if (rc)
8849 		return (rc);
8850 
8851 	if (p->port >= sc->params.nports) {
8852 		rc = EINVAL;
8853 		goto done;
8854 	}
8855 
8856 	/* XXX: Only supported for the main VI. */
8857 	pi = sc->port[p->port];
8858 	vi = &pi->vi[0];
8859 	if (!(vi->flags & VI_INIT_DONE)) {
8860 		/* tx queues not set up yet */
8861 		rc = EAGAIN;
8862 		goto done;
8863 	}
8864 
8865 	if (!in_range(p->queue, 0, vi->ntxq - 1) ||
8866 	    !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) {
8867 		rc = EINVAL;
8868 		goto done;
8869 	}
8870 
8871 	/*
8872 	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
8873 	 * Scheduling Class in this case).
8874 	 */
8875 	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
8876 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
8877 	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
8878 
8879 	/*
8880 	 * If p->queue is non-negative, then we're only changing the scheduling
8881 	 * on a single specified TX queue.
8882 	 */
8883 	if (p->queue >= 0) {
8884 		txq = &sc->sge.txq[vi->first_txq + p->queue];
8885 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8886 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8887 		    &fw_class);
8888 		goto done;
8889 	}
8890 
8891 	/*
8892 	 * Change the scheduling on all the TX queues for the
8893 	 * interface.
8894 	 */
8895 	for_each_txq(vi, i, txq) {
8896 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8897 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8898 		    &fw_class);
8899 		if (rc)
8900 			goto done;
8901 	}
8902 
8903 	rc = 0;
8904 done:
8905 	end_synchronized_op(sc, 0);
8906 	return (rc);
8907 }
8908 
8909 int
8910 t4_os_find_pci_capability(struct adapter *sc, int cap)
8911 {
8912 	int i;
8913 
8914 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
8915 }
8916 
8917 int
8918 t4_os_pci_save_state(struct adapter *sc)
8919 {
8920 	device_t dev;
8921 	struct pci_devinfo *dinfo;
8922 
8923 	dev = sc->dev;
8924 	dinfo = device_get_ivars(dev);
8925 
8926 	pci_cfg_save(dev, dinfo, 0);
8927 	return (0);
8928 }
8929 
8930 int
8931 t4_os_pci_restore_state(struct adapter *sc)
8932 {
8933 	device_t dev;
8934 	struct pci_devinfo *dinfo;
8935 
8936 	dev = sc->dev;
8937 	dinfo = device_get_ivars(dev);
8938 
8939 	pci_cfg_restore(dev, dinfo);
8940 	return (0);
8941 }
8942 
8943 void
8944 t4_os_portmod_changed(const struct adapter *sc, int idx)
8945 {
8946 	struct port_info *pi = sc->port[idx];
8947 	struct vi_info *vi;
8948 	struct ifnet *ifp;
8949 	int v;
8950 	static const char *mod_str[] = {
8951 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
8952 	};
8953 
8954 	for_each_vi(pi, v, vi) {
8955 		build_medialist(pi, &vi->media);
8956 	}
8957 
8958 	ifp = pi->vi[0].ifp;
8959 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
8960 		if_printf(ifp, "transceiver unplugged.\n");
8961 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
8962 		if_printf(ifp, "unknown transceiver inserted.\n");
8963 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
8964 		if_printf(ifp, "unsupported transceiver inserted.\n");
8965 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
8966 		if_printf(ifp, "%s transceiver inserted.\n",
8967 		    mod_str[pi->mod_type]);
8968 	} else {
8969 		if_printf(ifp, "transceiver (type %d) inserted.\n",
8970 		    pi->mod_type);
8971 	}
8972 }
8973 
8974 void
8975 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
8976 {
8977 	struct port_info *pi = sc->port[idx];
8978 	struct vi_info *vi;
8979 	struct ifnet *ifp;
8980 	int v;
8981 
8982 	if (link_stat)
8983 		pi->linkdnrc = -1;
8984 	else {
8985 		if (reason >= 0)
8986 			pi->linkdnrc = reason;
8987 	}
8988 	for_each_vi(pi, v, vi) {
8989 		ifp = vi->ifp;
8990 		if (ifp == NULL)
8991 			continue;
8992 
8993 		if (link_stat) {
8994 			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
8995 			if_link_state_change(ifp, LINK_STATE_UP);
8996 		} else {
8997 			if_link_state_change(ifp, LINK_STATE_DOWN);
8998 		}
8999 	}
9000 }
9001 
9002 void
9003 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
9004 {
9005 	struct adapter *sc;
9006 
9007 	sx_slock(&t4_list_lock);
9008 	SLIST_FOREACH(sc, &t4_list, link) {
9009 		/*
9010 		 * func should not make any assumptions about what state sc is
9011 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
9012 		 */
9013 		func(sc, arg);
9014 	}
9015 	sx_sunlock(&t4_list_lock);
9016 }
9017 
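/*
 * Character-device ioctl handler for the adapter's control node.  As a
 * minimal userland sketch (assuming the node is /dev/t4nex0 and the request
 * codes come from t4_ioctl.h), reading a 32-bit register looks roughly like:
 *
 *	struct t4_reg r = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd != -1 && ioctl(fd, CHELSIO_T4_GETREG, &r) == 0)
 *		printf("0x%08x: 0x%08jx\n", r.addr, (uintmax_t)r.val);
 */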
9018 static int
9019 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
9020     struct thread *td)
9021 {
9022 	int rc;
9023 	struct adapter *sc = dev->si_drv1;
9024 
9025 	rc = priv_check(td, PRIV_DRIVER);
9026 	if (rc != 0)
9027 		return (rc);
9028 
9029 	switch (cmd) {
9030 	case CHELSIO_T4_GETREG: {
9031 		struct t4_reg *edata = (struct t4_reg *)data;
9032 
9033 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9034 			return (EFAULT);
9035 
9036 		if (edata->size == 4)
9037 			edata->val = t4_read_reg(sc, edata->addr);
9038 		else if (edata->size == 8)
9039 			edata->val = t4_read_reg64(sc, edata->addr);
9040 		else
9041 			return (EINVAL);
9042 
9043 		break;
9044 	}
9045 	case CHELSIO_T4_SETREG: {
9046 		struct t4_reg *edata = (struct t4_reg *)data;
9047 
9048 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9049 			return (EFAULT);
9050 
9051 		if (edata->size == 4) {
9052 			if (edata->val & 0xffffffff00000000)
9053 				return (EINVAL);
9054 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
9055 		} else if (edata->size == 8)
9056 			t4_write_reg64(sc, edata->addr, edata->val);
9057 		else
9058 			return (EINVAL);
9059 		break;
9060 	}
9061 	case CHELSIO_T4_REGDUMP: {
9062 		struct t4_regdump *regs = (struct t4_regdump *)data;
9063 		int reglen = t4_get_regs_len(sc);
9064 		uint8_t *buf;
9065 
9066 		if (regs->len < reglen) {
9067 			regs->len = reglen; /* hint to the caller */
9068 			return (ENOBUFS);
9069 		}
9070 
9071 		regs->len = reglen;
9072 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
9073 		get_regs(sc, regs, buf);
9074 		rc = copyout(buf, regs->data, reglen);
9075 		free(buf, M_CXGBE);
9076 		break;
9077 	}
9078 	case CHELSIO_T4_GET_FILTER_MODE:
9079 		rc = get_filter_mode(sc, (uint32_t *)data);
9080 		break;
9081 	case CHELSIO_T4_SET_FILTER_MODE:
9082 		rc = set_filter_mode(sc, *(uint32_t *)data);
9083 		break;
9084 	case CHELSIO_T4_GET_FILTER:
9085 		rc = get_filter(sc, (struct t4_filter *)data);
9086 		break;
9087 	case CHELSIO_T4_SET_FILTER:
9088 		rc = set_filter(sc, (struct t4_filter *)data);
9089 		break;
9090 	case CHELSIO_T4_DEL_FILTER:
9091 		rc = del_filter(sc, (struct t4_filter *)data);
9092 		break;
9093 	case CHELSIO_T4_GET_SGE_CONTEXT:
9094 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
9095 		break;
9096 	case CHELSIO_T4_LOAD_FW:
9097 		rc = load_fw(sc, (struct t4_data *)data);
9098 		break;
9099 	case CHELSIO_T4_GET_MEM:
9100 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
9101 		break;
9102 	case CHELSIO_T4_GET_I2C:
9103 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
9104 		break;
9105 	case CHELSIO_T4_CLEAR_STATS: {
9106 		int i, v;
9107 		u_int port_id = *(uint32_t *)data;
9108 		struct port_info *pi;
9109 		struct vi_info *vi;
9110 
9111 		if (port_id >= sc->params.nports)
9112 			return (EINVAL);
9113 		pi = sc->port[port_id];
9114 		if (pi == NULL)
9115 			return (EIO);
9116 
9117 		/* MAC stats */
9118 		t4_clr_port_stats(sc, pi->tx_chan);
9119 		pi->tx_parse_error = 0;
9120 		mtx_lock(&sc->reg_lock);
9121 		for_each_vi(pi, v, vi) {
9122 			if (vi->flags & VI_INIT_DONE)
9123 				t4_clr_vi_stats(sc, vi->viid);
9124 		}
9125 		mtx_unlock(&sc->reg_lock);
9126 
9127 		/*
9128 		 * Since this command accepts a port, clear stats for
9129 		 * all VIs on this port.
9130 		 */
9131 		for_each_vi(pi, v, vi) {
9132 			if (vi->flags & VI_INIT_DONE) {
9133 				struct sge_rxq *rxq;
9134 				struct sge_txq *txq;
9135 				struct sge_wrq *wrq;
9136 
9137 				for_each_rxq(vi, i, rxq) {
9138 #if defined(INET) || defined(INET6)
9139 					rxq->lro.lro_queued = 0;
9140 					rxq->lro.lro_flushed = 0;
9141 #endif
9142 					rxq->rxcsum = 0;
9143 					rxq->vlan_extraction = 0;
9144 				}
9145 
9146 				for_each_txq(vi, i, txq) {
9147 					txq->txcsum = 0;
9148 					txq->tso_wrs = 0;
9149 					txq->vlan_insertion = 0;
9150 					txq->imm_wrs = 0;
9151 					txq->sgl_wrs = 0;
9152 					txq->txpkt_wrs = 0;
9153 					txq->txpkts0_wrs = 0;
9154 					txq->txpkts1_wrs = 0;
9155 					txq->txpkts0_pkts = 0;
9156 					txq->txpkts1_pkts = 0;
9157 					mp_ring_reset_stats(txq->r);
9158 				}
9159 
9160 #ifdef TCP_OFFLOAD
9161 				/* nothing to clear for each ofld_rxq */
9162 
9163 				for_each_ofld_txq(vi, i, wrq) {
9164 					wrq->tx_wrs_direct = 0;
9165 					wrq->tx_wrs_copied = 0;
9166 				}
9167 #endif
9168 
9169 				if (IS_MAIN_VI(vi)) {
9170 					wrq = &sc->sge.ctrlq[pi->port_id];
9171 					wrq->tx_wrs_direct = 0;
9172 					wrq->tx_wrs_copied = 0;
9173 				}
9174 			}
9175 		}
9176 		break;
9177 	}
9178 	case CHELSIO_T4_SCHED_CLASS:
9179 		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
9180 		break;
9181 	case CHELSIO_T4_SCHED_QUEUE:
9182 		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
9183 		break;
9184 	case CHELSIO_T4_GET_TRACER:
9185 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
9186 		break;
9187 	case CHELSIO_T4_SET_TRACER:
9188 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
9189 		break;
9190 	case CHELSIO_T4_LOAD_CFG:
9191 		rc = load_cfg(sc, (struct t4_data *)data);
9192 		break;
9193 	default:
9194 		rc = ENOTTY;
9195 	}
9196 
9197 	return (rc);
9198 }
9199 
9200 void
9201 t4_db_full(struct adapter *sc)
9202 {
9203 
9204 	CXGBE_UNIMPLEMENTED(__func__);
9205 }
9206 
9207 void
9208 t4_db_dropped(struct adapter *sc)
9209 {
9210 
9211 	CXGBE_UNIMPLEMENTED(__func__);
9212 }
9213 
9214 #ifdef TCP_OFFLOAD
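/*
 * Enable or disable TOE on a VI.  Must be called from within a synchronized
 * operation.  Brings up the VI's (and the port's main VI's) queues if needed,
 * activates the TOM ULD on first use, and keeps the per-port count of
 * TOE-enabled VIs and the adapter's offload map consistent.
 */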
9215 static int
9216 toe_capability(struct vi_info *vi, int enable)
9217 {
9218 	int rc;
9219 	struct port_info *pi = vi->pi;
9220 	struct adapter *sc = pi->adapter;
9221 
9222 	ASSERT_SYNCHRONIZED_OP(sc);
9223 
9224 	if (!is_offload(sc))
9225 		return (ENODEV);
9226 
9227 	if (enable) {
9228 		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
9229 			/* TOE is already enabled. */
9230 			return (0);
9231 		}
9232 
9233 		/*
9234 		 * We need the port's queues around so that we're able to send
9235 		 * and receive CPLs to/from the TOE even if the ifnet for this
9236 		 * port has never been UP'd administratively.
9237 		 */
9238 		if (!(vi->flags & VI_INIT_DONE)) {
9239 			rc = vi_full_init(vi);
9240 			if (rc)
9241 				return (rc);
9242 		}
9243 		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
9244 			rc = vi_full_init(&pi->vi[0]);
9245 			if (rc)
9246 				return (rc);
9247 		}
9248 
9249 		if (isset(&sc->offload_map, pi->port_id)) {
9250 			/* TOE is enabled on another VI of this port. */
9251 			pi->uld_vis++;
9252 			return (0);
9253 		}
9254 
9255 		if (!uld_active(sc, ULD_TOM)) {
9256 			rc = t4_activate_uld(sc, ULD_TOM);
9257 			if (rc == EAGAIN) {
9258 				log(LOG_WARNING,
9259 				    "You must kldload t4_tom.ko before trying "
9260 				    "to enable TOE on a cxgbe interface.\n");
9261 			}
9262 			if (rc != 0)
9263 				return (rc);
9264 			KASSERT(sc->tom_softc != NULL,
9265 			    ("%s: TOM activated but softc NULL", __func__));
9266 			KASSERT(uld_active(sc, ULD_TOM),
9267 			    ("%s: TOM activated but flag not set", __func__));
9268 		}
9269 
9270 		/* Activate iWARP and iSCSI too, if the modules are loaded. */
9271 		if (!uld_active(sc, ULD_IWARP))
9272 			(void) t4_activate_uld(sc, ULD_IWARP);
9273 		if (!uld_active(sc, ULD_ISCSI))
9274 			(void) t4_activate_uld(sc, ULD_ISCSI);
9275 
9276 		pi->uld_vis++;
9277 		setbit(&sc->offload_map, pi->port_id);
9278 	} else {
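		/*
		 * Disabling TOE on this VI.  The port is removed from the
		 * offload map only when its last TOE-enabled VI goes away.
		 */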
9279 		pi->uld_vis--;
9280 
9281 		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
9282 			return (0);
9283 
9284 		KASSERT(uld_active(sc, ULD_TOM),
9285 		    ("%s: TOM never initialized?", __func__));
9286 		clrbit(&sc->offload_map, pi->port_id);
9287 	}
9288 
9289 	return (0);
9290 }
9291 
9292 /*
9293  * Add an upper layer driver to the global list.
9294  */
9295 int
9296 t4_register_uld(struct uld_info *ui)
9297 {
9298 	int rc = 0;
9299 	struct uld_info *u;
9300 
9301 	sx_xlock(&t4_uld_list_lock);
9302 	SLIST_FOREACH(u, &t4_uld_list, link) {
9303 		if (u->uld_id == ui->uld_id) {
9304 			rc = EEXIST;
9305 			goto done;
9306 		}
9307 	}
9308 
9309 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9310 	ui->refcount = 0;
9311 done:
9312 	sx_xunlock(&t4_uld_list_lock);
9313 	return (rc);
9314 }
9315 
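/*
 * Remove an upper layer driver from the global list.  Fails with EBUSY if the
 * ULD is still in use, and with EINVAL if it was never registered.
 */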
9316 int
9317 t4_unregister_uld(struct uld_info *ui)
9318 {
9319 	int rc = EINVAL;
9320 	struct uld_info *u;
9321 
9322 	sx_xlock(&t4_uld_list_lock);
9323 
9324 	SLIST_FOREACH(u, &t4_uld_list, link) {
9325 		if (u == ui) {
9326 			if (ui->refcount > 0) {
9327 				rc = EBUSY;
9328 				goto done;
9329 			}
9330 
9331 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9332 			rc = 0;
9333 			goto done;
9334 		}
9335 	}
9336 done:
9337 	sx_xunlock(&t4_uld_list_lock);
9338 	return (rc);
9339 }
9340 
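/*
 * Activate a ULD on this adapter, performing full adapter initialization
 * first if that hasn't been done yet.  Returns EAGAIN if no driver providing
 * this ULD is registered (i.e. its module has not been loaded).
 */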
9341 int
9342 t4_activate_uld(struct adapter *sc, int id)
9343 {
9344 	int rc;
9345 	struct uld_info *ui;
9346 
9347 	ASSERT_SYNCHRONIZED_OP(sc);
9348 
9349 	if (id < 0 || id > ULD_MAX)
9350 		return (EINVAL);
9351 	rc = EAGAIN;	/* kldoad the module with this ULD and try again. */
9352 	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
9353 	sx_slock(&t4_uld_list_lock);
9354 
9355 	SLIST_FOREACH(ui, &t4_uld_list, link) {
9356 		if (ui->uld_id == id) {
9357 			if (!(sc->flags & FULL_INIT_DONE)) {
9358 				rc = adapter_full_init(sc);
9359 				if (rc != 0)
9360 					break;
9361 			}
9362 
9363 			rc = ui->activate(sc);
9364 			if (rc == 0) {
9365 				setbit(&sc->active_ulds, id);
9366 				ui->refcount++;
9367 			}
9368 			break;
9369 		}
9370 	}
9371 
9372 	sx_sunlock(&t4_uld_list_lock);
9373 
9374 	return (rc);
9375 }
9376 
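/*
 * Deactivate a ULD on this adapter.  Returns ENXIO if no driver providing
 * this ULD is registered.
 */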
9377 int
9378 t4_deactivate_uld(struct adapter *sc, int id)
9379 {
9380 	int rc;
9381 	struct uld_info *ui;
9382 
9383 	ASSERT_SYNCHRONIZED_OP(sc);
9384 
9385 	if (id < 0 || id > ULD_MAX)
9386 		return (EINVAL);
9387 	rc = ENXIO;
9388 
9389 	sx_slock(&t4_uld_list_lock);
9390 
9391 	SLIST_FOREACH(ui, &t4_uld_list, link) {
9392 		if (ui->uld_id == id) {
9393 			rc = ui->deactivate(sc);
9394 			if (rc == 0) {
9395 				clrbit(&sc->active_ulds, id);
9396 				ui->refcount--;
9397 			}
9398 			break;
9399 		}
9400 	}
9401 
9402 	sx_sunlock(&t4_uld_list_lock);
9403 
9404 	return (rc);
9405 }
9406 
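/* Returns non-zero if the named ULD is active on this adapter. */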
9407 int
9408 uld_active(struct adapter *sc, int uld_id)
9409 {
9410 
9411 	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9412 
9413 	return (isset(&sc->active_ulds, uld_id));
9414 }
9415 #endif
9416 
9417 /*
9418  * Come up with reasonable defaults for some of the tunables, provided they're
9419  * not set by the user (in which case we'll use the values as is).
9420  */
9421 static void
9422 tweak_tunables(void)
9423 {
9424 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
9425 
9426 	if (t4_ntxq10g < 1) {
9427 #ifdef RSS
9428 		t4_ntxq10g = rss_getnumbuckets();
9429 #else
9430 		t4_ntxq10g = min(nc, NTXQ_10G);
9431 #endif
9432 	}
9433 
9434 	if (t4_ntxq1g < 1) {
9435 #ifdef RSS
9436 		/* XXX: way too many for 1GbE? */
9437 		t4_ntxq1g = rss_getnumbuckets();
9438 #else
9439 		t4_ntxq1g = min(nc, NTXQ_1G);
9440 #endif
9441 	}
9442 
9443 	if (t4_ntxq_vi < 1)
9444 		t4_ntxq_vi = min(nc, NTXQ_VI);
9445 
9446 	if (t4_nrxq10g < 1) {
9447 #ifdef RSS
9448 		t4_nrxq10g = rss_getnumbuckets();
9449 #else
9450 		t4_nrxq10g = min(nc, NRXQ_10G);
9451 #endif
9452 	}
9453 
9454 	if (t4_nrxq1g < 1) {
9455 #ifdef RSS
9456 		/* XXX: way too many for 1GbE? */
9457 		t4_nrxq1g = rss_getnumbuckets();
9458 #else
9459 		t4_nrxq1g = min(nc, NRXQ_1G);
9460 #endif
9461 	}
9462 
9463 	if (t4_nrxq_vi < 1)
9464 		t4_nrxq_vi = min(nc, NRXQ_VI);
9465 
9466 #ifdef TCP_OFFLOAD
9467 	if (t4_nofldtxq10g < 1)
9468 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
9469 
9470 	if (t4_nofldtxq1g < 1)
9471 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
9472 
9473 	if (t4_nofldtxq_vi < 1)
9474 		t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI);
9475 
9476 	if (t4_nofldrxq10g < 1)
9477 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
9478 
9479 	if (t4_nofldrxq1g < 1)
9480 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
9481 
9482 	if (t4_nofldrxq_vi < 1)
9483 		t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI);
9484 
9485 	if (t4_toecaps_allowed == -1)
9486 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9487 
9488 	if (t4_rdmacaps_allowed == -1) {
9489 		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
9490 		    FW_CAPS_CONFIG_RDMA_RDMAC;
9491 	}
9492 
9493 	if (t4_iscsicaps_allowed == -1) {
9494 		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
9495 		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
9496 		    FW_CAPS_CONFIG_ISCSI_T10DIF;
9497 	}
9498 #else
9499 	if (t4_toecaps_allowed == -1)
9500 		t4_toecaps_allowed = 0;
9501 
9502 	if (t4_rdmacaps_allowed == -1)
9503 		t4_rdmacaps_allowed = 0;
9504 
9505 	if (t4_iscsicaps_allowed == -1)
9506 		t4_iscsicaps_allowed = 0;
9507 #endif
9508 
9509 #ifdef DEV_NETMAP
9510 	if (t4_nnmtxq_vi < 1)
9511 		t4_nnmtxq_vi = min(nc, NNMTXQ_VI);
9512 
9513 	if (t4_nnmrxq_vi < 1)
9514 		t4_nnmrxq_vi = min(nc, NNMRXQ_VI);
9515 #endif
9516 
9517 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
9518 		t4_tmr_idx_10g = TMR_IDX_10G;
9519 
9520 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
9521 		t4_pktc_idx_10g = PKTC_IDX_10G;
9522 
9523 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
9524 		t4_tmr_idx_1g = TMR_IDX_1G;
9525 
9526 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
9527 		t4_pktc_idx_1g = PKTC_IDX_1G;
9528 
9529 	if (t4_qsize_txq < 128)
9530 		t4_qsize_txq = 128;
9531 
9532 	if (t4_qsize_rxq < 128)
9533 		t4_qsize_rxq = 128;
9534 	while (t4_qsize_rxq & 7)
9535 		t4_qsize_rxq++;
9536 
9537 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
9538 }
9539 
9540 #ifdef DDB
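/*
 * Dump the hardware TCB for a tid.  Repositions PCIe memory window 2 over the
 * TCB, prints its 128 bytes as four rows of eight 32-bit words, and then
 * restores the window to where it was.
 */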
9541 static void
9542 t4_dump_tcb(struct adapter *sc, int tid)
9543 {
9544 	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
9545 
9546 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
9547 	save = t4_read_reg(sc, reg);
9548 	base = sc->memwin[2].mw_base;
9549 
9550 	/* Dump TCB for the tid */
9551 	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
9552 	tcb_addr += tid * TCB_SIZE;
9553 
9554 	if (is_t4(sc)) {
9555 		pf = 0;
9556 		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
9557 	} else {
9558 		pf = V_PFNUM(sc->pf);
9559 		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
9560 	}
9561 	t4_write_reg(sc, reg, win_pos | pf);
9562 	t4_read_reg(sc, reg);
9563 
9564 	off = tcb_addr - win_pos;
9565 	for (i = 0; i < 4; i++) {
9566 		uint32_t buf[8];
9567 		for (j = 0; j < 8; j++, off += 4)
9568 			buf[j] = htonl(t4_read_reg(sc, base + off));
9569 
9570 		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
9571 		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
9572 		    buf[7]);
9573 	}
9574 
9575 	t4_write_reg(sc, reg, save);
9576 	t4_read_reg(sc, reg);
9577 }
9578 
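/*
 * Dump the firmware's device log, starting with the oldest entry (the one
 * with the lowest timestamp) and wrapping around the circular log buffer.
 */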
9579 static void
9580 t4_dump_devlog(struct adapter *sc)
9581 {
9582 	struct devlog_params *dparams = &sc->params.devlog;
9583 	struct fw_devlog_e e;
9584 	int i, first, j, m, nentries, rc;
9585 	uint64_t ftstamp = UINT64_MAX;
9586 
9587 	if (dparams->start == 0) {
9588 		db_printf("devlog params not valid\n");
9589 		return;
9590 	}
9591 
9592 	nentries = dparams->size / sizeof(struct fw_devlog_e);
9593 	m = fwmtype_to_hwmtype(dparams->memtype);
9594 
9595 	/* Find the first entry. */
9596 	first = -1;
9597 	for (i = 0; i < nentries && !db_pager_quit; i++) {
9598 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
9599 		    sizeof(e), (void *)&e);
9600 		if (rc != 0)
9601 			break;
9602 
9603 		if (e.timestamp == 0)
9604 			break;
9605 
9606 		e.timestamp = be64toh(e.timestamp);
9607 		if (e.timestamp < ftstamp) {
9608 			ftstamp = e.timestamp;
9609 			first = i;
9610 		}
9611 	}
9612 
9613 	if (first == -1)
9614 		return;
9615 
9616 	i = first;
9617 	do {
9618 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
9619 		    sizeof(e), (void *)&e);
9620 		if (rc != 0)
9621 			return;
9622 
9623 		if (e.timestamp == 0)
9624 			return;
9625 
9626 		e.timestamp = be64toh(e.timestamp);
9627 		e.seqno = be32toh(e.seqno);
9628 		for (j = 0; j < 8; j++)
9629 			e.params[j] = be32toh(e.params[j]);
9630 
9631 		db_printf("%10d  %15ju  %8s  %8s  ",
9632 		    e.seqno, e.timestamp,
9633 		    (e.level < nitems(devlog_level_strings) ?
9634 			devlog_level_strings[e.level] : "UNKNOWN"),
9635 		    (e.facility < nitems(devlog_facility_strings) ?
9636 			devlog_facility_strings[e.facility] : "UNKNOWN"));
9637 		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
9638 		    e.params[3], e.params[4], e.params[5], e.params[6],
9639 		    e.params[7]);
9640 
9641 		if (++i == nentries)
9642 			i = 0;
9643 	} while (i != first && !db_pager_quit);
9644 }
9645 
9646 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
9647 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
9648 
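/* DDB command: "show t4 devlog <nexus>" */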
9649 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
9650 {
9651 	device_t dev;
9652 	int t;
9653 	bool valid;
9654 
9655 	valid = false;
9656 	t = db_read_token();
9657 	if (t == tIDENT) {
9658 		dev = device_lookup_by_name(db_tok_string);
9659 		valid = true;
9660 	}
9661 	db_skip_to_eol();
9662 	if (!valid) {
9663 		db_printf("usage: show t4 devlog <nexus>\n");
9664 		return;
9665 	}
9666 
9667 	if (dev == NULL) {
9668 		db_printf("device not found\n");
9669 		return;
9670 	}
9671 
9672 	t4_dump_devlog(device_get_softc(dev));
9673 }
9674 
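/* DDB command: "show t4 tcb <nexus> <tid>" */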
9675 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
9676 {
9677 	device_t dev;
9678 	int radix, tid, t;
9679 	bool valid;
9680 
9681 	valid = false;
9682 	radix = db_radix;
9683 	db_radix = 10;
9684 	t = db_read_token();
9685 	if (t == tIDENT) {
9686 		dev = device_lookup_by_name(db_tok_string);
9687 		t = db_read_token();
9688 		if (t == tNUMBER) {
9689 			tid = db_tok_number;
9690 			valid = true;
9691 		}
9692 	}
9693 	db_radix = radix;
9694 	db_skip_to_eol();
9695 	if (!valid) {
9696 		db_printf("usage: show t4 tcb <nexus> <tid>\n");
9697 		return;
9698 	}
9699 
9700 	if (dev == NULL) {
9701 		db_printf("device not found\n");
9702 		return;
9703 	}
9704 	if (tid < 0) {
9705 		db_printf("invalid tid\n");
9706 		return;
9707 	}
9708 
9709 	t4_dump_tcb(device_get_softc(dev), tid);
9710 }
9711 #endif
9712 
9713 static struct sx mlu;	/* mod load unload */
9714 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
9715 
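/*
 * Module load/unload handler shared by the t4nex, t5nex, and t6nex drivers.
 * Module-wide setup runs only on the first load and teardown only on the
 * last unload; the unload is refused (EBUSY) while adapters, ULDs, or rx
 * clusters with custom free routines are still around.
 */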
9716 static int
9717 mod_event(module_t mod, int cmd, void *arg)
9718 {
9719 	int rc = 0;
9720 	static int loaded = 0;
9721 
9722 	switch (cmd) {
9723 	case MOD_LOAD:
9724 		sx_xlock(&mlu);
9725 		if (loaded++ == 0) {
9726 			t4_sge_modload();
9727 			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
9728 			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
9729 			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
9730 			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
9731 			sx_init(&t4_list_lock, "T4/T5 adapters");
9732 			SLIST_INIT(&t4_list);
9733 #ifdef TCP_OFFLOAD
9734 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
9735 			SLIST_INIT(&t4_uld_list);
9736 #endif
9737 			t4_tracer_modload();
9738 			tweak_tunables();
9739 		}
9740 		sx_xunlock(&mlu);
9741 		break;
9742 
9743 	case MOD_UNLOAD:
9744 		sx_xlock(&mlu);
9745 		if (--loaded == 0) {
9746 			int tries;
9747 
9748 			sx_slock(&t4_list_lock);
9749 			if (!SLIST_EMPTY(&t4_list)) {
9750 				rc = EBUSY;
9751 				sx_sunlock(&t4_list_lock);
9752 				goto done_unload;
9753 			}
9754 #ifdef TCP_OFFLOAD
9755 			sx_slock(&t4_uld_list_lock);
9756 			if (!SLIST_EMPTY(&t4_uld_list)) {
9757 				rc = EBUSY;
9758 				sx_sunlock(&t4_uld_list_lock);
9759 				sx_sunlock(&t4_list_lock);
9760 				goto done_unload;
9761 			}
9762 #endif
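			/*
			 * Give rx clusters that use a custom free routine up
			 * to ~10 seconds (5 attempts, 2 seconds apart) to be
			 * returned before giving up on the unload.
			 */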
9763 			tries = 0;
9764 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
9765 				uprintf("%ju clusters with custom free routine "
9766 				    "still in use.\n", t4_sge_extfree_refs());
9767 				pause("t4unload", 2 * hz);
9768 			}
9769 #ifdef TCP_OFFLOAD
9770 			sx_sunlock(&t4_uld_list_lock);
9771 #endif
9772 			sx_sunlock(&t4_list_lock);
9773 
9774 			if (t4_sge_extfree_refs() == 0) {
9775 				t4_tracer_modunload();
9776 #ifdef TCP_OFFLOAD
9777 				sx_destroy(&t4_uld_list_lock);
9778 #endif
9779 				sx_destroy(&t4_list_lock);
9780 				t4_sge_modunload();
9781 				loaded = 0;
9782 			} else {
9783 				rc = EBUSY;
9784 				loaded++;	/* undo earlier decrement */
9785 			}
9786 		}
9787 done_unload:
9788 		sx_xunlock(&mlu);
9789 		break;
9790 	}
9791 
9792 	return (rc);
9793 }
9794 
9795 static devclass_t t4_devclass, t5_devclass, t6_devclass;
9796 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
9797 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
9798 
9799 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
9800 MODULE_VERSION(t4nex, 1);
9801 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
9802 #ifdef DEV_NETMAP
9803 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
9804 #endif /* DEV_NETMAP */
9805 
9806 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
9807 MODULE_VERSION(t5nex, 1);
9808 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
9809 #ifdef DEV_NETMAP
9810 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
9811 #endif /* DEV_NETMAP */
9812 
9813 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
9814 MODULE_VERSION(t6nex, 1);
9815 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
9816 #ifdef DEV_NETMAP
9817 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
9818 #endif /* DEV_NETMAP */
9819 
9820 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
9821 MODULE_VERSION(cxgbe, 1);
9822 
9823 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
9824 MODULE_VERSION(cxl, 1);
9825 
9826 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
9827 MODULE_VERSION(cc, 1);
9828 
9829 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
9830 MODULE_VERSION(vcxgbe, 1);
9831 
9832 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
9833 MODULE_VERSION(vcxl, 1);
9834 
9835 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
9836 MODULE_VERSION(vcc, 1);
9837