xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 2dc4dbb9673c9a3309c2dad59cb588c6f04beaea)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: Navdeep Parhar <np@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_ddb.h"
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_ratelimit.h"
37 #include "opt_rss.h"
38 
39 #include <sys/param.h>
40 #include <sys/conf.h>
41 #include <sys/priv.h>
42 #include <sys/kernel.h>
43 #include <sys/bus.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
47 #include <sys/taskqueue.h>
48 #include <sys/pciio.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pci_private.h>
52 #include <sys/firmware.h>
53 #include <sys/sbuf.h>
54 #include <sys/smp.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <net/ethernet.h>
59 #include <net/if.h>
60 #include <net/if_types.h>
61 #include <net/if_dl.h>
62 #include <net/if_vlan_var.h>
63 #ifdef RSS
64 #include <net/rss_config.h>
65 #endif
66 #if defined(__i386__) || defined(__amd64__)
67 #include <machine/md_var.h>
68 #include <machine/cputypes.h>
69 #include <vm/vm.h>
70 #include <vm/pmap.h>
71 #endif
72 #include <crypto/rijndael/rijndael.h>
73 #ifdef DDB
74 #include <ddb/ddb.h>
75 #include <ddb/db_lex.h>
76 #endif
77 
78 #include "common/common.h"
79 #include "common/t4_msg.h"
80 #include "common/t4_regs.h"
81 #include "common/t4_regs_values.h"
82 #include "cudbg/cudbg.h"
83 #include "t4_ioctl.h"
84 #include "t4_l2t.h"
85 #include "t4_mp_ring.h"
86 #include "t4_if.h"
87 #include "t4_smt.h"
88 
89 /* T4 bus driver interface */
90 static int t4_probe(device_t);
91 static int t4_attach(device_t);
92 static int t4_detach(device_t);
93 static int t4_ready(device_t);
94 static int t4_read_port_device(device_t, int, device_t *);
95 static device_method_t t4_methods[] = {
96 	DEVMETHOD(device_probe,		t4_probe),
97 	DEVMETHOD(device_attach,	t4_attach),
98 	DEVMETHOD(device_detach,	t4_detach),
99 
100 	DEVMETHOD(t4_is_main_ready,	t4_ready),
101 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
102 
103 	DEVMETHOD_END
104 };
105 static driver_t t4_driver = {
106 	"t4nex",
107 	t4_methods,
108 	sizeof(struct adapter)
109 };
110 
111 
112 /* T4 port (cxgbe) interface */
113 static int cxgbe_probe(device_t);
114 static int cxgbe_attach(device_t);
115 static int cxgbe_detach(device_t);
116 device_method_t cxgbe_methods[] = {
117 	DEVMETHOD(device_probe,		cxgbe_probe),
118 	DEVMETHOD(device_attach,	cxgbe_attach),
119 	DEVMETHOD(device_detach,	cxgbe_detach),
120 	{ 0, 0 }
121 };
122 static driver_t cxgbe_driver = {
123 	"cxgbe",
124 	cxgbe_methods,
125 	sizeof(struct port_info)
126 };
127 
128 /* T4 VI (vcxgbe) interface */
129 static int vcxgbe_probe(device_t);
130 static int vcxgbe_attach(device_t);
131 static int vcxgbe_detach(device_t);
132 static device_method_t vcxgbe_methods[] = {
133 	DEVMETHOD(device_probe,		vcxgbe_probe),
134 	DEVMETHOD(device_attach,	vcxgbe_attach),
135 	DEVMETHOD(device_detach,	vcxgbe_detach),
136 	{ 0, 0 }
137 };
138 static driver_t vcxgbe_driver = {
139 	"vcxgbe",
140 	vcxgbe_methods,
141 	sizeof(struct vi_info)
142 };
143 
144 static d_ioctl_t t4_ioctl;
145 
146 static struct cdevsw t4_cdevsw = {
147        .d_version = D_VERSION,
148        .d_ioctl = t4_ioctl,
149        .d_name = "t4nex",
150 };
151 
152 /* T5 bus driver interface */
153 static int t5_probe(device_t);
154 static device_method_t t5_methods[] = {
155 	DEVMETHOD(device_probe,		t5_probe),
156 	DEVMETHOD(device_attach,	t4_attach),
157 	DEVMETHOD(device_detach,	t4_detach),
158 
159 	DEVMETHOD(t4_is_main_ready,	t4_ready),
160 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
161 
162 	DEVMETHOD_END
163 };
164 static driver_t t5_driver = {
165 	"t5nex",
166 	t5_methods,
167 	sizeof(struct adapter)
168 };
169 
170 
171 /* T5 port (cxl) interface */
172 static driver_t cxl_driver = {
173 	"cxl",
174 	cxgbe_methods,
175 	sizeof(struct port_info)
176 };
177 
178 /* T5 VI (vcxl) interface */
179 static driver_t vcxl_driver = {
180 	"vcxl",
181 	vcxgbe_methods,
182 	sizeof(struct vi_info)
183 };
184 
185 /* T6 bus driver interface */
186 static int t6_probe(device_t);
187 static device_method_t t6_methods[] = {
188 	DEVMETHOD(device_probe,		t6_probe),
189 	DEVMETHOD(device_attach,	t4_attach),
190 	DEVMETHOD(device_detach,	t4_detach),
191 
192 	DEVMETHOD(t4_is_main_ready,	t4_ready),
193 	DEVMETHOD(t4_read_port_device,	t4_read_port_device),
194 
195 	DEVMETHOD_END
196 };
197 static driver_t t6_driver = {
198 	"t6nex",
199 	t6_methods,
200 	sizeof(struct adapter)
201 };
202 
203 
204 /* T6 port (cc) interface */
205 static driver_t cc_driver = {
206 	"cc",
207 	cxgbe_methods,
208 	sizeof(struct port_info)
209 };
210 
211 /* T6 VI (vcc) interface */
212 static driver_t vcc_driver = {
213 	"vcc",
214 	vcxgbe_methods,
215 	sizeof(struct vi_info)
216 };
217 
218 /* ifnet + media interface */
219 static void cxgbe_init(void *);
220 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
221 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
222 static void cxgbe_qflush(struct ifnet *);
223 static int cxgbe_media_change(struct ifnet *);
224 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
225 
226 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
227 
228 /*
229  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
230  * then ADAPTER_LOCK, then t4_uld_list_lock.
231  */
232 static struct sx t4_list_lock;
233 SLIST_HEAD(, adapter) t4_list;
234 #ifdef TCP_OFFLOAD
235 static struct sx t4_uld_list_lock;
236 SLIST_HEAD(, uld_info) t4_uld_list;
237 #endif
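/*
 * Illustrative nesting only (a sketch, not code from this driver): a
 * thread that needs all three locks must acquire them in the order
 * stated above.
 *
 *	sx_slock(&t4_list_lock);
 *	ADAPTER_LOCK(sc);
 *	sx_slock(&t4_uld_list_lock);
 *	...
 *	sx_sunlock(&t4_uld_list_lock);
 *	ADAPTER_UNLOCK(sc);
 *	sx_sunlock(&t4_list_lock);
 */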
238 
239 /*
240  * Tunables.  See tweak_tunables() too.
241  *
242  * Each tunable is set to a default value here if it's known at compile-time.
243  * Otherwise it is set to -n as an indication to tweak_tunables() that it should
244  * provide a reasonable default (up to n) when the driver is loaded.
245  *
246  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
247  * T5 are under hw.cxl.
248  */
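/*
 * Example (loader.conf(5)): setting hw.cxgbe.ntxq=4 requests exactly 4 NIC
 * tx queues per port, whereas the compiled-in default of -NTXQ (-16) asks
 * tweak_tunables() to pick a reasonable value of up to 16 at load time.
 */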
249 
250 /*
251  * Number of queues for tx and rx, NIC and offload.
252  */
253 #define NTXQ 16
254 int t4_ntxq = -NTXQ;
255 TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
256 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */
257 
258 #define NRXQ 8
259 int t4_nrxq = -NRXQ;
260 TUNABLE_INT("hw.cxgbe.nrxq", &t4_nrxq);
261 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */
262 
263 #define NTXQ_VI 1
264 static int t4_ntxq_vi = -NTXQ_VI;
265 TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
266 
267 #define NRXQ_VI 1
268 static int t4_nrxq_vi = -NRXQ_VI;
269 TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
270 
271 static int t4_rsrv_noflowq = 0;
272 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
273 
274 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
275 #define NOFLDTXQ 8
276 static int t4_nofldtxq = -NOFLDTXQ;
277 TUNABLE_INT("hw.cxgbe.nofldtxq", &t4_nofldtxq);
278 
279 #define NOFLDRXQ 2
280 static int t4_nofldrxq = -NOFLDRXQ;
281 TUNABLE_INT("hw.cxgbe.nofldrxq", &t4_nofldrxq);
282 
283 #define NOFLDTXQ_VI 1
284 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
285 TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
286 
287 #define NOFLDRXQ_VI 1
288 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
289 TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
290 
291 #define TMR_IDX_OFLD 1
292 int t4_tmr_idx_ofld = TMR_IDX_OFLD;
293 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);
294 
295 #define PKTC_IDX_OFLD (-1)
296 int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
297 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);
298 
299 /* 0 means chip/fw default, non-zero number is value in microseconds */
300 static u_long t4_toe_keepalive_idle = 0;
301 TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);
302 
303 /* 0 means chip/fw default, non-zero number is value in microseconds */
304 static u_long t4_toe_keepalive_interval = 0;
305 TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);
306 
307 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
308 static int t4_toe_keepalive_count = 0;
309 TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);
310 
311 /* 0 means chip/fw default, non-zero number is value in microseconds */
312 static u_long t4_toe_rexmt_min = 0;
313 TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);
314 
315 /* 0 means chip/fw default, non-zero number is value in microseconds */
316 static u_long t4_toe_rexmt_max = 0;
317 TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);
318 
319 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
320 static int t4_toe_rexmt_count = 0;
321 TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);
322 
323 /* -1 means chip/fw default, other values are raw backoff values to use */
324 static int t4_toe_rexmt_backoff[16] = {
325 	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
326 };
327 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
328 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
329 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
330 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
331 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
332 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
333 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
334 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
335 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
336 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
337 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
338 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
339 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
340 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
341 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
342 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
343 #endif
344 
345 #ifdef DEV_NETMAP
346 #define NNMTXQ_VI 2
347 static int t4_nnmtxq_vi = -NNMTXQ_VI;
348 TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
349 
350 #define NNMRXQ_VI 2
351 static int t4_nnmrxq_vi = -NNMRXQ_VI;
352 TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
353 #endif
354 
355 /*
356  * Holdoff parameters for ports.
357  */
358 #define TMR_IDX 1
359 int t4_tmr_idx = TMR_IDX;
360 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx", &t4_tmr_idx);
361 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */
362 
363 #define PKTC_IDX (-1)
364 int t4_pktc_idx = PKTC_IDX;
365 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx", &t4_pktc_idx);
366 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */
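/*
 * Note: tmr_idx selects one of the SGE holdoff timer values and pktc_idx
 * selects one of the packet-count thresholds programmed into the chip.  A
 * pktc_idx of -1 disables packet-count based interrupt holdoff.
 */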
367 
368 /*
369  * Size (# of entries) of each tx and rx queue.
370  */
371 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
372 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
373 
374 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
375 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
376 
377 /*
378  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
379  */
380 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
381 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
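/*
 * e.g. hw.cxgbe.interrupt_types=4 restricts the driver to MSI-X, 2 to MSI,
 * and 1 to INTx.  The default of 7 allows all three and the driver uses the
 * best type that can be allocated.
 */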
382 
383 /*
384  * Configuration file.  All the _CF names here are special.
385  */
386 #define DEFAULT_CF	"default"
387 #define BUILTIN_CF	"built-in"
388 #define FLASH_CF	"flash"
389 #define UWIRE_CF	"uwire"
390 #define FPGA_CF		"fpga"
391 static char t4_cfg_file[32] = DEFAULT_CF;
392 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
393 
394 /*
395  * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
396  * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
397  * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
398  *            mark or when signalled to do so, 0 to never emit PAUSE.
399  */
400 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
401 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
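/*
 * e.g. hw.cxgbe.pause_settings=1 heeds incoming PAUSE but never transmits
 * it, 2 transmits PAUSE but ignores incoming frames, and 0 disables flow
 * control in both directions.  The default of 3 enables both.
 */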
402 
403 /*
404  * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
405  * FEC_RESERVED respectively).
406  * -1 to run with the firmware default.
407  *  0 to disable FEC.
408  */
409 static int t4_fec = -1;
410 TUNABLE_INT("hw.cxgbe.fec", &t4_fec);
411 
412 /*
413  * Link autonegotiation.
414  * -1 to run with the firmware default.
415  *  0 to disable.
416  *  1 to enable.
417  */
418 static int t4_autoneg = -1;
419 TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);
420 
421 /*
422  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
423  * encouraged respectively).
424  */
425 static unsigned int t4_fw_install = 1;
426 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
427 
428 /*
429  * ASIC features that will be used.  Disable the ones you don't want so that the
430  * chip resources aren't wasted on features that will not be used.
431  */
432 static int t4_nbmcaps_allowed = 0;
433 TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);
434 
435 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
436 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
437 
438 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
439     FW_CAPS_CONFIG_SWITCH_EGRESS;
440 TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);
441 
442 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
443 	FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
444 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
445 
446 static int t4_toecaps_allowed = -1;
447 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
448 
449 static int t4_rdmacaps_allowed = -1;
450 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
451 
452 static int t4_cryptocaps_allowed = -1;
453 TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);
454 
455 static int t4_iscsicaps_allowed = -1;
456 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
457 
458 static int t4_fcoecaps_allowed = 0;
459 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
460 
461 static int t5_write_combine = 0;
462 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
463 
464 static int t4_num_vis = 1;
465 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
466 /*
467  * PCIe Relaxed Ordering.
468  * -1: driver should figure out a good value.
469  * 0: disable RO.
470  * 1: enable RO.
471  * 2: leave RO alone.
472  */
473 static int pcie_relaxed_ordering = -1;
474 TUNABLE_INT("hw.cxgbe.pcie_relaxed_ordering", &pcie_relaxed_ordering);
475 
476 static int t4_panic_on_fatal_err = 0;
477 TUNABLE_INT("hw.cxgbe.panic_on_fatal_err", &t4_panic_on_fatal_err);
478 
479 #ifdef TCP_OFFLOAD
480 /*
481  * TOE tunables.
482  */
483 static int t4_cop_managed_offloading = 0;
484 TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading);
485 #endif
486 
487 /* Functions used by VIs to obtain unique MAC addresses for each VI. */
488 static int vi_mac_funcs[] = {
489 	FW_VI_FUNC_ETH,
490 	FW_VI_FUNC_OFLD,
491 	FW_VI_FUNC_IWARP,
492 	FW_VI_FUNC_OPENISCSI,
493 	FW_VI_FUNC_OPENFCOE,
494 	FW_VI_FUNC_FOISCSI,
495 	FW_VI_FUNC_FOFCOE,
496 };
497 
498 struct intrs_and_queues {
499 	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
500 	uint16_t num_vis;	/* number of VIs for each port */
501 	uint16_t nirq;		/* Total # of vectors */
502 	uint16_t ntxq;		/* # of NIC txq's for each port */
503 	uint16_t nrxq;		/* # of NIC rxq's for each port */
504 	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
505 	uint16_t nofldrxq;	/* # of TOE rxq's for each port */
506 
507 	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
508 	uint16_t ntxq_vi;	/* # of NIC txq's */
509 	uint16_t nrxq_vi;	/* # of NIC rxq's */
510 	uint16_t nofldtxq_vi;	/* # of TOE txq's */
511 	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
512 	uint16_t nnmtxq_vi;	/* # of netmap txq's */
513 	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
514 };
515 
516 static void setup_memwin(struct adapter *);
517 static void position_memwin(struct adapter *, int, uint32_t);
518 static int validate_mem_range(struct adapter *, uint32_t, int);
519 static int fwmtype_to_hwmtype(int);
520 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
521     uint32_t *);
522 static int fixup_devlog_params(struct adapter *);
523 static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
524 static int prep_firmware(struct adapter *);
525 static int partition_resources(struct adapter *, const struct firmware *,
526     const char *);
527 static int get_params__pre_init(struct adapter *);
528 static int get_params__post_init(struct adapter *);
529 static int set_params__post_init(struct adapter *);
530 static void t4_set_desc(struct adapter *);
531 static void build_medialist(struct port_info *, struct ifmedia *);
532 static void init_l1cfg(struct port_info *);
533 static int apply_l1cfg(struct port_info *);
534 static int cxgbe_init_synchronized(struct vi_info *);
535 static int cxgbe_uninit_synchronized(struct vi_info *);
536 static void quiesce_txq(struct adapter *, struct sge_txq *);
537 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
538 static void quiesce_iq(struct adapter *, struct sge_iq *);
539 static void quiesce_fl(struct adapter *, struct sge_fl *);
540 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
541     driver_intr_t *, void *, char *);
542 static int t4_free_irq(struct adapter *, struct irq *);
543 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
544 static void vi_refresh_stats(struct adapter *, struct vi_info *);
545 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
546 static void cxgbe_tick(void *);
547 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
548 static void cxgbe_sysctls(struct port_info *);
549 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
550 static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
551 static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
552 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
553 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
554 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
555 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
556 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
557 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
558 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
559 static int sysctl_fec(SYSCTL_HANDLER_ARGS);
560 static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
561 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
562 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
563 static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
564 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
565 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
566 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
567 static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
568 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
569 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
570 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
571 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
572 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
573 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
574 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
575 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
576 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
577 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
578 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
579 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
580 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
581 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
582 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
583 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
584 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
585 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
586 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
587 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
588 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
589 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
590 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
591 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
592 static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
593 #ifdef TCP_OFFLOAD
594 static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
595 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
596 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
597 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
598 static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
599 static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
600 static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
601 static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
602 #endif
603 static int get_sge_context(struct adapter *, struct t4_sge_context *);
604 static int load_fw(struct adapter *, struct t4_data *);
605 static int load_cfg(struct adapter *, struct t4_data *);
606 static int load_boot(struct adapter *, struct t4_bootrom *);
607 static int load_bootcfg(struct adapter *, struct t4_data *);
608 static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
609 static void free_offload_policy(struct t4_offload_policy *);
610 static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
611 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
612 static int read_i2c(struct adapter *, struct t4_i2c_data *);
613 #ifdef TCP_OFFLOAD
614 static int toe_capability(struct vi_info *, int);
615 #endif
616 static int mod_event(module_t, int, void *);
617 static int notify_siblings(device_t, int);
618 
619 struct {
620 	uint16_t device;
621 	char *desc;
622 } t4_pciids[] = {
623 	{0xa000, "Chelsio Terminator 4 FPGA"},
624 	{0x4400, "Chelsio T440-dbg"},
625 	{0x4401, "Chelsio T420-CR"},
626 	{0x4402, "Chelsio T422-CR"},
627 	{0x4403, "Chelsio T440-CR"},
628 	{0x4404, "Chelsio T420-BCH"},
629 	{0x4405, "Chelsio T440-BCH"},
630 	{0x4406, "Chelsio T440-CH"},
631 	{0x4407, "Chelsio T420-SO"},
632 	{0x4408, "Chelsio T420-CX"},
633 	{0x4409, "Chelsio T420-BT"},
634 	{0x440a, "Chelsio T404-BT"},
635 	{0x440e, "Chelsio T440-LP-CR"},
636 }, t5_pciids[] = {
637 	{0xb000, "Chelsio Terminator 5 FPGA"},
638 	{0x5400, "Chelsio T580-dbg"},
639 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
640 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
641 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
642 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
643 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
644 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
645 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
646 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
647 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
648 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
649 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
650 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
651 	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
652 	{0x5418,  "Chelsio T540-BT"},		/* 4 x 10GBaseT */
653 	{0x5419,  "Chelsio T540-LP-BT"},	/* 4 x 10GBaseT */
654 	{0x541a,  "Chelsio T540-SO-BT"},	/* 4 x 10GBaseT, nomem */
655 	{0x541b,  "Chelsio T540-SO-CR"},	/* 4 x 10G, nomem */
656 }, t6_pciids[] = {
657 	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
658 	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
659 	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
660 	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
661 	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
662 	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
663 	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
664 	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
665 	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
666 	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
667 	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
668 	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
669 	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
670 	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
671 	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
672 	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */
673 
674 	/* Custom */
675 	{0x6480, "Custom T6225-CR"},
676 	{0x6481, "Custom T62100-CR"},
677 	{0x6482, "Custom T6225-CR"},
678 	{0x6483, "Custom T62100-CR"},
679 	{0x6484, "Custom T64100-CR"},
680 	{0x6485, "Custom T6240-SO"},
681 	{0x6486, "Custom T6225-SO-CR"},
682 	{0x6487, "Custom T6225-CR"},
683 };
684 
685 #ifdef TCP_OFFLOAD
686 /*
687  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
688  * exactly the same for both rxq and ofld_rxq.
689  */
690 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
691 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
692 #endif
693 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
694 
695 static int
696 t4_probe(device_t dev)
697 {
698 	int i;
699 	uint16_t v = pci_get_vendor(dev);
700 	uint16_t d = pci_get_device(dev);
701 	uint8_t f = pci_get_function(dev);
702 
703 	if (v != PCI_VENDOR_ID_CHELSIO)
704 		return (ENXIO);
705 
706 	/* Attach only to PF0 of the FPGA */
707 	if (d == 0xa000 && f != 0)
708 		return (ENXIO);
709 
710 	for (i = 0; i < nitems(t4_pciids); i++) {
711 		if (d == t4_pciids[i].device) {
712 			device_set_desc(dev, t4_pciids[i].desc);
713 			return (BUS_PROBE_DEFAULT);
714 		}
715 	}
716 
717 	return (ENXIO);
718 }
719 
720 static int
721 t5_probe(device_t dev)
722 {
723 	int i;
724 	uint16_t v = pci_get_vendor(dev);
725 	uint16_t d = pci_get_device(dev);
726 	uint8_t f = pci_get_function(dev);
727 
728 	if (v != PCI_VENDOR_ID_CHELSIO)
729 		return (ENXIO);
730 
731 	/* Attach only to PF0 of the FPGA */
732 	if (d == 0xb000 && f != 0)
733 		return (ENXIO);
734 
735 	for (i = 0; i < nitems(t5_pciids); i++) {
736 		if (d == t5_pciids[i].device) {
737 			device_set_desc(dev, t5_pciids[i].desc);
738 			return (BUS_PROBE_DEFAULT);
739 		}
740 	}
741 
742 	return (ENXIO);
743 }
744 
745 static int
746 t6_probe(device_t dev)
747 {
748 	int i;
749 	uint16_t v = pci_get_vendor(dev);
750 	uint16_t d = pci_get_device(dev);
751 
752 	if (v != PCI_VENDOR_ID_CHELSIO)
753 		return (ENXIO);
754 
755 	for (i = 0; i < nitems(t6_pciids); i++) {
756 		if (d == t6_pciids[i].device) {
757 			device_set_desc(dev, t6_pciids[i].desc);
758 			return (BUS_PROBE_DEFAULT);
759 		}
760 	}
761 
762 	return (ENXIO);
763 }
764 
765 static void
766 t5_attribute_workaround(device_t dev)
767 {
768 	device_t root_port;
769 	uint32_t v;
770 
771 	/*
772 	 * The T5 chips do not properly echo the No Snoop and Relaxed
773 	 * Ordering attributes when replying to a TLP from a Root
774 	 * Port.  As a workaround, find the parent Root Port and
775 	 * disable No Snoop and Relaxed Ordering.  Note that this
776 	 * affects all devices under this root port.
777 	 */
778 	root_port = pci_find_pcie_root_port(dev);
779 	if (root_port == NULL) {
780 		device_printf(dev, "Unable to find parent root port\n");
781 		return;
782 	}
783 
784 	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
785 	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
786 	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
787 	    0)
788 		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
789 		    device_get_nameunit(root_port));
790 }
791 
792 static const struct devnames devnames[] = {
793 	{
794 		.nexus_name = "t4nex",
795 		.ifnet_name = "cxgbe",
796 		.vi_ifnet_name = "vcxgbe",
797 		.pf03_drv_name = "t4iov",
798 		.vf_nexus_name = "t4vf",
799 		.vf_ifnet_name = "cxgbev"
800 	}, {
801 		.nexus_name = "t5nex",
802 		.ifnet_name = "cxl",
803 		.vi_ifnet_name = "vcxl",
804 		.pf03_drv_name = "t5iov",
805 		.vf_nexus_name = "t5vf",
806 		.vf_ifnet_name = "cxlv"
807 	}, {
808 		.nexus_name = "t6nex",
809 		.ifnet_name = "cc",
810 		.vi_ifnet_name = "vcc",
811 		.pf03_drv_name = "t6iov",
812 		.vf_nexus_name = "t6vf",
813 		.vf_ifnet_name = "ccv"
814 	}
815 };
816 
817 void
818 t4_init_devnames(struct adapter *sc)
819 {
820 	int id;
821 
822 	id = chip_id(sc);
823 	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
824 		sc->names = &devnames[id - CHELSIO_T4];
825 	else {
826 		device_printf(sc->dev, "chip id %d is not supported.\n", id);
827 		sc->names = NULL;
828 	}
829 }
830 
831 static int
832 t4_attach(device_t dev)
833 {
834 	struct adapter *sc;
835 	int rc = 0, i, j, rqidx, tqidx, nports;
836 	struct make_dev_args mda;
837 	struct intrs_and_queues iaq;
838 	struct sge *s;
839 	uint32_t *buf;
840 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
841 	int ofld_tqidx;
842 #endif
843 #ifdef TCP_OFFLOAD
844 	int ofld_rqidx;
845 #endif
846 #ifdef DEV_NETMAP
847 	int nm_rqidx, nm_tqidx;
848 #endif
849 	int num_vis;
850 
851 	sc = device_get_softc(dev);
852 	sc->dev = dev;
853 	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
854 
855 	if ((pci_get_device(dev) & 0xff00) == 0x5400)
856 		t5_attribute_workaround(dev);
857 	pci_enable_busmaster(dev);
858 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
859 		uint32_t v;
860 
861 		pci_set_max_read_req(dev, 4096);
862 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
863 		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
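		/*
		 * PCIEM_CTL_MAX_PAYLOAD is bits 7:5 of the PCIe device
		 * control register, so an encoded field value of 2 yields
		 * an MPS of 128 << 2 = 512 bytes.
		 */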
864 		if (pcie_relaxed_ordering == 0 &&
865 		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
866 			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
867 			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
868 		} else if (pcie_relaxed_ordering == 1 &&
869 		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
870 			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
871 			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
872 		}
873 	}
874 
875 	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
876 	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
877 	sc->traceq = -1;
878 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
879 	    device_get_nameunit(dev));
880 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
881 
882 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
883 	    device_get_nameunit(dev));
884 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
885 	t4_add_adapter(sc);
886 
887 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
888 	TAILQ_INIT(&sc->sfl);
889 	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
890 
891 	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
892 
893 	sc->policy = NULL;
894 	rw_init(&sc->policy_lock, "connection offload policy");
895 
896 	rc = t4_map_bars_0_and_4(sc);
897 	if (rc != 0)
898 		goto done; /* error message displayed already */
899 
900 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
901 
902 	/* Prepare the adapter for operation. */
903 	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
904 	rc = -t4_prep_adapter(sc, buf);
905 	free(buf, M_CXGBE);
906 	if (rc != 0) {
907 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
908 		goto done;
909 	}
910 
911 	/*
912 	 * This is the real PF# to which we're attaching.  Works from within PCI
913 	 * passthrough environments too, where pci_get_function() could return a
914 	 * different PF# depending on the passthrough configuration.  We need to
915 	 * use the real PF# in all our communication with the firmware.
916 	 */
917 	j = t4_read_reg(sc, A_PL_WHOAMI);
918 	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
919 	sc->mbox = sc->pf;
920 
921 	t4_init_devnames(sc);
922 	if (sc->names == NULL) {
923 		rc = ENOTSUP;
924 		goto done; /* error message displayed already */
925 	}
926 
927 	/*
928 	 * Do this really early, with the memory windows set up even before the
929 	 * character device.  The userland tool's register i/o and mem read
930 	 * will work even in "recovery mode".
931 	 */
932 	setup_memwin(sc);
933 	if (t4_init_devlog_params(sc, 0) == 0)
934 		fixup_devlog_params(sc);
935 	make_dev_args_init(&mda);
936 	mda.mda_devsw = &t4_cdevsw;
937 	mda.mda_uid = UID_ROOT;
938 	mda.mda_gid = GID_WHEEL;
939 	mda.mda_mode = 0600;
940 	mda.mda_si_drv1 = sc;
941 	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
942 	if (rc != 0)
943 		device_printf(dev, "failed to create nexus char device: %d.\n",
944 		    rc);
945 
946 	/* Go no further if recovery mode has been requested. */
947 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
948 		device_printf(dev, "recovery mode.\n");
949 		goto done;
950 	}
951 
952 #if defined(__i386__)
953 	if ((cpu_feature & CPUID_CX8) == 0) {
954 		device_printf(dev, "64 bit atomics not available.\n");
955 		rc = ENOTSUP;
956 		goto done;
957 	}
958 #endif
959 
960 	/* Prepare the firmware for operation */
961 	rc = prep_firmware(sc);
962 	if (rc != 0)
963 		goto done; /* error message displayed already */
964 
965 	rc = get_params__post_init(sc);
966 	if (rc != 0)
967 		goto done; /* error message displayed already */
968 
969 	rc = set_params__post_init(sc);
970 	if (rc != 0)
971 		goto done; /* error message displayed already */
972 
973 	rc = t4_map_bar_2(sc);
974 	if (rc != 0)
975 		goto done; /* error message displayed already */
976 
977 	rc = t4_create_dma_tag(sc);
978 	if (rc != 0)
979 		goto done; /* error message displayed already */
980 
981 	/*
982 	 * First pass over all the ports - allocate VIs and initialize some
983 	 * basic parameters like mac address, port type, etc.
984 	 */
985 	for_each_port(sc, i) {
986 		struct port_info *pi;
987 
988 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
989 		sc->port[i] = pi;
990 
991 		/* These must be set before t4_port_init */
992 		pi->adapter = sc;
993 		pi->port_id = i;
994 		/*
995 		 * XXX: vi[0] is special so we can't delay this allocation until
996 		 * pi->nvi's final value is known.
997 		 */
998 		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
999 		    M_ZERO | M_WAITOK);
1000 
1001 		/*
1002 		 * Allocate the "main" VI and initialize parameters
1003 		 * like mac addr.
1004 		 */
1005 		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
1006 		if (rc != 0) {
1007 			device_printf(dev, "unable to initialize port %d: %d\n",
1008 			    i, rc);
1009 			free(pi->vi, M_CXGBE);
1010 			free(pi, M_CXGBE);
1011 			sc->port[i] = NULL;
1012 			goto done;
1013 		}
1014 
1015 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
1016 		    device_get_nameunit(dev), i);
1017 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
1018 		sc->chan_map[pi->tx_chan] = i;
1019 
1020 		/* All VIs on this port share this media. */
1021 		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1022 		    cxgbe_media_status);
1023 
1024 		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
1025 		if (pi->dev == NULL) {
1026 			device_printf(dev,
1027 			    "failed to add device for port %d.\n", i);
1028 			rc = ENXIO;
1029 			goto done;
1030 		}
1031 		pi->vi[0].dev = pi->dev;
1032 		device_set_softc(pi->dev, pi);
1033 	}
1034 
1035 	/*
1036 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1037 	 */
1038 	nports = sc->params.nports;
1039 	rc = cfg_itype_and_nqueues(sc, &iaq);
1040 	if (rc != 0)
1041 		goto done; /* error message displayed already */
1042 
1043 	num_vis = iaq.num_vis;
1044 	sc->intr_type = iaq.intr_type;
1045 	sc->intr_count = iaq.nirq;
1046 
1047 	s = &sc->sge;
1048 	s->nrxq = nports * iaq.nrxq;
1049 	s->ntxq = nports * iaq.ntxq;
1050 	if (num_vis > 1) {
1051 		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
1052 		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
1053 	}
1054 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
1055 	s->neq += nports + 1;	/* ctrl queues: 1 per port + 1 mgmt */
1056 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
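	/*
	 * Worked example (assumed configuration: 4 ports, num_vis = 1,
	 * iaq.ntxq = 16, iaq.nrxq = 8): s->ntxq = 64 and s->nrxq = 32, so
	 * s->neq = 64 + 32 + 4 + 1 = 101 and s->niq = 32 + 1 = 33 before
	 * any of the TOE/netmap queues below are added in.
	 */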
1057 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1058 	if (is_offload(sc) || is_ethoffload(sc)) {
1059 		s->nofldtxq = nports * iaq.nofldtxq;
1060 		if (num_vis > 1)
1061 			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
1062 		s->neq += s->nofldtxq;
1063 
1064 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1065 		    M_CXGBE, M_ZERO | M_WAITOK);
1066 	}
1067 #endif
1068 #ifdef TCP_OFFLOAD
1069 	if (is_offload(sc)) {
1070 		s->nofldrxq = nports * iaq.nofldrxq;
1071 		if (num_vis > 1)
1072 			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
1073 		s->neq += s->nofldrxq;	/* free list */
1074 		s->niq += s->nofldrxq;
1075 
1076 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1077 		    M_CXGBE, M_ZERO | M_WAITOK);
1078 	}
1079 #endif
1080 #ifdef DEV_NETMAP
1081 	if (num_vis > 1) {
1082 		s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
1083 		s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
1084 	}
1085 	s->neq += s->nnmtxq + s->nnmrxq;
1086 	s->niq += s->nnmrxq;
1087 
1088 	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1089 	    M_CXGBE, M_ZERO | M_WAITOK);
1090 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1091 	    M_CXGBE, M_ZERO | M_WAITOK);
1092 #endif
1093 
1094 	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
1095 	    M_ZERO | M_WAITOK);
1096 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1097 	    M_ZERO | M_WAITOK);
1098 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1099 	    M_ZERO | M_WAITOK);
1100 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
1101 	    M_ZERO | M_WAITOK);
1102 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
1103 	    M_ZERO | M_WAITOK);
1104 
1105 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1106 	    M_ZERO | M_WAITOK);
1107 
1108 	t4_init_l2t(sc, M_WAITOK);
1109 	t4_init_smt(sc, M_WAITOK);
1110 	t4_init_tx_sched(sc);
1111 #ifdef RATELIMIT
1112 	t4_init_etid_table(sc);
1113 #endif
1114 
1115 	/*
1116 	 * Second pass over the ports.  This time we know the number of rx and
1117 	 * tx queues that each port should get.
1118 	 */
1119 	rqidx = tqidx = 0;
1120 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1121 	ofld_tqidx = 0;
1122 #endif
1123 #ifdef TCP_OFFLOAD
1124 	ofld_rqidx = 0;
1125 #endif
1126 #ifdef DEV_NETMAP
1127 	nm_rqidx = nm_tqidx = 0;
1128 #endif
1129 	for_each_port(sc, i) {
1130 		struct port_info *pi = sc->port[i];
1131 		struct vi_info *vi;
1132 
1133 		if (pi == NULL)
1134 			continue;
1135 
1136 		pi->nvi = num_vis;
1137 		for_each_vi(pi, j, vi) {
1138 			vi->pi = pi;
1139 			vi->qsize_rxq = t4_qsize_rxq;
1140 			vi->qsize_txq = t4_qsize_txq;
1141 
1142 			vi->first_rxq = rqidx;
1143 			vi->first_txq = tqidx;
1144 			vi->tmr_idx = t4_tmr_idx;
1145 			vi->pktc_idx = t4_pktc_idx;
1146 			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
1147 			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
1148 
1149 			rqidx += vi->nrxq;
1150 			tqidx += vi->ntxq;
1151 
1152 			if (j == 0 && vi->ntxq > 1)
1153 				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
1154 			else
1155 				vi->rsrv_noflowq = 0;
1156 
1157 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1158 			vi->first_ofld_txq = ofld_tqidx;
1159 			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
1160 			ofld_tqidx += vi->nofldtxq;
1161 #endif
1162 #ifdef TCP_OFFLOAD
1163 			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
1164 			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
1165 			vi->first_ofld_rxq = ofld_rqidx;
1166 			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
1167 
1168 			ofld_rqidx += vi->nofldrxq;
1169 #endif
1170 #ifdef DEV_NETMAP
1171 			if (j > 0) {
1172 				vi->first_nm_rxq = nm_rqidx;
1173 				vi->first_nm_txq = nm_tqidx;
1174 				vi->nnmrxq = iaq.nnmrxq_vi;
1175 				vi->nnmtxq = iaq.nnmtxq_vi;
1176 				nm_rqidx += vi->nnmrxq;
1177 				nm_tqidx += vi->nnmtxq;
1178 			}
1179 #endif
1180 		}
1181 	}
1182 
1183 	rc = t4_setup_intr_handlers(sc);
1184 	if (rc != 0) {
1185 		device_printf(dev,
1186 		    "failed to setup interrupt handlers: %d\n", rc);
1187 		goto done;
1188 	}
1189 
1190 	rc = bus_generic_probe(dev);
1191 	if (rc != 0) {
1192 		device_printf(dev, "failed to probe child drivers: %d\n", rc);
1193 		goto done;
1194 	}
1195 
1196 	/*
1197 	 * Ensure thread-safe mailbox access (in debug builds).
1198 	 *
1199 	 * So far this was the only thread accessing the mailbox but various
1200 	 * ifnets and sysctls are about to be created and their handlers/ioctls
1201 	 * will access the mailbox from different threads.
1202 	 */
1203 	sc->flags |= CHK_MBOX_ACCESS;
1204 
1205 	rc = bus_generic_attach(dev);
1206 	if (rc != 0) {
1207 		device_printf(dev,
1208 		    "failed to attach all child ports: %d\n", rc);
1209 		goto done;
1210 	}
1211 
1212 	device_printf(dev,
1213 	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1214 	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1215 	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1216 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1217 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
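	/*
	 * The message above looks something like this (illustrative values
	 * only): "PCIe gen3 x8, 4 ports, 18 MSI-X interrupts, 101 eq, 33 iq".
	 */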
1218 
1219 	t4_set_desc(sc);
1220 
1221 	notify_siblings(dev, 0);
1222 
1223 done:
1224 	if (rc != 0 && sc->cdev) {
1225 		/* cdev was created and so cxgbetool works; recover that way. */
1226 		device_printf(dev,
1227 		    "error during attach, adapter is now in recovery mode.\n");
1228 		rc = 0;
1229 	}
1230 
1231 	if (rc != 0)
1232 		t4_detach_common(dev);
1233 	else
1234 		t4_sysctls(sc);
1235 
1236 	return (rc);
1237 }
1238 
1239 static int
1240 t4_ready(device_t dev)
1241 {
1242 	struct adapter *sc;
1243 
1244 	sc = device_get_softc(dev);
1245 	if (sc->flags & FW_OK)
1246 		return (0);
1247 	return (ENXIO);
1248 }
1249 
1250 static int
1251 t4_read_port_device(device_t dev, int port, device_t *child)
1252 {
1253 	struct adapter *sc;
1254 	struct port_info *pi;
1255 
1256 	sc = device_get_softc(dev);
1257 	if (port < 0 || port >= MAX_NPORTS)
1258 		return (EINVAL);
1259 	pi = sc->port[port];
1260 	if (pi == NULL || pi->dev == NULL)
1261 		return (ENXIO);
1262 	*child = pi->dev;
1263 	return (0);
1264 }
1265 
1266 static int
1267 notify_siblings(device_t dev, int detaching)
1268 {
1269 	device_t sibling;
1270 	int error, i;
1271 
1272 	error = 0;
1273 	for (i = 0; i < PCI_FUNCMAX; i++) {
1274 		if (i == pci_get_function(dev))
1275 			continue;
1276 		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1277 		    pci_get_slot(dev), i);
1278 		if (sibling == NULL || !device_is_attached(sibling))
1279 			continue;
1280 		if (detaching)
1281 			error = T4_DETACH_CHILD(sibling);
1282 		else
1283 			(void)T4_ATTACH_CHILD(sibling);
1284 		if (error)
1285 			break;
1286 	}
1287 	return (error);
1288 }
1289 
1290 /*
1291  * Idempotent
1292  */
1293 static int
1294 t4_detach(device_t dev)
1295 {
1296 	struct adapter *sc;
1297 	int rc;
1298 
1299 	sc = device_get_softc(dev);
1300 
1301 	rc = notify_siblings(dev, 1);
1302 	if (rc) {
1303 		device_printf(dev,
1304 		    "failed to detach sibling devices: %d\n", rc);
1305 		return (rc);
1306 	}
1307 
1308 	return (t4_detach_common(dev));
1309 }
1310 
1311 int
1312 t4_detach_common(device_t dev)
1313 {
1314 	struct adapter *sc;
1315 	struct port_info *pi;
1316 	int i, rc;
1317 
1318 	sc = device_get_softc(dev);
1319 
1320 	if (sc->cdev) {
1321 		destroy_dev(sc->cdev);
1322 		sc->cdev = NULL;
1323 	}
1324 
1325 	sc->flags &= ~CHK_MBOX_ACCESS;
1326 	if (sc->flags & FULL_INIT_DONE) {
1327 		if (!(sc->flags & IS_VF))
1328 			t4_intr_disable(sc);
1329 	}
1330 
1331 	if (device_is_attached(dev)) {
1332 		rc = bus_generic_detach(dev);
1333 		if (rc) {
1334 			device_printf(dev,
1335 			    "failed to detach child devices: %d\n", rc);
1336 			return (rc);
1337 		}
1338 	}
1339 
1340 	for (i = 0; i < sc->intr_count; i++)
1341 		t4_free_irq(sc, &sc->irq[i]);
1342 
1343 	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1344 		t4_free_tx_sched(sc);
1345 
1346 	for (i = 0; i < MAX_NPORTS; i++) {
1347 		pi = sc->port[i];
1348 		if (pi) {
1349 			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1350 			if (pi->dev)
1351 				device_delete_child(dev, pi->dev);
1352 
1353 			mtx_destroy(&pi->pi_lock);
1354 			free(pi->vi, M_CXGBE);
1355 			free(pi, M_CXGBE);
1356 		}
1357 	}
1358 
1359 	device_delete_children(dev);
1360 
1361 	if (sc->flags & FULL_INIT_DONE)
1362 		adapter_full_uninit(sc);
1363 
1364 	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1365 		t4_fw_bye(sc, sc->mbox);
1366 
1367 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1368 		pci_release_msi(dev);
1369 
1370 	if (sc->regs_res)
1371 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1372 		    sc->regs_res);
1373 
1374 	if (sc->udbs_res)
1375 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1376 		    sc->udbs_res);
1377 
1378 	if (sc->msix_res)
1379 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1380 		    sc->msix_res);
1381 
1382 	if (sc->l2t)
1383 		t4_free_l2t(sc->l2t);
1384 	if (sc->smt)
1385 		t4_free_smt(sc->smt);
1386 #ifdef RATELIMIT
1387 	t4_free_etid_table(sc);
1388 #endif
1389 
1390 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1391 	free(sc->sge.ofld_txq, M_CXGBE);
1392 #endif
1393 #ifdef TCP_OFFLOAD
1394 	free(sc->sge.ofld_rxq, M_CXGBE);
1395 #endif
1396 #ifdef DEV_NETMAP
1397 	free(sc->sge.nm_rxq, M_CXGBE);
1398 	free(sc->sge.nm_txq, M_CXGBE);
1399 #endif
1400 	free(sc->irq, M_CXGBE);
1401 	free(sc->sge.rxq, M_CXGBE);
1402 	free(sc->sge.txq, M_CXGBE);
1403 	free(sc->sge.ctrlq, M_CXGBE);
1404 	free(sc->sge.iqmap, M_CXGBE);
1405 	free(sc->sge.eqmap, M_CXGBE);
1406 	free(sc->tids.ftid_tab, M_CXGBE);
1407 	if (sc->tids.hftid_tab)
1408 		free_hftid_tab(&sc->tids);
1409 	free(sc->tids.atid_tab, M_CXGBE);
1410 	free(sc->tids.tid_tab, M_CXGBE);
1411 	free(sc->tt.tls_rx_ports, M_CXGBE);
1412 	t4_destroy_dma_tag(sc);
1413 	if (mtx_initialized(&sc->sc_lock)) {
1414 		sx_xlock(&t4_list_lock);
1415 		SLIST_REMOVE(&t4_list, sc, adapter, link);
1416 		sx_xunlock(&t4_list_lock);
1417 		mtx_destroy(&sc->sc_lock);
1418 	}
1419 
1420 	callout_drain(&sc->sfl_callout);
1421 	if (mtx_initialized(&sc->tids.ftid_lock)) {
1422 		mtx_destroy(&sc->tids.ftid_lock);
1423 		cv_destroy(&sc->tids.ftid_cv);
1424 	}
1425 	if (mtx_initialized(&sc->tids.atid_lock))
1426 		mtx_destroy(&sc->tids.atid_lock);
1427 	if (mtx_initialized(&sc->sfl_lock))
1428 		mtx_destroy(&sc->sfl_lock);
1429 	if (mtx_initialized(&sc->ifp_lock))
1430 		mtx_destroy(&sc->ifp_lock);
1431 	if (mtx_initialized(&sc->reg_lock))
1432 		mtx_destroy(&sc->reg_lock);
1433 
1434 	if (rw_initialized(&sc->policy_lock)) {
1435 		rw_destroy(&sc->policy_lock);
1436 #ifdef TCP_OFFLOAD
1437 		if (sc->policy != NULL)
1438 			free_offload_policy(sc->policy);
1439 #endif
1440 	}
1441 
1442 	for (i = 0; i < NUM_MEMWIN; i++) {
1443 		struct memwin *mw = &sc->memwin[i];
1444 
1445 		if (rw_initialized(&mw->mw_lock))
1446 			rw_destroy(&mw->mw_lock);
1447 	}
1448 
1449 	bzero(sc, sizeof(*sc));
1450 
1451 	return (0);
1452 }
1453 
1454 static int
1455 cxgbe_probe(device_t dev)
1456 {
1457 	char buf[128];
1458 	struct port_info *pi = device_get_softc(dev);
1459 
1460 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1461 	device_set_desc_copy(dev, buf);
1462 
1463 	return (BUS_PROBE_DEFAULT);
1464 }
1465 
1466 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1467     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1468     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1469 #define T4_CAP_ENABLE (T4_CAP)
1470 
1471 static int
1472 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1473 {
1474 	struct ifnet *ifp;
1475 	struct sbuf *sb;
1476 
1477 	vi->xact_addr_filt = -1;
1478 	callout_init(&vi->tick, 1);
1479 
1480 	/* Allocate an ifnet and set it up */
1481 	ifp = if_alloc(IFT_ETHER);
1482 	if (ifp == NULL) {
1483 		device_printf(dev, "Cannot allocate ifnet\n");
1484 		return (ENOMEM);
1485 	}
1486 	vi->ifp = ifp;
1487 	ifp->if_softc = vi;
1488 
1489 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1490 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1491 
1492 	ifp->if_init = cxgbe_init;
1493 	ifp->if_ioctl = cxgbe_ioctl;
1494 	ifp->if_transmit = cxgbe_transmit;
1495 	ifp->if_qflush = cxgbe_qflush;
1496 	ifp->if_get_counter = cxgbe_get_counter;
1497 #ifdef RATELIMIT
1498 	ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
1499 	ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
1500 	ifp->if_snd_tag_query = cxgbe_snd_tag_query;
1501 	ifp->if_snd_tag_free = cxgbe_snd_tag_free;
1502 #endif
1503 
1504 	ifp->if_capabilities = T4_CAP;
1505 #ifdef TCP_OFFLOAD
1506 	if (vi->nofldrxq != 0)
1507 		ifp->if_capabilities |= IFCAP_TOE;
1508 #endif
1509 #ifdef DEV_NETMAP
1510 	if (vi->nnmrxq != 0)
1511 		ifp->if_capabilities |= IFCAP_NETMAP;
1512 #endif
1513 #ifdef RATELIMIT
1514 	if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0)
1515 		ifp->if_capabilities |= IFCAP_TXRTLMT;
1516 #endif
1517 	ifp->if_capenable = T4_CAP_ENABLE;
1518 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1519 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1520 
1521 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1522 	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1523 	ifp->if_hw_tsomaxsegsize = 65536;
1524 
1525 	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1526 	    EVENTHANDLER_PRI_ANY);
1527 
1528 	ether_ifattach(ifp, vi->hw_addr);
1529 #ifdef DEV_NETMAP
1530 	if (ifp->if_capabilities & IFCAP_NETMAP)
1531 		cxgbe_nm_attach(vi);
1532 #endif
1533 	sb = sbuf_new_auto();
1534 	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1535 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1536 	switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
1537 	case IFCAP_TOE:
1538 		sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
1539 		break;
1540 	case IFCAP_TOE | IFCAP_TXRTLMT:
1541 		sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
1542 		break;
1543 	case IFCAP_TXRTLMT:
1544 		sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
1545 		break;
1546 	}
1547 #endif
1548 #ifdef TCP_OFFLOAD
1549 	if (ifp->if_capabilities & IFCAP_TOE)
1550 		sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
1551 #endif
1552 #ifdef DEV_NETMAP
1553 	if (ifp->if_capabilities & IFCAP_NETMAP)
1554 		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1555 		    vi->nnmtxq, vi->nnmrxq);
1556 #endif
1557 	sbuf_finish(sb);
1558 	device_printf(dev, "%s\n", sbuf_data(sb));
1559 	sbuf_delete(sb);
1560 
1561 	vi_sysctls(vi);
1562 
1563 	return (0);
1564 }
1565 
1566 static int
1567 cxgbe_attach(device_t dev)
1568 {
1569 	struct port_info *pi = device_get_softc(dev);
1570 	struct adapter *sc = pi->adapter;
1571 	struct vi_info *vi;
1572 	int i, rc;
1573 
1574 	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1575 
1576 	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1577 	if (rc)
1578 		return (rc);
1579 
1580 	for_each_vi(pi, i, vi) {
1581 		if (i == 0)
1582 			continue;
1583 		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1584 		if (vi->dev == NULL) {
1585 			device_printf(dev, "failed to add VI %d\n", i);
1586 			continue;
1587 		}
1588 		device_set_softc(vi->dev, vi);
1589 	}
1590 
1591 	cxgbe_sysctls(pi);
1592 
1593 	bus_generic_attach(dev);
1594 
1595 	return (0);
1596 }
1597 
1598 static void
1599 cxgbe_vi_detach(struct vi_info *vi)
1600 {
1601 	struct ifnet *ifp = vi->ifp;
1602 
1603 	ether_ifdetach(ifp);
1604 
1605 	if (vi->vlan_c)
1606 		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1607 
1608 	/* Let detach proceed even if these fail. */
1609 #ifdef DEV_NETMAP
1610 	if (ifp->if_capabilities & IFCAP_NETMAP)
1611 		cxgbe_nm_detach(vi);
1612 #endif
1613 	cxgbe_uninit_synchronized(vi);
1614 	callout_drain(&vi->tick);
1615 	vi_full_uninit(vi);
1616 
1617 	if_free(vi->ifp);
1618 	vi->ifp = NULL;
1619 }
1620 
1621 static int
1622 cxgbe_detach(device_t dev)
1623 {
1624 	struct port_info *pi = device_get_softc(dev);
1625 	struct adapter *sc = pi->adapter;
1626 	int rc;
1627 
1628 	/* Detach the extra VIs first. */
1629 	rc = bus_generic_detach(dev);
1630 	if (rc)
1631 		return (rc);
1632 	device_delete_children(dev);
1633 
1634 	doom_vi(sc, &pi->vi[0]);
1635 
1636 	if (pi->flags & HAS_TRACEQ) {
1637 		sc->traceq = -1;	/* cloner should not create ifnet */
1638 		t4_tracer_port_detach(sc);
1639 	}
1640 
1641 	cxgbe_vi_detach(&pi->vi[0]);
1642 	callout_drain(&pi->tick);
1643 	ifmedia_removeall(&pi->media);
1644 
1645 	end_synchronized_op(sc, 0);
1646 
1647 	return (0);
1648 }
1649 
1650 static void
1651 cxgbe_init(void *arg)
1652 {
1653 	struct vi_info *vi = arg;
1654 	struct adapter *sc = vi->pi->adapter;
1655 
1656 	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1657 		return;
1658 	cxgbe_init_synchronized(vi);
1659 	end_synchronized_op(sc, 0);
1660 }
1661 
1662 static int
1663 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1664 {
1665 	int rc = 0, mtu, flags;
1666 	struct vi_info *vi = ifp->if_softc;
1667 	struct port_info *pi = vi->pi;
1668 	struct adapter *sc = pi->adapter;
1669 	struct ifreq *ifr = (struct ifreq *)data;
1670 	uint32_t mask;
1671 
1672 	switch (cmd) {
1673 	case SIOCSIFMTU:
1674 		mtu = ifr->ifr_mtu;
1675 		if (mtu < ETHERMIN || mtu > MAX_MTU)
1676 			return (EINVAL);
1677 
1678 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1679 		if (rc)
1680 			return (rc);
1681 		ifp->if_mtu = mtu;
1682 		if (vi->flags & VI_INIT_DONE) {
1683 			t4_update_fl_bufsize(ifp);
1684 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1685 				rc = update_mac_settings(ifp, XGMAC_MTU);
1686 		}
1687 		end_synchronized_op(sc, 0);
1688 		break;
1689 
1690 	case SIOCSIFFLAGS:
1691 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
1692 		if (rc)
1693 			return (rc);
1694 
1695 		if (ifp->if_flags & IFF_UP) {
1696 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1697 				flags = vi->if_flags;
1698 				if ((ifp->if_flags ^ flags) &
1699 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1700 					rc = update_mac_settings(ifp,
1701 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1702 				}
1703 			} else {
1704 				rc = cxgbe_init_synchronized(vi);
1705 			}
1706 			vi->if_flags = ifp->if_flags;
1707 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1708 			rc = cxgbe_uninit_synchronized(vi);
1709 		}
1710 		end_synchronized_op(sc, 0);
1711 		break;
1712 
1713 	case SIOCADDMULTI:
1714 	case SIOCDELMULTI:
1715 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
1716 		if (rc)
1717 			return (rc);
1718 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1719 			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1720 		end_synchronized_op(sc, 0);
1721 		break;
1722 
1723 	case SIOCSIFCAP:
1724 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1725 		if (rc)
1726 			return (rc);
1727 
1728 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1729 		if (mask & IFCAP_TXCSUM) {
1730 			ifp->if_capenable ^= IFCAP_TXCSUM;
1731 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1732 
1733 			if (IFCAP_TSO4 & ifp->if_capenable &&
1734 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1735 				ifp->if_capenable &= ~IFCAP_TSO4;
1736 				if_printf(ifp,
1737 				    "tso4 disabled due to -txcsum.\n");
1738 			}
1739 		}
1740 		if (mask & IFCAP_TXCSUM_IPV6) {
1741 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1742 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1743 
1744 			if (IFCAP_TSO6 & ifp->if_capenable &&
1745 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1746 				ifp->if_capenable &= ~IFCAP_TSO6;
1747 				if_printf(ifp,
1748 				    "tso6 disabled due to -txcsum6.\n");
1749 			}
1750 		}
1751 		if (mask & IFCAP_RXCSUM)
1752 			ifp->if_capenable ^= IFCAP_RXCSUM;
1753 		if (mask & IFCAP_RXCSUM_IPV6)
1754 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1755 
1756 		/*
1757 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1758 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1759 		 * sending a TSO request our way, so it's sufficient to toggle
1760 		 * IFCAP_TSOx only.
1761 		 */
1762 		if (mask & IFCAP_TSO4) {
1763 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1764 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1765 				if_printf(ifp, "enable txcsum first.\n");
1766 				rc = EAGAIN;
1767 				goto fail;
1768 			}
1769 			ifp->if_capenable ^= IFCAP_TSO4;
1770 		}
1771 		if (mask & IFCAP_TSO6) {
1772 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1773 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1774 				if_printf(ifp, "enable txcsum6 first.\n");
1775 				rc = EAGAIN;
1776 				goto fail;
1777 			}
1778 			ifp->if_capenable ^= IFCAP_TSO6;
1779 		}
1780 		if (mask & IFCAP_LRO) {
1781 #if defined(INET) || defined(INET6)
1782 			int i;
1783 			struct sge_rxq *rxq;
1784 
1785 			ifp->if_capenable ^= IFCAP_LRO;
1786 			for_each_rxq(vi, i, rxq) {
1787 				if (ifp->if_capenable & IFCAP_LRO)
1788 					rxq->iq.flags |= IQ_LRO_ENABLED;
1789 				else
1790 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1791 			}
1792 #endif
1793 		}
1794 #ifdef TCP_OFFLOAD
1795 		if (mask & IFCAP_TOE) {
1796 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1797 
1798 			rc = toe_capability(vi, enable);
1799 			if (rc != 0)
1800 				goto fail;
1801 
1802 			ifp->if_capenable ^= IFCAP_TOE;
1803 		}
1804 #endif
1805 		if (mask & IFCAP_VLAN_HWTAGGING) {
1806 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1807 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1808 				rc = update_mac_settings(ifp, XGMAC_VLANEX);
1809 		}
1810 		if (mask & IFCAP_VLAN_MTU) {
1811 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1812 
1813 			/* Need to find out how to disable auto-mtu-inflation */
1814 		}
1815 		if (mask & IFCAP_VLAN_HWTSO)
1816 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1817 		if (mask & IFCAP_VLAN_HWCSUM)
1818 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1819 #ifdef RATELIMIT
1820 		if (mask & IFCAP_TXRTLMT)
1821 			ifp->if_capenable ^= IFCAP_TXRTLMT;
1822 #endif
1823 
1824 #ifdef VLAN_CAPABILITIES
1825 		VLAN_CAPABILITIES(ifp);
1826 #endif
1827 fail:
1828 		end_synchronized_op(sc, 0);
1829 		break;
1830 
1831 	case SIOCSIFMEDIA:
1832 	case SIOCGIFMEDIA:
1833 	case SIOCGIFXMEDIA:
1834 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1835 		break;
1836 
1837 	case SIOCGI2C: {
1838 		struct ifi2creq i2c;
1839 
1840 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
1841 		if (rc != 0)
1842 			break;
1843 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1844 			rc = EPERM;
1845 			break;
1846 		}
1847 		if (i2c.len > sizeof(i2c.data)) {
1848 			rc = EINVAL;
1849 			break;
1850 		}
1851 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1852 		if (rc)
1853 			return (rc);
1854 		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
1855 		    i2c.offset, i2c.len, &i2c.data[0]);
1856 		end_synchronized_op(sc, 0);
1857 		if (rc == 0)
1858 			rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
1859 		break;
1860 	}
1861 
1862 	default:
1863 		rc = ether_ioctl(ifp, cmd, data);
1864 	}
1865 
1866 	return (rc);
1867 }
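
/*
 * All of the hardware-touching cases above share the same bracketing: a
 * begin_synchronized_op/end_synchronized_op pair around the work.  A
 * minimal sketch, with "t4xyz" standing in for a real wchan string:
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc)
 *		return (rc);
 *	... modify vi/adapter state here ...
 *	end_synchronized_op(sc, 0);
 */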
1868 
1869 static int
1870 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1871 {
1872 	struct vi_info *vi = ifp->if_softc;
1873 	struct port_info *pi = vi->pi;
1874 	struct adapter *sc = pi->adapter;
1875 	struct sge_txq *txq;
1876 	void *items[1];
1877 	int rc;
1878 
1879 	M_ASSERTPKTHDR(m);
1880 	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
1881 
1882 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1883 		m_freem(m);
1884 		return (ENETDOWN);
1885 	}
1886 
1887 	rc = parse_pkt(sc, &m);
1888 	if (__predict_false(rc != 0)) {
1889 		MPASS(m == NULL);			/* was freed already */
1890 		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
1891 		return (rc);
1892 	}
1893 #ifdef RATELIMIT
1894 	if (m->m_pkthdr.snd_tag != NULL) {
1895 		/* EAGAIN tells the stack we are not the correct interface. */
1896 		if (__predict_false(ifp != m->m_pkthdr.snd_tag->ifp)) {
1897 			m_freem(m);
1898 			return (EAGAIN);
1899 		}
1900 
1901 		return (ethofld_transmit(ifp, m));
1902 	}
1903 #endif
1904 
1905 	/* Select a txq. */
1906 	txq = &sc->sge.txq[vi->first_txq];
1907 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1908 		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1909 		    vi->rsrv_noflowq);
1910 
1911 	items[0] = m;
1912 	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1913 	if (__predict_false(rc != 0))
1914 		m_freem(m);
1915 
1916 	return (rc);
1917 }
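
/*
 * Worked example of the txq selection above (values assumed): with
 * ntxq = 8 and rsrv_noflowq = 2, a packet with flowid 13 maps to
 * first_txq + (13 % (8 - 2)) + 2 = first_txq + 3.  Packets without a
 * flowid stay on first_txq, so hashed traffic never lands on the
 * reserved queues.
 */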
1918 
1919 static void
1920 cxgbe_qflush(struct ifnet *ifp)
1921 {
1922 	struct vi_info *vi = ifp->if_softc;
1923 	struct sge_txq *txq;
1924 	int i;
1925 
1926 	/* queues do not exist if !VI_INIT_DONE. */
1927 	if (vi->flags & VI_INIT_DONE) {
1928 		for_each_txq(vi, i, txq) {
1929 			TXQ_LOCK(txq);
1930 			txq->eq.flags |= EQ_QFLUSH;
1931 			TXQ_UNLOCK(txq);
1932 			while (!mp_ring_is_idle(txq->r)) {
1933 				mp_ring_check_drainage(txq->r, 0);
1934 				pause("qflush", 1);
1935 			}
1936 			TXQ_LOCK(txq);
1937 			txq->eq.flags &= ~EQ_QFLUSH;
1938 			TXQ_UNLOCK(txq);
1939 		}
1940 	}
1941 	if_qflush(ifp);
1942 }
1943 
1944 static uint64_t
1945 vi_get_counter(struct ifnet *ifp, ift_counter c)
1946 {
1947 	struct vi_info *vi = ifp->if_softc;
1948 	struct fw_vi_stats_vf *s = &vi->stats;
1949 
1950 	vi_refresh_stats(vi->pi->adapter, vi);
1951 
1952 	switch (c) {
1953 	case IFCOUNTER_IPACKETS:
1954 		return (s->rx_bcast_frames + s->rx_mcast_frames +
1955 		    s->rx_ucast_frames);
1956 	case IFCOUNTER_IERRORS:
1957 		return (s->rx_err_frames);
1958 	case IFCOUNTER_OPACKETS:
1959 		return (s->tx_bcast_frames + s->tx_mcast_frames +
1960 		    s->tx_ucast_frames + s->tx_offload_frames);
1961 	case IFCOUNTER_OERRORS:
1962 		return (s->tx_drop_frames);
1963 	case IFCOUNTER_IBYTES:
1964 		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1965 		    s->rx_ucast_bytes);
1966 	case IFCOUNTER_OBYTES:
1967 		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1968 		    s->tx_ucast_bytes + s->tx_offload_bytes);
1969 	case IFCOUNTER_IMCASTS:
1970 		return (s->rx_mcast_frames);
1971 	case IFCOUNTER_OMCASTS:
1972 		return (s->tx_mcast_frames);
1973 	case IFCOUNTER_OQDROPS: {
1974 		uint64_t drops;
1975 
1976 		drops = 0;
1977 		if (vi->flags & VI_INIT_DONE) {
1978 			int i;
1979 			struct sge_txq *txq;
1980 
1981 			for_each_txq(vi, i, txq)
1982 				drops += counter_u64_fetch(txq->r->drops);
1983 		}
1984 
1985 		return (drops);
1986 
1987 	}
1988 
1989 	default:
1990 		return (if_get_counter_default(ifp, c));
1991 	}
1992 }
1993 
1994 uint64_t
1995 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1996 {
1997 	struct vi_info *vi = ifp->if_softc;
1998 	struct port_info *pi = vi->pi;
1999 	struct adapter *sc = pi->adapter;
2000 	struct port_stats *s = &pi->stats;
2001 
2002 	if (pi->nvi > 1 || sc->flags & IS_VF)
2003 		return (vi_get_counter(ifp, c));
2004 
2005 	cxgbe_refresh_stats(sc, pi);
2006 
2007 	switch (c) {
2008 	case IFCOUNTER_IPACKETS:
2009 		return (s->rx_frames);
2010 
2011 	case IFCOUNTER_IERRORS:
2012 		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
2013 		    s->rx_fcs_err + s->rx_len_err);
2014 
2015 	case IFCOUNTER_OPACKETS:
2016 		return (s->tx_frames);
2017 
2018 	case IFCOUNTER_OERRORS:
2019 		return (s->tx_error_frames);
2020 
2021 	case IFCOUNTER_IBYTES:
2022 		return (s->rx_octets);
2023 
2024 	case IFCOUNTER_OBYTES:
2025 		return (s->tx_octets);
2026 
2027 	case IFCOUNTER_IMCASTS:
2028 		return (s->rx_mcast_frames);
2029 
2030 	case IFCOUNTER_OMCASTS:
2031 		return (s->tx_mcast_frames);
2032 
2033 	case IFCOUNTER_IQDROPS:
2034 		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2035 		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
2036 		    s->rx_trunc3 + pi->tnl_cong_drops);
2037 
2038 	case IFCOUNTER_OQDROPS: {
2039 		uint64_t drops;
2040 
2041 		drops = s->tx_drop;
2042 		if (vi->flags & VI_INIT_DONE) {
2043 			int i;
2044 			struct sge_txq *txq;
2045 
2046 			for_each_txq(vi, i, txq)
2047 				drops += counter_u64_fetch(txq->r->drops);
2048 		}
2049 
2050 		return (drops);
2051 
2052 	}
2053 
2054 	default:
2055 		return (if_get_counter_default(ifp, c));
2056 	}
2057 }
2058 
2059 /*
2060  * The kernel picks a media from the list we provided, so we do not have to
2061  * validate the request.
2062  */
2063 static int
2064 cxgbe_media_change(struct ifnet *ifp)
2065 {
2066 	struct vi_info *vi = ifp->if_softc;
2067 	struct port_info *pi = vi->pi;
2068 	struct ifmedia *ifm = &pi->media;
2069 	struct link_config *lc = &pi->link_cfg;
2070 	struct adapter *sc = pi->adapter;
2071 	int rc;
2072 
2073 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
2074 	if (rc != 0)
2075 		return (rc);
2076 	PORT_LOCK(pi);
2077 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2078 		MPASS(lc->supported & FW_PORT_CAP_ANEG);
2079 		lc->requested_aneg = AUTONEG_ENABLE;
2080 	} else {
2081 		lc->requested_aneg = AUTONEG_DISABLE;
2082 		lc->requested_speed =
2083 		    ifmedia_baudrate(ifm->ifm_media) / 1000000;
2084 		lc->requested_fc = 0;
2085 		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
2086 			lc->requested_fc |= PAUSE_RX;
2087 		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
2088 			lc->requested_fc |= PAUSE_TX;
2089 	}
2090 	if (pi->up_vis > 0)
2091 		rc = apply_l1cfg(pi);
2092 	PORT_UNLOCK(pi);
2093 	end_synchronized_op(sc, 0);
2094 	return (rc);
2095 }
2096 
2097 /*
2098  * Mbps to FW_PORT_CAP_SPEED_* bit.
2099  */
2100 static uint16_t
2101 speed_to_fwspeed(int speed)
2102 {
2103 
2104 	switch (speed) {
2105 	case 100000:
2106 		return (FW_PORT_CAP_SPEED_100G);
2107 	case 40000:
2108 		return (FW_PORT_CAP_SPEED_40G);
2109 	case 25000:
2110 		return (FW_PORT_CAP_SPEED_25G);
2111 	case 10000:
2112 		return (FW_PORT_CAP_SPEED_10G);
2113 	case 1000:
2114 		return (FW_PORT_CAP_SPEED_1G);
2115 	case 100:
2116 		return (FW_PORT_CAP_SPEED_100M);
2117 	}
2118 
2119 	return (0);
2120 }
2121 
2122 /*
2123  * Base media word (without ETHER, pause, link active, etc.) for the port at the
2124  * given speed.
2125  */
2126 static int
2127 port_mword(struct port_info *pi, uint16_t speed)
2128 {
2129 
2130 	MPASS(speed & M_FW_PORT_CAP_SPEED);
2131 	MPASS(powerof2(speed));
2132 
2133 	switch (pi->port_type) {
2134 	case FW_PORT_TYPE_BT_SGMII:
2135 	case FW_PORT_TYPE_BT_XFI:
2136 	case FW_PORT_TYPE_BT_XAUI:
2137 		/* BaseT */
2138 		switch (speed) {
2139 		case FW_PORT_CAP_SPEED_100M:
2140 			return (IFM_100_T);
2141 		case FW_PORT_CAP_SPEED_1G:
2142 			return (IFM_1000_T);
2143 		case FW_PORT_CAP_SPEED_10G:
2144 			return (IFM_10G_T);
2145 		}
2146 		break;
2147 	case FW_PORT_TYPE_KX4:
2148 		if (speed == FW_PORT_CAP_SPEED_10G)
2149 			return (IFM_10G_KX4);
2150 		break;
2151 	case FW_PORT_TYPE_CX4:
2152 		if (speed == FW_PORT_CAP_SPEED_10G)
2153 			return (IFM_10G_CX4);
2154 		break;
2155 	case FW_PORT_TYPE_KX:
2156 		if (speed == FW_PORT_CAP_SPEED_1G)
2157 			return (IFM_1000_KX);
2158 		break;
2159 	case FW_PORT_TYPE_KR:
2160 	case FW_PORT_TYPE_BP_AP:
2161 	case FW_PORT_TYPE_BP4_AP:
2162 	case FW_PORT_TYPE_BP40_BA:
2163 	case FW_PORT_TYPE_KR4_100G:
2164 	case FW_PORT_TYPE_KR_SFP28:
2165 	case FW_PORT_TYPE_KR_XLAUI:
2166 		switch (speed) {
2167 		case FW_PORT_CAP_SPEED_1G:
2168 			return (IFM_1000_KX);
2169 		case FW_PORT_CAP_SPEED_10G:
2170 			return (IFM_10G_KR);
2171 		case FW_PORT_CAP_SPEED_25G:
2172 			return (IFM_25G_KR);
2173 		case FW_PORT_CAP_SPEED_40G:
2174 			return (IFM_40G_KR4);
2175 		case FW_PORT_CAP_SPEED_100G:
2176 			return (IFM_100G_KR4);
2177 		}
2178 		break;
2179 	case FW_PORT_TYPE_FIBER_XFI:
2180 	case FW_PORT_TYPE_FIBER_XAUI:
2181 	case FW_PORT_TYPE_SFP:
2182 	case FW_PORT_TYPE_QSFP_10G:
2183 	case FW_PORT_TYPE_QSA:
2184 	case FW_PORT_TYPE_QSFP:
2185 	case FW_PORT_TYPE_CR4_QSFP:
2186 	case FW_PORT_TYPE_CR_QSFP:
2187 	case FW_PORT_TYPE_CR2_QSFP:
2188 	case FW_PORT_TYPE_SFP28:
2189 		/* Pluggable transceiver */
2190 		switch (pi->mod_type) {
2191 		case FW_PORT_MOD_TYPE_LR:
2192 			switch (speed) {
2193 			case FW_PORT_CAP_SPEED_1G:
2194 				return (IFM_1000_LX);
2195 			case FW_PORT_CAP_SPEED_10G:
2196 				return (IFM_10G_LR);
2197 			case FW_PORT_CAP_SPEED_25G:
2198 				return (IFM_25G_LR);
2199 			case FW_PORT_CAP_SPEED_40G:
2200 				return (IFM_40G_LR4);
2201 			case FW_PORT_CAP_SPEED_100G:
2202 				return (IFM_100G_LR4);
2203 			}
2204 			break;
2205 		case FW_PORT_MOD_TYPE_SR:
2206 			switch (speed) {
2207 			case FW_PORT_CAP_SPEED_1G:
2208 				return (IFM_1000_SX);
2209 			case FW_PORT_CAP_SPEED_10G:
2210 				return (IFM_10G_SR);
2211 			case FW_PORT_CAP_SPEED_25G:
2212 				return (IFM_25G_SR);
2213 			case FW_PORT_CAP_SPEED_40G:
2214 				return (IFM_40G_SR4);
2215 			case FW_PORT_CAP_SPEED_100G:
2216 				return (IFM_100G_SR4);
2217 			}
2218 			break;
2219 		case FW_PORT_MOD_TYPE_ER:
2220 			if (speed == FW_PORT_CAP_SPEED_10G)
2221 				return (IFM_10G_ER);
2222 			break;
2223 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2224 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2225 			switch (speed) {
2226 			case FW_PORT_CAP_SPEED_1G:
2227 				return (IFM_1000_CX);
2228 			case FW_PORT_CAP_SPEED_10G:
2229 				return (IFM_10G_TWINAX);
2230 			case FW_PORT_CAP_SPEED_25G:
2231 				return (IFM_25G_CR);
2232 			case FW_PORT_CAP_SPEED_40G:
2233 				return (IFM_40G_CR4);
2234 			case FW_PORT_CAP_SPEED_100G:
2235 				return (IFM_100G_CR4);
2236 			}
2237 			break;
2238 		case FW_PORT_MOD_TYPE_LRM:
2239 			if (speed == FW_PORT_CAP_SPEED_10G)
2240 				return (IFM_10G_LRM);
2241 			break;
2242 		case FW_PORT_MOD_TYPE_NA:
2243 			MPASS(0);	/* Not pluggable? */
2244 			/* fall through */
2245 		case FW_PORT_MOD_TYPE_ERROR:
2246 		case FW_PORT_MOD_TYPE_UNKNOWN:
2247 		case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2248 			break;
2249 		case FW_PORT_MOD_TYPE_NONE:
2250 			return (IFM_NONE);
2251 		}
2252 		break;
2253 	case FW_PORT_TYPE_NONE:
2254 		return (IFM_NONE);
2255 	}
2256 
2257 	return (IFM_UNKNOWN);
2258 }
2259 
2260 static void
2261 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2262 {
2263 	struct vi_info *vi = ifp->if_softc;
2264 	struct port_info *pi = vi->pi;
2265 	struct adapter *sc = pi->adapter;
2266 	struct link_config *lc = &pi->link_cfg;
2267 
2268 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2269 		return;
2270 	PORT_LOCK(pi);
2271 
2272 	if (pi->up_vis == 0) {
2273 		/*
2274 		 * If all the interfaces are administratively down the firmware
2275 		 * does not report transceiver changes.  Refresh port info here
2276 		 * so that ifconfig displays accurate ifmedia at all times.
2277 		 * This is the only reason we have a synchronized op in this
2278 		 * function.  Just PORT_LOCK would have been enough otherwise.
2279 		 */
2280 		t4_update_port_info(pi);
2281 		build_medialist(pi, &pi->media);
2282 	}
2283 
2284 	/* ifm_status */
2285 	ifmr->ifm_status = IFM_AVALID;
2286 	if (lc->link_ok == 0)
2287 		goto done;
2288 	ifmr->ifm_status |= IFM_ACTIVE;
2289 
2290 	/* ifm_active */
2291 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2292 	ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2293 	if (lc->fc & PAUSE_RX)
2294 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2295 	if (lc->fc & PAUSE_TX)
2296 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2297 	ifmr->ifm_active |= port_mword(pi, speed_to_fwspeed(lc->speed));
2298 done:
2299 	PORT_UNLOCK(pi);
2300 	end_synchronized_op(sc, 0);
2301 }
2302 
2303 static int
2304 vcxgbe_probe(device_t dev)
2305 {
2306 	char buf[128];
2307 	struct vi_info *vi = device_get_softc(dev);
2308 
2309 	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2310 	    vi - vi->pi->vi);
2311 	device_set_desc_copy(dev, buf);
2312 
2313 	return (BUS_PROBE_DEFAULT);
2314 }
2315 
2316 static int
2317 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2318 {
2319 	int func, index, rc;
2320 	uint32_t param, val;
2321 
2322 	ASSERT_SYNCHRONIZED_OP(sc);
2323 
2324 	index = vi - pi->vi;
2325 	MPASS(index > 0);	/* This function deals with _extra_ VIs only */
2326 	KASSERT(index < nitems(vi_mac_funcs),
2327 	    ("%s: VI %s doesn't have a MAC func", __func__,
2328 	    device_get_nameunit(vi->dev)));
2329 	func = vi_mac_funcs[index];
2330 	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2331 	    vi->hw_addr, &vi->rss_size, func, 0);
2332 	if (rc < 0) {
2333 		device_printf(vi->dev, "failed to allocate virtual interface %d"
2334 		    "for port %d: %d\n", index, pi->port_id, -rc);
2335 		return (-rc);
2336 	}
2337 	vi->viid = rc;
2338 	if (chip_id(sc) <= CHELSIO_T5)
2339 		vi->smt_idx = (rc & 0x7f) << 1;
2340 	else
2341 		vi->smt_idx = (rc & 0x7f);
2342 
2343 	if (vi->rss_size == 1) {
2344 		/*
2345 		 * This VI didn't get a slice of the RSS table.  Reduce the
2346 		 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2347 		 * configuration file (nvi, rssnvi for this PF) if this is a
2348 		 * problem.
2349 		 */
2350 		device_printf(vi->dev, "RSS table not available.\n");
2351 		vi->rss_base = 0xffff;
2352 
2353 		return (0);
2354 	}
2355 
2356 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2357 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2358 	    V_FW_PARAMS_PARAM_YZ(vi->viid);
2359 	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2360 	if (rc)
2361 		vi->rss_base = 0xffff;
2362 	else {
2363 		MPASS((val >> 16) == vi->rss_size);
2364 		vi->rss_base = val & 0xffff;
2365 	}
2366 
2367 	return (0);
2368 }
2369 
2370 static int
2371 vcxgbe_attach(device_t dev)
2372 {
2373 	struct vi_info *vi;
2374 	struct port_info *pi;
2375 	struct adapter *sc;
2376 	int rc;
2377 
2378 	vi = device_get_softc(dev);
2379 	pi = vi->pi;
2380 	sc = pi->adapter;
2381 
2382 	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2383 	if (rc)
2384 		return (rc);
2385 	rc = alloc_extra_vi(sc, pi, vi);
2386 	end_synchronized_op(sc, 0);
2387 	if (rc)
2388 		return (rc);
2389 
2390 	rc = cxgbe_vi_attach(dev, vi);
2391 	if (rc) {
2392 		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2393 		return (rc);
2394 	}
2395 	return (0);
2396 }
2397 
2398 static int
2399 vcxgbe_detach(device_t dev)
2400 {
2401 	struct vi_info *vi;
2402 	struct adapter *sc;
2403 
2404 	vi = device_get_softc(dev);
2405 	sc = vi->pi->adapter;
2406 
2407 	doom_vi(sc, vi);
2408 
2409 	cxgbe_vi_detach(vi);
2410 	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2411 
2412 	end_synchronized_op(sc, 0);
2413 
2414 	return (0);
2415 }
2416 
2417 void
2418 t4_fatal_err(struct adapter *sc)
2419 {
2420 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2421 	t4_intr_disable(sc);
2422 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2423 	    device_get_nameunit(sc->dev));
2424 	if (t4_panic_on_fatal_err)
2425 		panic("panic requested on fatal error");
2426 }
2427 
2428 void
2429 t4_add_adapter(struct adapter *sc)
2430 {
2431 	sx_xlock(&t4_list_lock);
2432 	SLIST_INSERT_HEAD(&t4_list, sc, link);
2433 	sx_xunlock(&t4_list_lock);
2434 }
2435 
2436 int
2437 t4_map_bars_0_and_4(struct adapter *sc)
2438 {
2439 	sc->regs_rid = PCIR_BAR(0);
2440 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2441 	    &sc->regs_rid, RF_ACTIVE);
2442 	if (sc->regs_res == NULL) {
2443 		device_printf(sc->dev, "cannot map registers.\n");
2444 		return (ENXIO);
2445 	}
2446 	sc->bt = rman_get_bustag(sc->regs_res);
2447 	sc->bh = rman_get_bushandle(sc->regs_res);
2448 	sc->mmio_len = rman_get_size(sc->regs_res);
2449 	setbit(&sc->doorbells, DOORBELL_KDB);
2450 
2451 	sc->msix_rid = PCIR_BAR(4);
2452 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2453 	    &sc->msix_rid, RF_ACTIVE);
2454 	if (sc->msix_res == NULL) {
2455 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2456 		return (ENXIO);
2457 	}
2458 
2459 	return (0);
2460 }
2461 
2462 int
2463 t4_map_bar_2(struct adapter *sc)
2464 {
2465 
2466 	/*
2467 	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
2468 	 * to map it if RDMA is disabled.
2469 	 */
2470 	if (is_t4(sc) && sc->rdmacaps == 0)
2471 		return (0);
2472 
2473 	sc->udbs_rid = PCIR_BAR(2);
2474 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2475 	    &sc->udbs_rid, RF_ACTIVE);
2476 	if (sc->udbs_res == NULL) {
2477 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
2478 		return (ENXIO);
2479 	}
2480 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
2481 
2482 	if (chip_id(sc) >= CHELSIO_T5) {
2483 		setbit(&sc->doorbells, DOORBELL_UDB);
2484 #if defined(__i386__) || defined(__amd64__)
2485 		if (t5_write_combine) {
2486 			int rc, mode;
2487 
2488 			/*
2489 			 * Enable write combining on BAR2.  This is the
2490 			 * userspace doorbell BAR and is split into 128B
2491 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
2492 			 * with an egress queue.  The first 64B has the doorbell
2493 			 * and the second 64B can be used to submit a tx work
2494 			 * request with an implicit doorbell.
2495 			 */
2496 
2497 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2498 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2499 			if (rc == 0) {
2500 				clrbit(&sc->doorbells, DOORBELL_UDB);
2501 				setbit(&sc->doorbells, DOORBELL_WCWR);
2502 				setbit(&sc->doorbells, DOORBELL_UDBWC);
2503 			} else {
2504 				device_printf(sc->dev,
2505 				    "couldn't enable write combining: %d\n",
2506 				    rc);
2507 			}
2508 
2509 			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2510 			t4_write_reg(sc, A_SGE_STAT_CFG,
2511 			    V_STATSOURCE_T5(7) | mode);
2512 		}
2513 #endif
2514 	}
2515 	sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
2516 
2517 	return (0);
2518 }
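
/*
 * Shape of one UDBS_SEG_SIZE (128B) doorbell segment described in the
 * write-combining comment above; offsets are within a segment and say
 * nothing about how egress queues are assigned to segments:
 *
 *	+0  .. +63	doorbell register (kick the queue)
 *	+64 .. +127	room for a complete tx work request, written
 *			through the WC mapping (the DOORBELL_WCWR path)
 */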
2519 
2520 struct memwin_init {
2521 	uint32_t base;
2522 	uint32_t aperture;
2523 };
2524 
2525 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2526 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2527 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2528 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2529 };
2530 
2531 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2532 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2533 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2534 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2535 };
2536 
2537 static void
2538 setup_memwin(struct adapter *sc)
2539 {
2540 	const struct memwin_init *mw_init;
2541 	struct memwin *mw;
2542 	int i;
2543 	uint32_t bar0;
2544 
2545 	if (is_t4(sc)) {
2546 		/*
2547 		 * Read low 32b of bar0 indirectly via the hardware backdoor
2548 		 * mechanism.  Works from within PCI passthrough environments
2549 		 * too, where rman_get_start() can return a different value.  We
2550 		 * need to program the T4 memory window decoders with the actual
2551 		 * addresses that will be coming across the PCIe link.
2552 		 */
2553 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2554 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2555 
2556 		mw_init = &t4_memwin[0];
2557 	} else {
2558 		/* T5+ use the relative offset inside the PCIe BAR */
2559 		bar0 = 0;
2560 
2561 		mw_init = &t5_memwin[0];
2562 	}
2563 
2564 	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2565 		rw_init(&mw->mw_lock, "memory window access");
2566 		mw->mw_base = mw_init->base;
2567 		mw->mw_aperture = mw_init->aperture;
2568 		mw->mw_curpos = 0;
2569 		t4_write_reg(sc,
2570 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2571 		    (mw->mw_base + bar0) | V_BIR(0) |
2572 		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
2573 		rw_wlock(&mw->mw_lock);
2574 		position_memwin(sc, i, 0);
2575 		rw_wunlock(&mw->mw_lock);
2576 	}
2577 
2578 	/* flush */
2579 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2580 }
2581 
2582 /*
2583  * Positions the memory window at the given address in the card's address space.
2584  * There are some alignment requirements and the actual position may be at an
2585  * address prior to the requested address.  mw->mw_curpos always has the actual
2586  * position of the window.
2587  */
2588 static void
2589 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2590 {
2591 	struct memwin *mw;
2592 	uint32_t pf;
2593 	uint32_t reg;
2594 
2595 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
2596 	mw = &sc->memwin[idx];
2597 	rw_assert(&mw->mw_lock, RA_WLOCKED);
2598 
2599 	if (is_t4(sc)) {
2600 		pf = 0;
2601 		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
2602 	} else {
2603 		pf = V_PFNUM(sc->pf);
2604 		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
2605 	}
2606 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2607 	t4_write_reg(sc, reg, mw->mw_curpos | pf);
2608 	t4_read_reg(sc, reg);	/* flush */
2609 }
2610 
2611 int
2612 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2613     int len, int rw)
2614 {
2615 	struct memwin *mw;
2616 	uint32_t mw_end, v;
2617 
2618 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
2619 
2620 	/* Memory can only be accessed in naturally aligned 4 byte units */
2621 	if (addr & 3 || len & 3 || len <= 0)
2622 		return (EINVAL);
2623 
2624 	mw = &sc->memwin[idx];
2625 	while (len > 0) {
2626 		rw_rlock(&mw->mw_lock);
2627 		mw_end = mw->mw_curpos + mw->mw_aperture;
2628 		if (addr >= mw_end || addr < mw->mw_curpos) {
2629 			/* Will need to reposition the window */
2630 			if (!rw_try_upgrade(&mw->mw_lock)) {
2631 				rw_runlock(&mw->mw_lock);
2632 				rw_wlock(&mw->mw_lock);
2633 			}
2634 			rw_assert(&mw->mw_lock, RA_WLOCKED);
2635 			position_memwin(sc, idx, addr);
2636 			rw_downgrade(&mw->mw_lock);
2637 			mw_end = mw->mw_curpos + mw->mw_aperture;
2638 		}
2639 		rw_assert(&mw->mw_lock, RA_RLOCKED);
2640 		while (addr < mw_end && len > 0) {
2641 			if (rw == 0) {
2642 				v = t4_read_reg(sc, mw->mw_base + addr -
2643 				    mw->mw_curpos);
2644 				*val++ = le32toh(v);
2645 			} else {
2646 				v = *val++;
2647 				t4_write_reg(sc, mw->mw_base + addr -
2648 				    mw->mw_curpos, htole32(v));
2649 			}
2650 			addr += 4;
2651 			len -= 4;
2652 		}
2653 		rw_runlock(&mw->mw_lock);
2654 	}
2655 
2656 	return (0);
2657 }
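
/*
 * Minimal usage sketch for rw_via_memwin: read 16 bytes of adapter
 * memory at a 4-byte-aligned address through window 0.  rw = 0 selects
 * a read and rw = 1 a write; write_via_memwin used later in this file
 * wraps the latter.
 *
 *	uint32_t buf[4];
 *
 *	rc = rw_via_memwin(sc, 0, addr, buf, sizeof(buf), 0);
 *
 * rc is EINVAL if addr or the length is misaligned.
 */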
2658 
2659 int
2660 alloc_atid_tab(struct tid_info *t, int flags)
2661 {
2662 	int i;
2663 
2664 	MPASS(t->natids > 0);
2665 	MPASS(t->atid_tab == NULL);
2666 
2667 	t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
2668 	    M_ZERO | flags);
2669 	if (t->atid_tab == NULL)
2670 		return (ENOMEM);
2671 	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
2672 	t->afree = t->atid_tab;
2673 	t->atids_in_use = 0;
2674 	for (i = 1; i < t->natids; i++)
2675 		t->atid_tab[i - 1].next = &t->atid_tab[i];
2676 	t->atid_tab[t->natids - 1].next = NULL;
2677 
2678 	return (0);
2679 }
2680 
2681 void
2682 free_atid_tab(struct tid_info *t)
2683 {
2684 
2685 	KASSERT(t->atids_in_use == 0,
2686 	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
2687 
2688 	if (mtx_initialized(&t->atid_lock))
2689 		mtx_destroy(&t->atid_lock);
2690 	free(t->atid_tab, M_CXGBE);
2691 	t->atid_tab = NULL;
2692 }
2693 
2694 int
2695 alloc_atid(struct adapter *sc, void *ctx)
2696 {
2697 	struct tid_info *t = &sc->tids;
2698 	int atid = -1;
2699 
2700 	mtx_lock(&t->atid_lock);
2701 	if (t->afree) {
2702 		union aopen_entry *p = t->afree;
2703 
2704 		atid = p - t->atid_tab;
2705 		MPASS(atid <= M_TID_TID);
2706 		t->afree = p->next;
2707 		p->data = ctx;
2708 		t->atids_in_use++;
2709 	}
2710 	mtx_unlock(&t->atid_lock);
2711 	return (atid);
2712 }
2713 
2714 void *
2715 lookup_atid(struct adapter *sc, int atid)
2716 {
2717 	struct tid_info *t = &sc->tids;
2718 
2719 	return (t->atid_tab[atid].data);
2720 }
2721 
2722 void
2723 free_atid(struct adapter *sc, int atid)
2724 {
2725 	struct tid_info *t = &sc->tids;
2726 	union aopen_entry *p = &t->atid_tab[atid];
2727 
2728 	mtx_lock(&t->atid_lock);
2729 	p->next = t->afree;
2730 	t->afree = p;
2731 	t->atids_in_use--;
2732 	mtx_unlock(&t->atid_lock);
2733 }
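
/*
 * Typical atid lifecycle implied by the functions above; "softc" is
 * whatever cookie the caller wants returned by the lookup:
 *
 *	atid = alloc_atid(sc, softc);
 *	if (atid < 0)
 *		... table is full, back off ...
 *	... hand atid to the hardware; on the reply path: ...
 *	softc = lookup_atid(sc, atid);
 *	free_atid(sc, atid);
 */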
2734 
2735 static void
2736 queue_tid_release(struct adapter *sc, int tid)
2737 {
2738 
2739 	CXGBE_UNIMPLEMENTED("deferred tid release");
2740 }
2741 
2742 void
2743 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
2744 {
2745 	struct wrqe *wr;
2746 	struct cpl_tid_release *req;
2747 
2748 	wr = alloc_wrqe(sizeof(*req), ctrlq);
2749 	if (wr == NULL) {
2750 		queue_tid_release(sc, tid);	/* defer */
2751 		return;
2752 	}
2753 	req = wrtod(wr);
2754 
2755 	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
2756 
2757 	t4_wrq_tx(sc, wr);
2758 }
2759 
2760 static int
2761 t4_range_cmp(const void *a, const void *b)
2762 {
2763 	return ((const struct t4_range *)a)->start -
2764 	       ((const struct t4_range *)b)->start;
2765 }
2766 
2767 /*
2768  * Verify that the memory range specified by the addr/len pair is valid within
2769  * the card's address space.
2770  */
2771 static int
2772 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2773 {
2774 	struct t4_range mem_ranges[4], *r, *next;
2775 	uint32_t em, addr_len;
2776 	int i, n, remaining;
2777 
2778 	/* Memory can only be accessed in naturally aligned 4 byte units */
2779 	if (addr & 3 || len & 3 || len <= 0)
2780 		return (EINVAL);
2781 
2782 	/* Enabled memories */
2783 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2784 
2785 	r = &mem_ranges[0];
2786 	n = 0;
2787 	bzero(r, sizeof(mem_ranges));
2788 	if (em & F_EDRAM0_ENABLE) {
2789 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2790 		r->size = G_EDRAM0_SIZE(addr_len) << 20;
2791 		if (r->size > 0) {
2792 			r->start = G_EDRAM0_BASE(addr_len) << 20;
2793 			if (addr >= r->start &&
2794 			    addr + len <= r->start + r->size)
2795 				return (0);
2796 			r++;
2797 			n++;
2798 		}
2799 	}
2800 	if (em & F_EDRAM1_ENABLE) {
2801 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2802 		r->size = G_EDRAM1_SIZE(addr_len) << 20;
2803 		if (r->size > 0) {
2804 			r->start = G_EDRAM1_BASE(addr_len) << 20;
2805 			if (addr >= r->start &&
2806 			    addr + len <= r->start + r->size)
2807 				return (0);
2808 			r++;
2809 			n++;
2810 		}
2811 	}
2812 	if (em & F_EXT_MEM_ENABLE) {
2813 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2814 		r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2815 		if (r->size > 0) {
2816 			r->start = G_EXT_MEM_BASE(addr_len) << 20;
2817 			if (addr >= r->start &&
2818 			    addr + len <= r->start + r->size)
2819 				return (0);
2820 			r++;
2821 			n++;
2822 		}
2823 	}
2824 	if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2825 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2826 		r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2827 		if (r->size > 0) {
2828 			r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2829 			if (addr >= r->start &&
2830 			    addr + len <= r->start + r->size)
2831 				return (0);
2832 			r++;
2833 			n++;
2834 		}
2835 	}
2836 	MPASS(n <= nitems(mem_ranges));
2837 
2838 	if (n > 1) {
2839 		/* Sort and merge the ranges. */
2840 		qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2841 
2842 		/* Start from index 0 and examine the next n - 1 entries. */
2843 		r = &mem_ranges[0];
2844 		for (remaining = n - 1; remaining > 0; remaining--, r++) {
2845 
2846 			MPASS(r->size > 0);	/* r is a valid entry. */
2847 			next = r + 1;
2848 			MPASS(next->size > 0);	/* and so is the next one. */
2849 
2850 			while (r->start + r->size >= next->start) {
2851 				/* Merge the next one into the current entry. */
2852 				r->size = max(r->start + r->size,
2853 				    next->start + next->size) - r->start;
2854 				n--;	/* One fewer entry in total. */
2855 				if (--remaining == 0)
2856 					goto done;	/* short circuit */
2857 				next++;
2858 			}
2859 			if (next != r + 1) {
2860 				/*
2861 				 * Some entries were merged into r and next
2862 				 * points to the first valid entry that couldn't
2863 				 * be merged.
2864 				 */
2865 				MPASS(next->size > 0);	/* must be valid */
2866 				memcpy(r + 1, next, remaining * sizeof(*r));
2867 #ifdef INVARIANTS
2868 				/*
2869 				 * This is so that the r->size and next->size
2870 				 * assertions in the next iteration of the loop
2871 				 * do the right thing for entries that were
2872 				 * pulled up and are no longer valid.
2873 				 */
2874 				MPASS(n < nitems(mem_ranges));
2875 				bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2876 				    sizeof(struct t4_range));
2877 #endif
2878 			}
2879 		}
2880 done:
2881 		/* Done merging the ranges. */
2882 		MPASS(n > 0);
2883 		r = &mem_ranges[0];
2884 		for (i = 0; i < n; i++, r++) {
2885 			if (addr >= r->start &&
2886 			    addr + len <= r->start + r->size)
2887 				return (0);
2888 		}
2889 	}
2890 
2891 	return (EFAULT);
2892 }
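
/*
 * Worked example of the sort-and-merge above (made-up sizes): enabled
 * ranges [0, 64M), [64M, 128M), and [256M, 1G) sort into that order and
 * the first two coalesce because 0 + 64M >= 64M, leaving [0, 128M) and
 * [256M, 1G).  An addr/len pair is valid only if it fits entirely inside
 * a single merged range.
 */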
2893 
2894 static int
2895 fwmtype_to_hwmtype(int mtype)
2896 {
2897 
2898 	switch (mtype) {
2899 	case FW_MEMTYPE_EDC0:
2900 		return (MEM_EDC0);
2901 	case FW_MEMTYPE_EDC1:
2902 		return (MEM_EDC1);
2903 	case FW_MEMTYPE_EXTMEM:
2904 		return (MEM_MC0);
2905 	case FW_MEMTYPE_EXTMEM1:
2906 		return (MEM_MC1);
2907 	default:
2908 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2909 	}
2910 }
2911 
2912 /*
2913  * Verify that the memory range specified by the memtype/offset/len pair is
2914  * valid and lies entirely within the memtype specified.  The global address of
2915  * the start of the range is returned in addr.
2916  */
2917 static int
2918 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2919     uint32_t *addr)
2920 {
2921 	uint32_t em, addr_len, maddr;
2922 
2923 	/* Memory can only be accessed in naturally aligned 4 byte units */
2924 	if (off & 3 || len & 3 || len == 0)
2925 		return (EINVAL);
2926 
2927 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2928 	switch (fwmtype_to_hwmtype(mtype)) {
2929 	case MEM_EDC0:
2930 		if (!(em & F_EDRAM0_ENABLE))
2931 			return (EINVAL);
2932 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2933 		maddr = G_EDRAM0_BASE(addr_len) << 20;
2934 		break;
2935 	case MEM_EDC1:
2936 		if (!(em & F_EDRAM1_ENABLE))
2937 			return (EINVAL);
2938 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2939 		maddr = G_EDRAM1_BASE(addr_len) << 20;
2940 		break;
2941 	case MEM_MC:
2942 		if (!(em & F_EXT_MEM_ENABLE))
2943 			return (EINVAL);
2944 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2945 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
2946 		break;
2947 	case MEM_MC1:
2948 		if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2949 			return (EINVAL);
2950 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2951 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2952 		break;
2953 	default:
2954 		return (EINVAL);
2955 	}
2956 
2957 	*addr = maddr + off;	/* global address */
2958 	return (validate_mem_range(sc, *addr, len));
2959 }
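
/*
 * Example with assumed BAR values: if A_MA_EDRAM1_BAR reports a base of
 * 64 (in MB units), then mtype FW_MEMTYPE_EDC1 with off 0x1000 yields
 * the global address (64 << 20) + 0x1000, which validate_mem_range()
 * then checks against the enabled memories.
 */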
2960 
2961 static int
2962 fixup_devlog_params(struct adapter *sc)
2963 {
2964 	struct devlog_params *dparams = &sc->params.devlog;
2965 	int rc;
2966 
2967 	rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2968 	    dparams->size, &dparams->addr);
2969 
2970 	return (rc);
2971 }
2972 
2973 static void
2974 update_nirq(struct intrs_and_queues *iaq, int nports)
2975 {
2976 	int extra = T4_EXTRA_INTR;
2977 
2978 	iaq->nirq = extra;
2979 	iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
2980 	iaq->nirq += nports * (iaq->num_vis - 1) *
2981 	    max(iaq->nrxq_vi, iaq->nnmrxq_vi);
2982 	iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
2983 }
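
/*
 * Worked example for update_nirq (values assumed): nports = 2, nrxq = 4,
 * nofldrxq = 2, num_vis = 2, nrxq_vi = 2, nnmrxq_vi = 0, nofldrxq_vi = 1
 * gives nirq = T4_EXTRA_INTR + 2 * (4 + 2) + 2 * 1 * max(2, 0) + 2 * 1 * 1
 *            = T4_EXTRA_INTR + 18.
 */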
2984 
2985 /*
2986  * Adjust requirements to fit the number of interrupts available.
2987  */
2988 static void
2989 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
2990     int navail)
2991 {
2992 	int old_nirq;
2993 	const int nports = sc->params.nports;
2994 
2995 	MPASS(nports > 0);
2996 	MPASS(navail > 0);
2997 
2998 	bzero(iaq, sizeof(*iaq));
2999 	iaq->intr_type = itype;
3000 	iaq->num_vis = t4_num_vis;
3001 	iaq->ntxq = t4_ntxq;
3002 	iaq->ntxq_vi = t4_ntxq_vi;
3003 	iaq->nrxq = t4_nrxq;
3004 	iaq->nrxq_vi = t4_nrxq_vi;
3005 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3006 	if (is_offload(sc) || is_ethoffload(sc)) {
3007 		iaq->nofldtxq = t4_nofldtxq;
3008 		iaq->nofldtxq_vi = t4_nofldtxq_vi;
3009 	}
3010 #endif
3011 #ifdef TCP_OFFLOAD
3012 	if (is_offload(sc)) {
3013 		iaq->nofldrxq = t4_nofldrxq;
3014 		iaq->nofldrxq_vi = t4_nofldrxq_vi;
3015 	}
3016 #endif
3017 #ifdef DEV_NETMAP
3018 	iaq->nnmtxq_vi = t4_nnmtxq_vi;
3019 	iaq->nnmrxq_vi = t4_nnmrxq_vi;
3020 #endif
3021 
3022 	update_nirq(iaq, nports);
3023 	if (iaq->nirq <= navail &&
3024 	    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3025 		/*
3026 		 * This is the normal case -- there are enough interrupts for
3027 		 * everything.
3028 		 */
3029 		goto done;
3030 	}
3031 
3032 	/*
3033 	 * If extra VIs have been configured, try reducing their count and see if
3034 	 * that works.
3035 	 */
3036 	while (iaq->num_vis > 1) {
3037 		iaq->num_vis--;
3038 		update_nirq(iaq, nports);
3039 		if (iaq->nirq <= navail &&
3040 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3041 			device_printf(sc->dev, "virtual interfaces per port "
3042 			    "reduced to %d from %d.  nrxq=%u, nofldrxq=%u, "
3043 			    "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u.  "
3044 			    "itype %d, navail %u, nirq %d.\n",
3045 			    iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3046 			    iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3047 			    itype, navail, iaq->nirq);
3048 			goto done;
3049 		}
3050 	}
3051 
3052 	/*
3053 	 * Extra VIs will not be created.  Log a message if they were requested.
3054 	 */
3055 	MPASS(iaq->num_vis == 1);
3056 	iaq->ntxq_vi = iaq->nrxq_vi = 0;
3057 	iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3058 	iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3059 	if (iaq->num_vis != t4_num_vis) {
3060 		device_printf(sc->dev, "extra virtual interfaces disabled.  "
3061 		    "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3062 		    "nnmrxq_vi=%u.  itype %d, navail %u, nirq %d.\n",
3063 		    iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3064 		    iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3065 	}
3066 
3067 	/*
3068 	 * Keep reducing the number of NIC rx queues to the next lower power of
3069 	 * 2 (for even RSS distribution) and halving the TOE rx queues and see
3070 	 * if that works.
3071 	 */
3072 	do {
3073 		if (iaq->nrxq > 1) {
3074 			do {
3075 				iaq->nrxq--;
3076 			} while (!powerof2(iaq->nrxq));
3077 		}
3078 		if (iaq->nofldrxq > 1)
3079 			iaq->nofldrxq >>= 1;
3080 
3081 		old_nirq = iaq->nirq;
3082 		update_nirq(iaq, nports);
3083 		if (iaq->nirq <= navail &&
3084 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3085 			device_printf(sc->dev, "running with reduced number of "
3086 			    "rx queues because of shortage of interrupts.  "
3087 			    "nrxq=%u, nofldrxq=%u.  "
3088 			    "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3089 			    iaq->nofldrxq, itype, navail, iaq->nirq);
3090 			goto done;
3091 		}
3092 	} while (old_nirq != iaq->nirq);
3093 
3094 	/* One interrupt for everything.  Ugh. */
3095 	device_printf(sc->dev, "running with minimal number of queues.  "
3096 	    "itype %d, navail %u.\n", itype, navail);
3097 	iaq->nirq = 1;
3098 	MPASS(iaq->nrxq == 1);
3099 	iaq->ntxq = 1;
3100 	if (iaq->nofldrxq > 0)
3101 		iaq->nofldtxq = 1;
3102 done:
3103 	MPASS(iaq->num_vis > 0);
3104 	if (iaq->num_vis > 1) {
3105 		MPASS(iaq->nrxq_vi > 0);
3106 		MPASS(iaq->ntxq_vi > 0);
3107 	}
3108 	MPASS(iaq->nirq > 0);
3109 	MPASS(iaq->nrxq > 0);
3110 	MPASS(iaq->ntxq > 0);
3111 	if (itype == INTR_MSI) {
3112 		MPASS(powerof2(iaq->nirq));
3113 	}
3114 }
3115 
3116 static int
3117 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3118 {
3119 	int rc, itype, navail, nalloc;
3120 
3121 	for (itype = INTR_MSIX; itype; itype >>= 1) {
3122 
3123 		if ((itype & t4_intr_types) == 0)
3124 			continue;	/* not allowed */
3125 
3126 		if (itype == INTR_MSIX)
3127 			navail = pci_msix_count(sc->dev);
3128 		else if (itype == INTR_MSI)
3129 			navail = pci_msi_count(sc->dev);
3130 		else
3131 			navail = 1;
3132 restart:
3133 		if (navail == 0)
3134 			continue;
3135 
3136 		calculate_iaq(sc, iaq, itype, navail);
3137 		nalloc = iaq->nirq;
3138 		rc = 0;
3139 		if (itype == INTR_MSIX)
3140 			rc = pci_alloc_msix(sc->dev, &nalloc);
3141 		else if (itype == INTR_MSI)
3142 			rc = pci_alloc_msi(sc->dev, &nalloc);
3143 
3144 		if (rc == 0 && nalloc > 0) {
3145 			if (nalloc == iaq->nirq)
3146 				return (0);
3147 
3148 			/*
3149 			 * Didn't get the number requested.  Use whatever number
3150 			 * the kernel is willing to allocate.
3151 			 */
3152 			device_printf(sc->dev, "fewer vectors than requested, "
3153 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3154 			    itype, iaq->nirq, nalloc);
3155 			pci_release_msi(sc->dev);
3156 			navail = nalloc;
3157 			goto restart;
3158 		}
3159 
3160 		device_printf(sc->dev,
3161 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
3162 		    itype, rc, iaq->nirq, nalloc);
3163 	}
3164 
3165 	device_printf(sc->dev,
3166 	    "failed to find a usable interrupt type.  "
3167 	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3168 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3169 
3170 	return (ENXIO);
3171 }
3172 
3173 #define FW_VERSION(chip) ( \
3174     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3175     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3176     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3177     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3178 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
3179 
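/*
 * FW_VERSION packs major.minor.micro.build into a single 32-bit word,
 * major in the most significant byte, so two packed versions compare
 * numerically; should_install_kld_fw() below relies on this when it
 * compares k and c directly.  The G_FW_HDR_FW_VER_* macros used in the
 * log messages extract the individual fields again.
 */
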
3180 struct fw_info {
3181 	uint8_t chip;
3182 	char *kld_name;
3183 	char *fw_mod_name;
3184 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
3185 } fw_info[] = {
3186 	{
3187 		.chip = CHELSIO_T4,
3188 		.kld_name = "t4fw_cfg",
3189 		.fw_mod_name = "t4fw",
3190 		.fw_hdr = {
3191 			.chip = FW_HDR_CHIP_T4,
3192 			.fw_ver = htobe32(FW_VERSION(T4)),
3193 			.intfver_nic = FW_INTFVER(T4, NIC),
3194 			.intfver_vnic = FW_INTFVER(T4, VNIC),
3195 			.intfver_ofld = FW_INTFVER(T4, OFLD),
3196 			.intfver_ri = FW_INTFVER(T4, RI),
3197 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3198 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
3199 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3200 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
3201 		},
3202 	}, {
3203 		.chip = CHELSIO_T5,
3204 		.kld_name = "t5fw_cfg",
3205 		.fw_mod_name = "t5fw",
3206 		.fw_hdr = {
3207 			.chip = FW_HDR_CHIP_T5,
3208 			.fw_ver = htobe32(FW_VERSION(T5)),
3209 			.intfver_nic = FW_INTFVER(T5, NIC),
3210 			.intfver_vnic = FW_INTFVER(T5, VNIC),
3211 			.intfver_ofld = FW_INTFVER(T5, OFLD),
3212 			.intfver_ri = FW_INTFVER(T5, RI),
3213 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3214 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
3215 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3216 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
3217 		},
3218 	}, {
3219 		.chip = CHELSIO_T6,
3220 		.kld_name = "t6fw_cfg",
3221 		.fw_mod_name = "t6fw",
3222 		.fw_hdr = {
3223 			.chip = FW_HDR_CHIP_T6,
3224 			.fw_ver = htobe32(FW_VERSION(T6)),
3225 			.intfver_nic = FW_INTFVER(T6, NIC),
3226 			.intfver_vnic = FW_INTFVER(T6, VNIC),
3227 			.intfver_ofld = FW_INTFVER(T6, OFLD),
3228 			.intfver_ri = FW_INTFVER(T6, RI),
3229 			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3230 			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
3231 			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3232 			.intfver_fcoe = FW_INTFVER(T6, FCOE),
3233 		},
3234 	}
3235 };
3236 
3237 static struct fw_info *
3238 find_fw_info(int chip)
3239 {
3240 	int i;
3241 
3242 	for (i = 0; i < nitems(fw_info); i++) {
3243 		if (fw_info[i].chip == chip)
3244 			return (&fw_info[i]);
3245 	}
3246 	return (NULL);
3247 }
3248 
3249 /*
3250  * Is the given firmware API compatible with the one the driver was compiled
3251  * with?
3252  */
3253 static int
3254 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3255 {
3256 
3257 	/* short circuit if it's the exact same firmware version */
3258 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3259 		return (1);
3260 
3261 	/*
3262 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
3263 	 * features that are supported in the driver.
3264 	 */
3265 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3266 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3267 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3268 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3269 		return (1);
3270 #undef SAME_INTF
3271 
3272 	return (0);
3273 }
3274 
3275 /*
3276  * The firmware in the KLD is usable, but should it be installed?  This routine
3277  * explains itself in detail if it indicates the KLD firmware should be
3278  * installed.
3279  */
3280 static int
3281 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
3282 {
3283 	const char *reason;
3284 
3285 	if (!card_fw_usable) {
3286 		reason = "incompatible or unusable";
3287 		goto install;
3288 	}
3289 
3290 	if (k > c) {
3291 		reason = "older than the version bundled with this driver";
3292 		goto install;
3293 	}
3294 
3295 	if (t4_fw_install == 2 && k != c) {
3296 		reason = "different than the version bundled with this driver";
3297 		goto install;
3298 	}
3299 
3300 	return (0);
3301 
3302 install:
3303 	if (t4_fw_install == 0) {
3304 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3305 		    "but the driver is prohibited from installing a different "
3306 		    "firmware on the card.\n",
3307 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3308 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3309 
3310 		return (0);
3311 	}
3312 
3313 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3314 	    "installing firmware %u.%u.%u.%u on card.\n",
3315 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3316 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3317 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3318 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3319 
3320 	return (1);
3321 }
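
/*
 * Decision summary for the routine above, with k the KLD firmware
 * version and c the card's (both as packed 32-bit words):
 *
 *	card fw unusable		install
 *	k > c				install
 *	t4_fw_install == 2 && k != c	install
 *	anything else			keep the card's firmware
 *
 * and any "install" verdict is vetoed (with an explanation) when
 * t4_fw_install is 0.
 */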
3322 
3323 /*
3324  * Establish contact with the firmware and determine if we are the master driver
3325  * or not, and whether we are responsible for chip initialization.
3326  */
3327 static int
3328 prep_firmware(struct adapter *sc)
3329 {
3330 	const struct firmware *fw = NULL, *default_cfg;
3331 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
3332 	enum dev_state state;
3333 	struct fw_info *fw_info;
3334 	struct fw_hdr *card_fw;		/* fw on the card */
3335 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
3336 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
3337 					   against */
3338 
3339 	/* This is the firmware whose headers the driver was compiled against */
3340 	fw_info = find_fw_info(chip_id(sc));
3341 	if (fw_info == NULL) {
3342 		device_printf(sc->dev,
3343 		    "unable to look up firmware information for chip %d.\n",
3344 		    chip_id(sc));
3345 		return (EINVAL);
3346 	}
3347 	drv_fw = &fw_info->fw_hdr;
3348 
3349 	/*
3350 	 * The firmware KLD contains many modules.  The KLD name is also the
3351 	 * name of the module that contains the default config file.
3352 	 */
3353 	default_cfg = firmware_get(fw_info->kld_name);
3354 
3355 	/* This is the firmware in the KLD */
3356 	fw = firmware_get(fw_info->fw_mod_name);
3357 	if (fw != NULL) {
3358 		kld_fw = (const void *)fw->data;
3359 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3360 	} else {
3361 		kld_fw = NULL;
3362 		kld_fw_usable = 0;
3363 	}
3364 
3365 	/* Read the header of the firmware on the card */
3366 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3367 	rc = -t4_read_flash(sc, FLASH_FW_START,
3368 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3369 	if (rc == 0) {
3370 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3371 		if (card_fw->fw_ver == be32toh(0xffffffff)) {
3372 			uint32_t d = kld_fw == NULL ? 0 : be32toh(kld_fw->fw_ver);
3373 
3374 			if (!kld_fw_usable) {
3375 				device_printf(sc->dev,
3376 				    "no firmware on the card and no usable "
3377 				    "firmware bundled with the driver.\n");
3378 				rc = EIO;
3379 				goto done;
3380 			} else if (t4_fw_install == 0) {
3381 				device_printf(sc->dev,
3382 				    "no firmware on the card and the driver "
3383 				    "is prohibited from installing new "
3384 				    "firmware.\n");
3385 				rc = EIO;
3386 				goto done;
3387 			}
3388 
3389 			device_printf(sc->dev, "no firmware on the card, "
3390 			    "installing firmware %d.%d.%d.%d\n",
3391 			    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3392 			    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3393 			rc = t4_fw_forceinstall(sc, fw->data, fw->datasize);
3394 			if (rc < 0) {
3395 				rc = -rc;
3396 				device_printf(sc->dev,
3397 				    "firmware install failed: %d.\n", rc);
3398 				goto done;
3399 			}
3400 			memcpy(card_fw, kld_fw, sizeof(*card_fw));
3401 			card_fw_usable = 1;
3402 			need_fw_reset = 0;
3403 		}
3404 	} else {
3405 		device_printf(sc->dev,
3406 		    "Unable to read card's firmware header: %d\n", rc);
3407 		card_fw_usable = 0;
3408 	}
3409 
3410 	/* Contact firmware. */
3411 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3412 	if (rc < 0 || state == DEV_STATE_ERR) {
3413 		rc = -rc;
3414 		device_printf(sc->dev,
3415 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
3416 		goto done;
3417 	}
3418 	pf = rc;
3419 	if (pf == sc->mbox)
3420 		sc->flags |= MASTER_PF;
3421 	else if (state == DEV_STATE_UNINIT) {
3422 		/*
3423 		 * We didn't get to be the master so we definitely won't be
3424 		 * configuring the chip.  It's a bug if someone else hasn't
3425 		 * configured it already.
3426 		 */
3427 		device_printf(sc->dev, "couldn't be master(%d), "
3428 		    "device not already initialized either(%d).\n", rc, state);
3429 		rc = EPROTO;
3430 		goto done;
3431 	}
3432 
3433 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3434 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3435 		/*
3436 		 * Common case: the firmware on the card is an exact match and
3437 		 * the KLD is an exact match too, or the KLD is
3438 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
3439 		 * here -- use cxgbetool loadfw if you want to reinstall the
3440 		 * same firmware as the one on the card.
3441 		 */
3442 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3443 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3444 	    be32toh(card_fw->fw_ver))) {
3445 
3446 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3447 		if (rc != 0) {
3448 			device_printf(sc->dev,
3449 			    "failed to install firmware: %d\n", rc);
3450 			goto done;
3451 		}
3452 
3453 		/* Installed successfully, update the cached header too. */
3454 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
3455 		card_fw_usable = 1;
3456 		need_fw_reset = 0;	/* already reset as part of load_fw */
3457 	}
3458 
3459 	if (!card_fw_usable) {
3460 		uint32_t d, c, k;
3461 
3462 		d = ntohl(drv_fw->fw_ver);
3463 		c = ntohl(card_fw->fw_ver);
3464 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3465 
3466 		device_printf(sc->dev, "Cannot find a usable firmware: "
3467 		    "fw_install %d, chip state %d, "
3468 		    "driver compiled with %d.%d.%d.%d, "
3469 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3470 		    t4_fw_install, state,
3471 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3472 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3473 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3474 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3475 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3476 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3477 		rc = EINVAL;
3478 		goto done;
3479 	}
3480 
3481 	/* Reset device */
3482 	if (need_fw_reset &&
3483 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3484 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3485 		if (rc != ETIMEDOUT && rc != EIO)
3486 			t4_fw_bye(sc, sc->mbox);
3487 		goto done;
3488 	}
3489 	sc->flags |= FW_OK;
3490 
3491 	rc = get_params__pre_init(sc);
3492 	if (rc != 0)
3493 		goto done; /* error message displayed already */
3494 
3495 	/* Partition adapter resources as specified in the config file. */
3496 	if (state == DEV_STATE_UNINIT) {
3497 
3498 		KASSERT(sc->flags & MASTER_PF,
3499 		    ("%s: trying to change chip settings when not master.",
3500 		    __func__));
3501 
3502 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3503 		if (rc != 0)
3504 			goto done;	/* error message displayed already */
3505 
3506 		t4_tweak_chip_settings(sc);
3507 
3508 		/* get basic stuff going */
3509 		rc = -t4_fw_initialize(sc, sc->mbox);
3510 		if (rc != 0) {
3511 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
3512 			goto done;
3513 		}
3514 	} else {
3515 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3516 		sc->cfcsum = 0;
3517 	}
3518 
3519 done:
3520 	free(card_fw, M_CXGBE);
3521 	if (fw != NULL)
3522 		firmware_put(fw, FIRMWARE_UNLOAD);
3523 	if (default_cfg != NULL)
3524 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
3525 
3526 	return (rc);
3527 }
3528 
3529 #define FW_PARAM_DEV(param) \
3530 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3531 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3532 #define FW_PARAM_PFVF(param) \
3533 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3534 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
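
/*
 * Sketch of how these macros pair with t4_query_params to read a single
 * device parameter, as done with FW_PARAM_DEV(CF) in partition_resources()
 * below:
 *
 *	uint32_t param, val;
 *
 *	param = FW_PARAM_DEV(CF);
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *
 * On success, val encodes the memtype and offset for the config file.
 */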
3535 
3536 /*
3537  * Partition chip resources for use between various PFs, VFs, etc.
3538  */
3539 static int
3540 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3541     const char *name_prefix)
3542 {
3543 	const struct firmware *cfg = NULL;
3544 	int rc = 0;
3545 	struct fw_caps_config_cmd caps;
3546 	uint32_t mtype, moff, finicsum, cfcsum;
3547 
3548 	/*
3549 	 * Figure out what configuration file to use.  Pick the default config
3550 	 * file for the card if the user hasn't specified one explicitly.
3551 	 */
3552 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3553 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3554 		/* Card specific overrides go here. */
3555 		if (pci_get_device(sc->dev) == 0x440a)
3556 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3557 		if (is_fpga(sc))
3558 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3559 	} else if (strncmp(t4_cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0)
3560 		goto use_built_in_config;	/* go straight to config. */
3561 
3562 	/*
3563 	 * We need to load another module if the profile is anything except
3564 	 * "default" or "flash".
3565 	 */
3566 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3567 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3568 		char s[32];
3569 
3570 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3571 		cfg = firmware_get(s);
3572 		if (cfg == NULL) {
3573 			if (default_cfg != NULL) {
3574 				device_printf(sc->dev,
3575 				    "unable to load module \"%s\" for "
3576 				    "configuration profile \"%s\", will use "
3577 				    "the default config file instead.\n",
3578 				    s, sc->cfg_file);
3579 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3580 				    "%s", DEFAULT_CF);
3581 			} else {
3582 				device_printf(sc->dev,
3583 				    "unable to load module \"%s\" for "
3584 				    "configuration profile \"%s\", will use "
3585 				    "the config file on the card's flash "
3586 				    "instead.\n", s, sc->cfg_file);
3587 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3588 				    "%s", FLASH_CF);
3589 			}
3590 		}
3591 	}
3592 
3593 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3594 	    default_cfg == NULL) {
3595 		device_printf(sc->dev,
3596 		    "default config file not available, will use the config "
3597 		    "file on the card's flash instead.\n");
3598 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3599 	}
3600 
3601 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3602 		u_int cflen;
3603 		const uint32_t *cfdata;
3604 		uint32_t param, val, addr;
3605 
3606 		KASSERT(cfg != NULL || default_cfg != NULL,
3607 		    ("%s: no config to upload", __func__));
3608 
3609 		/*
3610 		 * Ask the firmware where it wants us to upload the config file.
3611 		 */
3612 		param = FW_PARAM_DEV(CF);
3613 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3614 		if (rc != 0) {
3615 			/* No support for config file?  Shouldn't happen. */
3616 			device_printf(sc->dev,
3617 			    "failed to query config file location: %d.\n", rc);
3618 			goto done;
3619 		}
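		/*
		 * The reply carries the memory type in PARAM_Y and the
		 * offset within that memory in 64KB units in PARAM_Z
		 * (hence the << 16 to get a byte offset).
		 */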
3620 		mtype = G_FW_PARAMS_PARAM_Y(val);
3621 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3622 
3623 		/*
3624 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
3625 		 * useless stuffing/comments at the end of the config file so
3626 		 * it's ok to simply throw away the last remaining bytes when
3627 		 * the config file is not an exact multiple of 4.  This also
3628 		 * helps with the validate_mt_off_len check.
3629 		 */
3630 		if (cfg != NULL) {
3631 			cflen = cfg->datasize & ~3;
3632 			cfdata = cfg->data;
3633 		} else {
3634 			cflen = default_cfg->datasize & ~3;
3635 			cfdata = default_cfg->data;
3636 		}
3637 
3638 		if (cflen > FLASH_CFG_MAX_SIZE) {
3639 			device_printf(sc->dev,
3640 			    "config file too long (%d, max allowed is %d).  "
3641 			    "Will try to use the config on the card, if any.\n",
3642 			    cflen, FLASH_CFG_MAX_SIZE);
3643 			goto use_config_on_flash;
3644 		}
3645 
3646 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3647 		if (rc != 0) {
3648 			device_printf(sc->dev,
3649 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
3650 			    "Will try to use the config on the card, if any.\n",
3651 			    __func__, mtype, moff, cflen, rc);
3652 			goto use_config_on_flash;
3653 		}
3654 		write_via_memwin(sc, 2, addr, cfdata, cflen);
3655 	} else {
3656 use_config_on_flash:
3657 		mtype = FW_MEMTYPE_FLASH;
3658 		moff = t4_flash_cfg_addr(sc);
3659 	}
3660 
3661 	bzero(&caps, sizeof(caps));
3662 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3663 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
3664 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3665 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3666 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3667 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3668 	if (rc != 0) {
3669 		device_printf(sc->dev,
3670 		    "failed to pre-process config file: %d "
3671 		    "(mtype %d, moff 0x%x).  Will reset the firmware and retry "
3672 		    "with the built-in configuration.\n", rc, mtype, moff);
3673 
3674 		rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
3675 		if (rc != 0) {
3676 			device_printf(sc->dev,
3677 			    "firmware reset failed: %d.\n", rc);
3678 			if (rc != ETIMEDOUT && rc != EIO) {
3679 				t4_fw_bye(sc, sc->mbox);
3680 				sc->flags &= ~FW_OK;
3681 			}
3682 			goto done;
3683 		}
3684 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "built-in");
3685 use_built_in_config:
3686 		bzero(&caps, sizeof(caps));
3687 		caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3688 		    F_FW_CMD_REQUEST | F_FW_CMD_READ);
3689 		caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3690 		rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3691 		if (rc != 0) {
3692 			device_printf(sc->dev,
3693 			    "built-in configuration failed: %d.\n", rc);
3694 			goto done;
3695 		}
3696 	}
3697 
3698 	finicsum = be32toh(caps.finicsum);
3699 	cfcsum = be32toh(caps.cfcsum);
3700 	if (finicsum != cfcsum) {
3701 		device_printf(sc->dev,
3702 		    "WARNING: config file checksum mismatch: %08x %08x\n",
3703 		    finicsum, cfcsum);
3704 	}
3705 	sc->cfcsum = cfcsum;
3706 
3707 #define LIMIT_CAPS(x) do { \
3708 	caps.x &= htobe16(t4_##x##_allowed); \
3709 } while (0)
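/*
 * E.g. LIMIT_CAPS(toecaps) masks the firmware-reported caps.toecaps with
 * the t4_toecaps_allowed tunable.  The mask is byte-swapped because the
 * fields in the caps command are big-endian.
 */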
3710 
3711 	/*
3712 	 * Let the firmware know what features will (not) be used so it can tune
3713 	 * things accordingly.
3714 	 */
3715 	LIMIT_CAPS(nbmcaps);
3716 	LIMIT_CAPS(linkcaps);
3717 	LIMIT_CAPS(switchcaps);
3718 	LIMIT_CAPS(niccaps);
3719 	LIMIT_CAPS(toecaps);
3720 	LIMIT_CAPS(rdmacaps);
3721 	LIMIT_CAPS(cryptocaps);
3722 	LIMIT_CAPS(iscsicaps);
3723 	LIMIT_CAPS(fcoecaps);
3724 #undef LIMIT_CAPS
3725 
3726 	if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
3727 		/*
3728 		 * TOE and hashfilters are mutually exclusive.  It is a config
3729 		 * file or firmware bug if both are reported as available.  Try
3730 		 * to cope with the situation in non-debug builds by disabling
3731 		 * TOE.
3732 		 */
3733 		MPASS(caps.toecaps == 0);
3734 
3735 		caps.toecaps = 0;
3736 		caps.rdmacaps = 0;
3737 		caps.iscsicaps = 0;
3738 	}
3739 
3740 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3741 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3742 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3743 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3744 	if (rc != 0) {
3745 		device_printf(sc->dev,
3746 		    "failed to process config file: %d.\n", rc);
3747 	}
3748 done:
3749 	if (cfg != NULL)
3750 		firmware_put(cfg, FIRMWARE_UNLOAD);
3751 	return (rc);
3752 }
3753 
3754 /*
3755  * Retrieve parameters that are needed (or nice to have) very early.
3756  */
3757 static int
3758 get_params__pre_init(struct adapter *sc)
3759 {
3760 	int rc;
3761 	uint32_t param[2], val[2];
3762 
3763 	t4_get_version_info(sc);
3764 
3765 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3766 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3767 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3768 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3769 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3770 
3771 	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3772 	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3773 	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3774 	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3775 	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3776 
3777 	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3778 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3779 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3780 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3781 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3782 
3783 	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3784 	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3785 	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3786 	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3787 	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3788 
3789 	param[0] = FW_PARAM_DEV(PORTVEC);
3790 	param[1] = FW_PARAM_DEV(CCLK);
3791 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3792 	if (rc != 0) {
3793 		device_printf(sc->dev,
3794 		    "failed to query parameters (pre_init): %d.\n", rc);
3795 		return (rc);
3796 	}
3797 
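	/*
	 * portvec is a bitmap of the physical ports behind this PF.  For
	 * example, 0x5 means ports 0 and 2 are present, and
	 * bitcount32(0x5) == 2 is the port count.
	 */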
3798 	sc->params.portvec = val[0];
3799 	sc->params.nports = bitcount32(val[0]);
3800 	sc->params.vpd.cclk = val[1];
3801 
3802 	/* Read device log parameters. */
3803 	rc = -t4_init_devlog_params(sc, 1);
3804 	if (rc == 0)
3805 		fixup_devlog_params(sc);
3806 	else {
3807 		device_printf(sc->dev,
3808 		    "failed to get devlog parameters: %d.\n", rc);
3809 		rc = 0;	/* devlog isn't critical for device operation */
3810 	}
3811 
3812 	return (rc);
3813 }
3814 
3815 /*
3816  * Retrieve various parameters that are of interest to the driver.  The device
3817  * has been initialized by the firmware at this point.
3818  */
3819 static int
3820 get_params__post_init(struct adapter *sc)
3821 {
3822 	int rc;
3823 	uint32_t param[7], val[7];
3824 	struct fw_caps_config_cmd caps;
3825 
3826 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
3827 	param[1] = FW_PARAM_PFVF(EQ_START);
3828 	param[2] = FW_PARAM_PFVF(FILTER_START);
3829 	param[3] = FW_PARAM_PFVF(FILTER_END);
3830 	param[4] = FW_PARAM_PFVF(L2T_START);
3831 	param[5] = FW_PARAM_PFVF(L2T_END);
3832 	param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3833 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3834 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
3835 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
3836 	if (rc != 0) {
3837 		device_printf(sc->dev,
3838 		    "failed to query parameters (post_init): %d.\n", rc);
3839 		return (rc);
3840 	}
3841 
3842 	sc->sge.iq_start = val[0];
3843 	sc->sge.eq_start = val[1];
3844 	if (val[3] > val[2]) {
3845 		sc->tids.ftid_base = val[2];
3846 		sc->tids.ftid_end = val[3];
3847 		sc->tids.nftids = val[3] - val[2] + 1;
3848 	}
3849 	sc->vres.l2t.start = val[4];
3850 	sc->vres.l2t.size = val[5] - val[4] + 1;
3851 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3852 	    ("%s: L2 table size (%u) larger than expected (%u)",
3853 	    __func__, sc->vres.l2t.size, L2T_SIZE));
3854 	sc->params.core_vdd = val[6];
3855 
3856 	/*
3857 	 * MPSBGMAP is queried separately because only recent firmwares support
3858 	 * it as a parameter and we don't want the compound query above to fail
3859 	 * on older firmwares.
3860 	 */
3861 	param[0] = FW_PARAM_DEV(MPSBGMAP);
3862 	val[0] = 0;
3863 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3864 	if (rc == 0)
3865 		sc->params.mps_bg_map = val[0];
3866 	else
3867 		sc->params.mps_bg_map = 0;
3868 
3869 	/*
3870 	 * Determine whether the firmware supports the filter2 work request.
3871 	 * This is queried separately for the same reason as MPSBGMAP above.
3872 	 */
3873 	param[0] = FW_PARAM_DEV(FILTER2_WR);
3874 	val[0] = 0;
3875 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3876 	if (rc == 0)
3877 		sc->params.filter2_wr_support = val[0] != 0;
3878 	else
3879 		sc->params.filter2_wr_support = 0;
3880 
3881 	/* get capabilities */
3882 	bzero(&caps, sizeof(caps));
3883 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3884 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
3885 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3886 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3887 	if (rc != 0) {
3888 		device_printf(sc->dev,
3889 		    "failed to get card capabilities: %d.\n", rc);
3890 		return (rc);
3891 	}
3892 
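/*
 * The fields in the caps command arrive big-endian from the firmware;
 * convert each one to host order as it is copied into the softc.
 */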
3893 #define READ_CAPS(x) do { \
3894 	sc->x = be16toh(caps.x); \
3895 } while (0)
3896 	READ_CAPS(nbmcaps);
3897 	READ_CAPS(linkcaps);
3898 	READ_CAPS(switchcaps);
3899 	READ_CAPS(niccaps);
3900 	READ_CAPS(toecaps);
3901 	READ_CAPS(rdmacaps);
3902 	READ_CAPS(cryptocaps);
3903 	READ_CAPS(iscsicaps);
3904 	READ_CAPS(fcoecaps);
3905 
3906 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
3907 		MPASS(chip_id(sc) > CHELSIO_T4);
3908 		MPASS(sc->toecaps == 0);
3909 		sc->toecaps = 0;
3910 
3911 		param[0] = FW_PARAM_DEV(NTID);
3912 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3913 		if (rc != 0) {
3914 			device_printf(sc->dev,
3915 			    "failed to query HASHFILTER parameters: %d.\n", rc);
3916 			return (rc);
3917 		}
3918 		sc->tids.ntids = val[0];
3919 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3920 		sc->params.hash_filter = 1;
3921 	}
3922 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3923 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3924 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3925 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3926 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3927 		if (rc != 0) {
3928 			device_printf(sc->dev,
3929 			    "failed to query NIC parameters: %d.\n", rc);
3930 			return (rc);
3931 		}
3932 		if (val[1] > val[0]) {
3933 			sc->tids.etid_base = val[0];
3934 			sc->tids.etid_end = val[1];
3935 			sc->tids.netids = val[1] - val[0] + 1;
3936 			sc->params.eo_wr_cred = val[2];
3937 			sc->params.ethoffload = 1;
3938 		}
3939 	}
3940 	if (sc->toecaps) {
3941 		/* query offload-related parameters */
3942 		param[0] = FW_PARAM_DEV(NTID);
3943 		param[1] = FW_PARAM_PFVF(SERVER_START);
3944 		param[2] = FW_PARAM_PFVF(SERVER_END);
3945 		param[3] = FW_PARAM_PFVF(TDDP_START);
3946 		param[4] = FW_PARAM_PFVF(TDDP_END);
3947 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3948 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3949 		if (rc != 0) {
3950 			device_printf(sc->dev,
3951 			    "failed to query TOE parameters: %d.\n", rc);
3952 			return (rc);
3953 		}
3954 		sc->tids.ntids = val[0];
3955 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3956 		if (val[2] > val[1]) {
3957 			sc->tids.stid_base = val[1];
3958 			sc->tids.nstids = val[2] - val[1] + 1;
3959 		}
3960 		sc->vres.ddp.start = val[3];
3961 		sc->vres.ddp.size = val[4] - val[3] + 1;
3962 		sc->params.ofldq_wr_cred = val[5];
3963 		sc->params.offload = 1;
3964 	} else {
3965 		/*
3966 		 * The firmware attempts memfree TOE configuration for -SO cards
3967 		 * and will report toecaps=0 if it runs out of resources (this
3968 		 * depends on the config file).  It may not report 0 for other
3969 		 * capabilities dependent on the TOE in this case.  Set them to
3970 		 * 0 here so that the driver doesn't bother tracking resources
3971 		 * that will never be used.
3972 		 */
3973 		sc->iscsicaps = 0;
3974 		sc->rdmacaps = 0;
3975 	}
3976 	if (sc->rdmacaps) {
3977 		param[0] = FW_PARAM_PFVF(STAG_START);
3978 		param[1] = FW_PARAM_PFVF(STAG_END);
3979 		param[2] = FW_PARAM_PFVF(RQ_START);
3980 		param[3] = FW_PARAM_PFVF(RQ_END);
3981 		param[4] = FW_PARAM_PFVF(PBL_START);
3982 		param[5] = FW_PARAM_PFVF(PBL_END);
3983 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3984 		if (rc != 0) {
3985 			device_printf(sc->dev,
3986 			    "failed to query RDMA parameters(1): %d.\n", rc);
3987 			return (rc);
3988 		}
3989 		sc->vres.stag.start = val[0];
3990 		sc->vres.stag.size = val[1] - val[0] + 1;
3991 		sc->vres.rq.start = val[2];
3992 		sc->vres.rq.size = val[3] - val[2] + 1;
3993 		sc->vres.pbl.start = val[4];
3994 		sc->vres.pbl.size = val[5] - val[4] + 1;
3995 
3996 		param[0] = FW_PARAM_PFVF(SQRQ_START);
3997 		param[1] = FW_PARAM_PFVF(SQRQ_END);
3998 		param[2] = FW_PARAM_PFVF(CQ_START);
3999 		param[3] = FW_PARAM_PFVF(CQ_END);
4000 		param[4] = FW_PARAM_PFVF(OCQ_START);
4001 		param[5] = FW_PARAM_PFVF(OCQ_END);
4002 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4003 		if (rc != 0) {
4004 			device_printf(sc->dev,
4005 			    "failed to query RDMA parameters(2): %d.\n", rc);
4006 			return (rc);
4007 		}
4008 		sc->vres.qp.start = val[0];
4009 		sc->vres.qp.size = val[1] - val[0] + 1;
4010 		sc->vres.cq.start = val[2];
4011 		sc->vres.cq.size = val[3] - val[2] + 1;
4012 		sc->vres.ocq.start = val[4];
4013 		sc->vres.ocq.size = val[5] - val[4] + 1;
4014 
4015 		param[0] = FW_PARAM_PFVF(SRQ_START);
4016 		param[1] = FW_PARAM_PFVF(SRQ_END);
4017 		param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4018 		param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4019 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4020 		if (rc != 0) {
4021 			device_printf(sc->dev,
4022 			    "failed to query RDMA parameters(3): %d.\n", rc);
4023 			return (rc);
4024 		}
4025 		sc->vres.srq.start = val[0];
4026 		sc->vres.srq.size = val[1] - val[0] + 1;
4027 		sc->params.max_ordird_qp = val[2];
4028 		sc->params.max_ird_adapter = val[3];
4029 	}
4030 	if (sc->iscsicaps) {
4031 		param[0] = FW_PARAM_PFVF(ISCSI_START);
4032 		param[1] = FW_PARAM_PFVF(ISCSI_END);
4033 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4034 		if (rc != 0) {
4035 			device_printf(sc->dev,
4036 			    "failed to query iSCSI parameters: %d.\n", rc);
4037 			return (rc);
4038 		}
4039 		sc->vres.iscsi.start = val[0];
4040 		sc->vres.iscsi.size = val[1] - val[0] + 1;
4041 	}
4042 	if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4043 		param[0] = FW_PARAM_PFVF(TLS_START);
4044 		param[1] = FW_PARAM_PFVF(TLS_END);
4045 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4046 		if (rc != 0) {
4047 			device_printf(sc->dev,
4048 			    "failed to query TLS parameters: %d.\n", rc);
4049 			return (rc);
4050 		}
4051 		sc->vres.key.start = val[0];
4052 		sc->vres.key.size = val[1] - val[0] + 1;
4053 	}
4054 
4055 	t4_init_sge_params(sc);
4056 
4057 	/*
4058 	 * We've got the params we wanted to query via the firmware.  Now grab
4059 	 * some others directly from the chip.
4060 	 */
4061 	rc = t4_read_chip_settings(sc);
4062 
4063 	return (rc);
4064 }
4065 
4066 static int
4067 set_params__post_init(struct adapter *sc)
4068 {
4069 	uint32_t param, val;
4070 #ifdef TCP_OFFLOAD
4071 	int i, v, shift;
4072 #endif
4073 
4074 	/* ask for encapsulated CPLs */
4075 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4076 	val = 1;
4077 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4078 
4079 #ifdef TCP_OFFLOAD
4080 	/*
4081 	 * Override the TOE timers with user provided tunables.  This is not the
4082 	 * recommended way to change the timers (the firmware config file is) so
4083 	 * these tunables are not documented.
4084 	 *
4085 	 * All the timer tunables are in microseconds.
4086 	 */
4087 	if (t4_toe_keepalive_idle != 0) {
4088 		v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4089 		v &= M_KEEPALIVEIDLE;
4090 		t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4091 		    V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4092 	}
4093 	if (t4_toe_keepalive_interval != 0) {
4094 		v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4095 		v &= M_KEEPALIVEINTVL;
4096 		t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4097 		    V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4098 	}
4099 	if (t4_toe_keepalive_count != 0) {
4100 		v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4101 		t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4102 		    V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4103 		    V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4104 		    V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4105 	}
4106 	if (t4_toe_rexmt_min != 0) {
4107 		v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4108 		v &= M_RXTMIN;
4109 		t4_set_reg_field(sc, A_TP_RXT_MIN,
4110 		    V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4111 	}
4112 	if (t4_toe_rexmt_max != 0) {
4113 		v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4114 		v &= M_RXTMAX;
4115 		t4_set_reg_field(sc, A_TP_RXT_MAX,
4116 		    V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4117 	}
4118 	if (t4_toe_rexmt_count != 0) {
4119 		v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4120 		t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4121 		    V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4122 		    V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4123 		    V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4124 	}
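	/*
	 * Each 32-bit A_TP_TCP_BACKOFF_REGx register packs four 8-bit
	 * backoff indices, so entry i lives at byte (i & 3) of register
	 * (i / 4); that's what the shift and the (i & ~3) register offset
	 * below compute.
	 */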
4125 	for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4126 		if (t4_toe_rexmt_backoff[i] != -1) {
4127 			v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4128 			shift = (i & 3) << 3;
4129 			t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4130 			    M_TIMERBACKOFFINDEX0 << shift, v << shift);
4131 		}
4132 	}
4133 #endif
4134 	return (0);
4135 }
4136 
4137 #undef FW_PARAM_PFVF
4138 #undef FW_PARAM_DEV
4139 
4140 static void
4141 t4_set_desc(struct adapter *sc)
4142 {
4143 	char buf[128];
4144 	struct adapter_params *p = &sc->params;
4145 
4146 	snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4147 
4148 	device_set_desc_copy(sc->dev, buf);
4149 }
4150 
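/*
 * Add a media word along with its three flow control variants (TX pause,
 * RX pause, and both) so that every pause combination is selectable.
 */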
4151 static inline void
4152 ifmedia_add4(struct ifmedia *ifm, int m)
4153 {
4154 
4155 	ifmedia_add(ifm, m, 0, NULL);
4156 	ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4157 	ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4158 	ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
4159 }
4160 
4161 static void
4162 set_current_media(struct port_info *pi, struct ifmedia *ifm)
4163 {
4164 	struct link_config *lc;
4165 	int mword;
4166 
4167 	PORT_LOCK_ASSERT_OWNED(pi);
4168 
4169 	/* Leave current media alone if it's already set to IFM_NONE. */
4170 	if (ifm->ifm_cur != NULL &&
4171 	    IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4172 		return;
4173 
4174 	lc = &pi->link_cfg;
4175 	if (lc->requested_aneg == AUTONEG_ENABLE &&
4176 	    lc->supported & FW_PORT_CAP_ANEG) {
4177 		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4178 		return;
4179 	}
4180 	mword = IFM_ETHER | IFM_FDX;
4181 	if (lc->requested_fc & PAUSE_TX)
4182 		mword |= IFM_ETH_TXPAUSE;
4183 	if (lc->requested_fc & PAUSE_RX)
4184 		mword |= IFM_ETH_RXPAUSE;
4185 	mword |= port_mword(pi, speed_to_fwspeed(lc->requested_speed));
4186 	ifmedia_set(ifm, mword);
4187 }
4188 
4189 static void
4190 build_medialist(struct port_info *pi, struct ifmedia *ifm)
4191 {
4192 	uint16_t ss, speed;
4193 	int unknown, mword, bit;
4194 	struct link_config *lc;
4195 
4196 	PORT_LOCK_ASSERT_OWNED(pi);
4197 
4198 	if (pi->flags & FIXED_IFMEDIA)
4199 		return;
4200 
4201 	/*
4202 	 * First set up all the requested_ fields so that they comply with what's
4203 	 * supported by the port + transceiver.  Note that this clobbers any
4204 	 * user preferences set via sysctl_pause_settings or sysctl_autoneg.
4205 	 */
4206 	init_l1cfg(pi);
4207 
4208 	/*
4209 	 * Now (re)build the ifmedia list.
4210 	 */
4211 	ifmedia_removeall(ifm);
4212 	lc = &pi->link_cfg;
4213 	ss = G_FW_PORT_CAP_SPEED(lc->supported); /* Supported Speeds */
4214 	if (__predict_false(ss == 0)) {	/* not supposed to happen. */
4215 		MPASS(ss != 0);
4216 no_media:
4217 		MPASS(LIST_EMPTY(&ifm->ifm_list));
4218 		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4219 		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4220 		return;
4221 	}
4222 
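	/*
	 * Walk every set bit in the supported-speed bitmap and translate
	 * it to an ifmedia subtype with port_mword().  IFM_NONE means the
	 * media can't be expressed at all; all IFM_UNKNOWN entries are
	 * collapsed into a single media word below.
	 */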
4223 	unknown = 0;
4224 	for (bit = 0; bit < fls(ss); bit++) {
4225 		speed = 1 << bit;
4226 		MPASS(speed & M_FW_PORT_CAP_SPEED);
4227 		if (ss & speed) {
4228 			mword = port_mword(pi, speed);
4229 			if (mword == IFM_NONE) {
4230 				goto no_media;
4231 			} else if (mword == IFM_UNKNOWN)
4232 				unknown++;
4233 			else
4234 				ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4235 		}
4236 	}
4237 	if (unknown > 0) /* Add one unknown for all unknown media types. */
4238 		ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4239 	if (lc->supported & FW_PORT_CAP_ANEG)
4240 		ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4241 
4242 	set_current_media(pi, ifm);
4243 }
4244 
4245 /*
4246  * Update all the requested_* fields in the link config to something valid (and
4247  * reasonable).
4248  */
4249 static void
4250 init_l1cfg(struct port_info *pi)
4251 {
4252 	struct link_config *lc = &pi->link_cfg;
4253 
4254 	PORT_LOCK_ASSERT_OWNED(pi);
4255 
4256 	/* Gbps -> Mbps */
4257 	lc->requested_speed = port_top_speed(pi) * 1000;
4258 
4259 	if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) {
4260 		lc->requested_aneg = AUTONEG_ENABLE;
4261 	} else {
4262 		lc->requested_aneg = AUTONEG_DISABLE;
4263 	}
4264 
4265 	lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX);
4266 
4267 	if (t4_fec != -1) {
4268 		if (t4_fec & FEC_RS && lc->supported & FW_PORT_CAP_FEC_RS) {
4269 			lc->requested_fec = FEC_RS;
4270 		} else if (t4_fec & FEC_BASER_RS &&
4271 		    lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4272 			lc->requested_fec = FEC_BASER_RS;
4273 		} else {
4274 			lc->requested_fec = 0;
4275 		}
4276 	} else {
4277 		/* Use the suggested value provided by the firmware in acaps */
4278 		if (lc->advertising & FW_PORT_CAP_FEC_RS &&
4279 		    lc->supported & FW_PORT_CAP_FEC_RS) {
4280 			lc->requested_fec = FEC_RS;
4281 		} else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS &&
4282 		    lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4283 			lc->requested_fec = FEC_BASER_RS;
4284 		} else {
4285 			lc->requested_fec = 0;
4286 		}
4287 	}
4288 }
4289 
4290 /*
4291  * Apply the settings in requested_* to the hardware.  The parameters are
4292  * expected to be sane.
4293  */
4294 static int
4295 apply_l1cfg(struct port_info *pi)
4296 {
4297 	struct adapter *sc = pi->adapter;
4298 	struct link_config *lc = &pi->link_cfg;
4299 	int rc;
4300 #ifdef INVARIANTS
4301 	uint16_t fwspeed;
4302 
4303 	ASSERT_SYNCHRONIZED_OP(sc);
4304 	PORT_LOCK_ASSERT_OWNED(pi);
4305 
4306 	if (lc->requested_aneg == AUTONEG_ENABLE)
4307 		MPASS(lc->supported & FW_PORT_CAP_ANEG);
4308 	if (lc->requested_fc & PAUSE_TX)
4309 		MPASS(lc->supported & FW_PORT_CAP_FC_TX);
4310 	if (lc->requested_fc & PAUSE_RX)
4311 		MPASS(lc->supported & FW_PORT_CAP_FC_RX);
4312 	if (lc->requested_fec == FEC_RS)
4313 		MPASS(lc->supported & FW_PORT_CAP_FEC_RS);
4314 	if (lc->requested_fec == FEC_BASER_RS)
4315 		MPASS(lc->supported & FW_PORT_CAP_FEC_BASER_RS);
4316 	fwspeed = speed_to_fwspeed(lc->requested_speed);
4317 	MPASS(fwspeed != 0);
4318 	MPASS(lc->supported & fwspeed);
4319 #endif
4320 	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4321 	if (rc != 0) {
4322 		device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4323 	} else {
4324 		lc->fc = lc->requested_fc;
4325 		lc->fec = lc->requested_fec;
4326 	}
4327 	return (rc);
4328 }
4329 
4330 #define FW_MAC_EXACT_CHUNK	7
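/*
 * The firmware takes at most FW_MAC_EXACT_CHUNK exact-match MAC addresses
 * per t4_alloc_mac_filt() call, so the multicast list is programmed in
 * chunks; addresses that don't get an exact filter are accumulated in the
 * hash and programmed with t4_set_addr_hash() at the end.
 */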
4331 
4332 /*
4333  * Program the port's XGMAC based on parameters in ifnet.  The caller also
4334  * indicates which parameters should be programmed (the rest are left alone).
4335  */
4336 int
4337 update_mac_settings(struct ifnet *ifp, int flags)
4338 {
4339 	int rc = 0;
4340 	struct vi_info *vi = ifp->if_softc;
4341 	struct port_info *pi = vi->pi;
4342 	struct adapter *sc = pi->adapter;
4343 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
4344 
4345 	ASSERT_SYNCHRONIZED_OP(sc);
4346 	KASSERT(flags, ("%s: not told what to update.", __func__));
4347 
4348 	if (flags & XGMAC_MTU)
4349 		mtu = ifp->if_mtu;
4350 
4351 	if (flags & XGMAC_PROMISC)
4352 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4353 
4354 	if (flags & XGMAC_ALLMULTI)
4355 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4356 
4357 	if (flags & XGMAC_VLANEX)
4358 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4359 
4360 	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4361 		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4362 		    allmulti, 1, vlanex, false);
4363 		if (rc) {
4364 			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4365 			    rc);
4366 			return (rc);
4367 		}
4368 	}
4369 
4370 	if (flags & XGMAC_UCADDR) {
4371 		uint8_t ucaddr[ETHER_ADDR_LEN];
4372 
4373 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4374 		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4375 		    ucaddr, true, true);
4376 		if (rc < 0) {
4377 			rc = -rc;
4378 			if_printf(ifp, "change_mac failed: %d\n", rc);
4379 			return (rc);
4380 		} else {
4381 			vi->xact_addr_filt = rc;
4382 			rc = 0;
4383 		}
4384 	}
4385 
4386 	if (flags & XGMAC_MCADDRS) {
4387 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4388 		int del = 1;
4389 		uint64_t hash = 0;
4390 		struct ifmultiaddr *ifma;
4391 		int i = 0, j;
4392 
4393 		if_maddr_rlock(ifp);
4394 		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4395 			if (ifma->ifma_addr->sa_family != AF_LINK)
4396 				continue;
4397 			mcaddr[i] =
4398 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4399 			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4400 			i++;
4401 
4402 			if (i == FW_MAC_EXACT_CHUNK) {
4403 				rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4404 				    del, i, mcaddr, NULL, &hash, 0);
4405 				if (rc < 0) {
4406 					rc = -rc;
4407 					for (j = 0; j < i; j++) {
4408 						if_printf(ifp,
4409 						    "failed to add mc address"
4410 						    " %02x:%02x:%02x:"
4411 						    "%02x:%02x:%02x rc=%d\n",
4412 						    mcaddr[j][0], mcaddr[j][1],
4413 						    mcaddr[j][2], mcaddr[j][3],
4414 						    mcaddr[j][4], mcaddr[j][5],
4415 						    rc);
4416 					}
4417 					goto mcfail;
4418 				}
4419 				del = 0;
4420 				i = 0;
4421 			}
4422 		}
4423 		if (i > 0) {
4424 			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4425 			    mcaddr, NULL, &hash, 0);
4426 			if (rc < 0) {
4427 				rc = -rc;
4428 				for (j = 0; j < i; j++) {
4429 					if_printf(ifp,
4430 					    "failed to add mc address"
4431 					    " %02x:%02x:%02x:"
4432 					    "%02x:%02x:%02x rc=%d\n",
4433 					    mcaddr[j][0], mcaddr[j][1],
4434 					    mcaddr[j][2], mcaddr[j][3],
4435 					    mcaddr[j][4], mcaddr[j][5],
4436 					    rc);
4437 				}
4438 				goto mcfail;
4439 			}
4440 		}
4441 
4442 		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4443 		if (rc != 0)
4444 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
4445 mcfail:
4446 		if_maddr_runlock(ifp);
4447 	}
4448 
4449 	return (rc);
4450 }
4451 
4452 /*
4453  * {begin|end}_synchronized_op must be called from the same thread.
4454  */
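/*
 * Sketch of typical usage ("t4xyz" is a made-up wait message; each caller
 * picks its own):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc != 0)
 *		return (rc);
 *	... the adapter/VI state can be modified safely here ...
 *	end_synchronized_op(sc, 0);
 */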
4455 int
4456 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4457     char *wmesg)
4458 {
4459 	int rc, pri;
4460 
4461 #ifdef WITNESS
4462 	/* the caller thinks it's ok to sleep, but is it really? */
4463 	if (flags & SLEEP_OK)
4464 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4465 		    "begin_synchronized_op");
4466 #endif
4467 
4468 	if (flags & INTR_OK)
4469 		pri = PCATCH;
4470 	else
4471 		pri = 0;
4472 
4473 	ADAPTER_LOCK(sc);
4474 	for (;;) {
4475 
4476 		if (vi && IS_DOOMED(vi)) {
4477 			rc = ENXIO;
4478 			goto done;
4479 		}
4480 
4481 		if (!IS_BUSY(sc)) {
4482 			rc = 0;
4483 			break;
4484 		}
4485 
4486 		if (!(flags & SLEEP_OK)) {
4487 			rc = EBUSY;
4488 			goto done;
4489 		}
4490 
4491 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4492 			rc = EINTR;
4493 			goto done;
4494 		}
4495 	}
4496 
4497 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4498 	SET_BUSY(sc);
4499 #ifdef INVARIANTS
4500 	sc->last_op = wmesg;
4501 	sc->last_op_thr = curthread;
4502 	sc->last_op_flags = flags;
4503 #endif
4504 
4505 done:
4506 	if (!(flags & HOLD_LOCK) || rc)
4507 		ADAPTER_UNLOCK(sc);
4508 
4509 	return (rc);
4510 }
4511 
4512 /*
4513  * Tell if_ioctl and if_init that the VI is going away.  This is a
4514  * special variant of begin_synchronized_op and must be paired with a
4515  * call to end_synchronized_op.
4516  */
4517 void
4518 doom_vi(struct adapter *sc, struct vi_info *vi)
4519 {
4520 
4521 	ADAPTER_LOCK(sc);
4522 	SET_DOOMED(vi);
4523 	wakeup(&sc->flags);
4524 	while (IS_BUSY(sc))
4525 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4526 	SET_BUSY(sc);
4527 #ifdef INVARIANTS
4528 	sc->last_op = "t4detach";
4529 	sc->last_op_thr = curthread;
4530 	sc->last_op_flags = 0;
4531 #endif
4532 	ADAPTER_UNLOCK(sc);
4533 }
4534 
4535 /*
4536  * {begin|end}_synchronized_op must be called from the same thread.
4537  */
4538 void
4539 end_synchronized_op(struct adapter *sc, int flags)
4540 {
4541 
4542 	if (flags & LOCK_HELD)
4543 		ADAPTER_LOCK_ASSERT_OWNED(sc);
4544 	else
4545 		ADAPTER_LOCK(sc);
4546 
4547 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
4548 	CLR_BUSY(sc);
4549 	wakeup(&sc->flags);
4550 	ADAPTER_UNLOCK(sc);
4551 }
4552 
4553 static int
4554 cxgbe_init_synchronized(struct vi_info *vi)
4555 {
4556 	struct port_info *pi = vi->pi;
4557 	struct adapter *sc = pi->adapter;
4558 	struct ifnet *ifp = vi->ifp;
4559 	int rc = 0, i;
4560 	struct sge_txq *txq;
4561 
4562 	ASSERT_SYNCHRONIZED_OP(sc);
4563 
4564 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4565 		return (0);	/* already running */
4566 
4567 	if (!(sc->flags & FULL_INIT_DONE) &&
4568 	    ((rc = adapter_full_init(sc)) != 0))
4569 		return (rc);	/* error message displayed already */
4570 
4571 	if (!(vi->flags & VI_INIT_DONE) &&
4572 	    ((rc = vi_full_init(vi)) != 0))
4573 		return (rc); /* error message displayed already */
4574 
4575 	rc = update_mac_settings(ifp, XGMAC_ALL);
4576 	if (rc)
4577 		goto done;	/* error message displayed already */
4578 
4579 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
4580 	if (rc != 0) {
4581 		if_printf(ifp, "enable_vi failed: %d\n", rc);
4582 		goto done;
4583 	}
4584 
4585 	/*
4586 	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
4587 	 * if this changes.
4588 	 */
4589 
4590 	for_each_txq(vi, i, txq) {
4591 		TXQ_LOCK(txq);
4592 		txq->eq.flags |= EQ_ENABLED;
4593 		TXQ_UNLOCK(txq);
4594 	}
4595 
4596 	/*
4597 	 * The first iq of the first port to come up is used for tracing.
4598 	 */
4599 	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
4600 		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
4601 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
4602 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
4603 		    V_QUEUENUMBER(sc->traceq));
4604 		pi->flags |= HAS_TRACEQ;
4605 	}
4606 
4607 	/* all ok */
4608 	PORT_LOCK(pi);
4609 	if (pi->up_vis++ == 0) {
4610 		t4_update_port_info(pi);
4611 		build_medialist(pi, &pi->media);
4612 		apply_l1cfg(pi);
4613 	}
4614 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
4615 
4616 	if (pi->nvi > 1 || sc->flags & IS_VF)
4617 		callout_reset(&vi->tick, hz, vi_tick, vi);
4618 	else
4619 		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
4620 	PORT_UNLOCK(pi);
4621 done:
4622 	if (rc != 0)
4623 		cxgbe_uninit_synchronized(vi);
4624 
4625 	return (rc);
4626 }
4627 
4628 /*
4629  * Idempotent.
4630  */
4631 static int
4632 cxgbe_uninit_synchronized(struct vi_info *vi)
4633 {
4634 	struct port_info *pi = vi->pi;
4635 	struct adapter *sc = pi->adapter;
4636 	struct ifnet *ifp = vi->ifp;
4637 	int rc, i;
4638 	struct sge_txq *txq;
4639 
4640 	ASSERT_SYNCHRONIZED_OP(sc);
4641 
4642 	if (!(vi->flags & VI_INIT_DONE)) {
4643 		if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4644 			KASSERT(0, ("uninited VI is running"));
4645 			if_printf(ifp, "uninited VI with running ifnet.  "
4646 			    "vi->flags 0x%016lx, if_flags 0x%08x, "
4647 			    "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
4648 			    ifp->if_drv_flags);
4649 		}
4650 		return (0);
4651 	}
4652 
4653 	/*
4654 	 * Disable the VI so that all its data in either direction is discarded
4655 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
4656 	 * tick) intact as the TP can deliver negative advice or data that it's
4657 	 * holding in its RAM (for an offloaded connection) even after the VI is
4658 	 * disabled.
4659 	 */
4660 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4661 	if (rc) {
4662 		if_printf(ifp, "disable_vi failed: %d\n", rc);
4663 		return (rc);
4664 	}
4665 
4666 	for_each_txq(vi, i, txq) {
4667 		TXQ_LOCK(txq);
4668 		txq->eq.flags &= ~EQ_ENABLED;
4669 		TXQ_UNLOCK(txq);
4670 	}
4671 
4672 	PORT_LOCK(pi);
4673 	if (pi->nvi > 1 || sc->flags & IS_VF)
4674 		callout_stop(&vi->tick);
4675 	else
4676 		callout_stop(&pi->tick);
4677 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4678 		PORT_UNLOCK(pi);
4679 		return (0);
4680 	}
4681 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4682 	pi->up_vis--;
4683 	if (pi->up_vis > 0) {
4684 		PORT_UNLOCK(pi);
4685 		return (0);
4686 	}
4687 
4688 	pi->link_cfg.link_ok = 0;
4689 	pi->link_cfg.speed = 0;
4690 	pi->link_cfg.link_down_rc = 255;
4691 	t4_os_link_changed(pi);
4692 	pi->old_link_cfg = pi->link_cfg;
4693 	PORT_UNLOCK(pi);
4694 
4695 	return (0);
4696 }
4697 
4698 /*
4699  * It is ok for this function to fail midway and return right away.  t4_detach
4700  * will walk the entire sc->irq list and clean up whatever is valid.
4701  */
4702 int
4703 t4_setup_intr_handlers(struct adapter *sc)
4704 {
4705 	int rc, rid, p, q, v;
4706 	char s[8];
4707 	struct irq *irq;
4708 	struct port_info *pi;
4709 	struct vi_info *vi;
4710 	struct sge *sge = &sc->sge;
4711 	struct sge_rxq *rxq;
4712 #ifdef TCP_OFFLOAD
4713 	struct sge_ofld_rxq *ofld_rxq;
4714 #endif
4715 #ifdef DEV_NETMAP
4716 	struct sge_nm_rxq *nm_rxq;
4717 #endif
4718 #ifdef RSS
4719 	int nbuckets = rss_getnumbuckets();
4720 #endif
4721 
4722 	/*
4723 	 * Set up interrupts.
4724 	 */
4725 	irq = &sc->irq[0];
4726 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
4727 	if (forwarding_intr_to_fwq(sc))
4728 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4729 
4730 	/* Multiple interrupts. */
4731 	if (sc->flags & IS_VF)
4732 		KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4733 		    ("%s: too few intr.", __func__));
4734 	else
4735 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4736 		    ("%s: too few intr.", __func__));
4737 
4738 	/* The first one is always error intr on PFs */
4739 	if (!(sc->flags & IS_VF)) {
4740 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4741 		if (rc != 0)
4742 			return (rc);
4743 		irq++;
4744 		rid++;
4745 	}
4746 
4747 	/* The second one is always the firmware event queue (first on VFs) */
4748 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4749 	if (rc != 0)
4750 		return (rc);
4751 	irq++;
4752 	rid++;
4753 
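	/*
	 * Queue interrupts are described as "<port><vi><queue>" in hex,
	 * with the VI encoded as a letter: e.g. "0a1" is rxq 1 of the
	 * first VI on port 0.  Offload queues use 'A' + v instead.
	 */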
4754 	for_each_port(sc, p) {
4755 		pi = sc->port[p];
4756 		for_each_vi(pi, v, vi) {
4757 			vi->first_intr = rid - 1;
4758 
4759 			if (vi->nnmrxq > 0) {
4760 				int n = max(vi->nrxq, vi->nnmrxq);
4761 
4762 				rxq = &sge->rxq[vi->first_rxq];
4763 #ifdef DEV_NETMAP
4764 				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4765 #endif
4766 				for (q = 0; q < n; q++) {
4767 					snprintf(s, sizeof(s), "%x%c%x", p,
4768 					    'a' + v, q);
4769 					if (q < vi->nrxq)
4770 						irq->rxq = rxq++;
4771 #ifdef DEV_NETMAP
4772 					if (q < vi->nnmrxq)
4773 						irq->nm_rxq = nm_rxq++;
4774 #endif
4775 					rc = t4_alloc_irq(sc, irq, rid,
4776 					    t4_vi_intr, irq, s);
4777 					if (rc != 0)
4778 						return (rc);
4779 #ifdef RSS
4780 					if (q < vi->nrxq) {
4781 						bus_bind_intr(sc->dev, irq->res,
4782 						    rss_getcpu(q % nbuckets));
4783 					}
4784 #endif
4785 					irq++;
4786 					rid++;
4787 					vi->nintr++;
4788 				}
4789 			} else {
4790 				for_each_rxq(vi, q, rxq) {
4791 					snprintf(s, sizeof(s), "%x%c%x", p,
4792 					    'a' + v, q);
4793 					rc = t4_alloc_irq(sc, irq, rid,
4794 					    t4_intr, rxq, s);
4795 					if (rc != 0)
4796 						return (rc);
4797 #ifdef RSS
4798 					bus_bind_intr(sc->dev, irq->res,
4799 					    rss_getcpu(q % nbuckets));
4800 #endif
4801 					irq++;
4802 					rid++;
4803 					vi->nintr++;
4804 				}
4805 			}
4806 #ifdef TCP_OFFLOAD
4807 			for_each_ofld_rxq(vi, q, ofld_rxq) {
4808 				snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
4809 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
4810 				    ofld_rxq, s);
4811 				if (rc != 0)
4812 					return (rc);
4813 				irq++;
4814 				rid++;
4815 				vi->nintr++;
4816 			}
4817 #endif
4818 		}
4819 	}
4820 	MPASS(irq == &sc->irq[sc->intr_count]);
4821 
4822 	return (0);
4823 }
4824 
4825 int
4826 adapter_full_init(struct adapter *sc)
4827 {
4828 	int rc, i;
4829 #ifdef RSS
4830 	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4831 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4832 #endif
4833 
4834 	ASSERT_SYNCHRONIZED_OP(sc);
4835 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4836 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
4837 	    ("%s: FULL_INIT_DONE already", __func__));
4838 
4839 	/*
4840 	 * queues that belong to the adapter (not any particular port).
4841 	 */
4842 	rc = t4_setup_adapter_queues(sc);
4843 	if (rc != 0)
4844 		goto done;
4845 
4846 	for (i = 0; i < nitems(sc->tq); i++) {
4847 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
4848 		    taskqueue_thread_enqueue, &sc->tq[i]);
4849 		if (sc->tq[i] == NULL) {
4850 			device_printf(sc->dev,
4851 			    "failed to allocate task queue %d\n", i);
4852 			rc = ENOMEM;
4853 			goto done;
4854 		}
4855 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
4856 		    device_get_nameunit(sc->dev), i);
4857 	}
4858 #ifdef RSS
4859 	MPASS(RSS_KEYSIZE == 40);
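	/*
	 * The key is written to the hardware with its 32-bit words in
	 * reverse order and each word byte-swapped relative to the
	 * kernel's layout; the loop below does that conversion.
	 */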
4860 	rss_getkey((void *)&raw_rss_key[0]);
4861 	for (i = 0; i < nitems(rss_key); i++) {
4862 		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
4863 	}
4864 	t4_write_rss_key(sc, &rss_key[0], -1, 1);
4865 #endif
4866 
4867 	if (!(sc->flags & IS_VF))
4868 		t4_intr_enable(sc);
4869 	sc->flags |= FULL_INIT_DONE;
4870 done:
4871 	if (rc != 0)
4872 		adapter_full_uninit(sc);
4873 
4874 	return (rc);
4875 }
4876 
4877 int
4878 adapter_full_uninit(struct adapter *sc)
4879 {
4880 	int i;
4881 
4882 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4883 
4884 	t4_teardown_adapter_queues(sc);
4885 
4886 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4887 		taskqueue_free(sc->tq[i]);
4888 		sc->tq[i] = NULL;
4889 	}
4890 
4891 	sc->flags &= ~FULL_INIT_DONE;
4892 
4893 	return (0);
4894 }
4895 
4896 #ifdef RSS
4897 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4898     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4899     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4900     RSS_HASHTYPE_RSS_UDP_IPV6)
4901 
4902 /* Translates kernel hash types to hardware. */
4903 static int
4904 hashconfig_to_hashen(int hashconfig)
4905 {
4906 	int hashen = 0;
4907 
4908 	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4909 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4910 	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4911 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
4912 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4913 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4914 		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4915 	}
4916 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4917 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4918 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4919 	}
4920 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4921 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4922 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4923 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4924 
4925 	return (hashen);
4926 }
4927 
4928 /* Translates hardware hash types to kernel. */
4929 static int
4930 hashen_to_hashconfig(int hashen)
4931 {
4932 	int hashconfig = 0;
4933 
4934 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4935 		/*
4936 		 * If UDP hashing was enabled it must have been enabled for
4937 		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
4938 		 * enabling any 4-tuple hash is a nonsensical configuration.
4939 		 */
4940 		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4941 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4942 
4943 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4944 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4945 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4946 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4947 	}
4948 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4949 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4950 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4951 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4952 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4953 		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4954 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4955 		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4956 
4957 	return (hashconfig);
4958 }
4959 #endif
4960 
4961 int
4962 vi_full_init(struct vi_info *vi)
4963 {
4964 	struct adapter *sc = vi->pi->adapter;
4965 	struct ifnet *ifp = vi->ifp;
4966 	uint16_t *rss;
4967 	struct sge_rxq *rxq;
4968 	int rc, i, j, hashen;
4969 #ifdef RSS
4970 	int nbuckets = rss_getnumbuckets();
4971 	int hashconfig = rss_gethashconfig();
4972 	int extra;
4973 #endif
4974 
4975 	ASSERT_SYNCHRONIZED_OP(sc);
4976 	KASSERT((vi->flags & VI_INIT_DONE) == 0,
4977 	    ("%s: VI_INIT_DONE already", __func__));
4978 
4979 	sysctl_ctx_init(&vi->ctx);
4980 	vi->flags |= VI_SYSCTL_CTX;
4981 
4982 	/*
4983 	 * Allocate tx/rx/fl queues for this VI.
4984 	 */
4985 	rc = t4_setup_vi_queues(vi);
4986 	if (rc != 0)
4987 		goto done;	/* error message displayed already */
4988 
4989 	/*
4990 	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
4991 	 */
4992 	if (vi->nrxq > vi->rss_size) {
4993 		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4994 		    "some queues will never receive traffic.\n", vi->nrxq,
4995 		    vi->rss_size);
4996 	} else if (vi->rss_size % vi->nrxq) {
4997 		if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4998 		    "expect uneven traffic distribution.\n", vi->nrxq,
4999 		    vi->rss_size);
5000 	}
5001 #ifdef RSS
5002 	if (vi->nrxq != nbuckets) {
5003 		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5004 		    "performance will be impacted.\n", vi->nrxq, nbuckets);
5005 	}
5006 #endif
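	/*
	 * Fill all rss_size slots of the indirection table with absolute
	 * ingress queue ids.  With kernel RSS the slot -> rxq mapping
	 * follows the kernel's own indirection table (mod nrxq); otherwise
	 * the rxqs are repeated round-robin until the table is full.
	 */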
5007 	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5008 	for (i = 0; i < vi->rss_size;) {
5009 #ifdef RSS
5010 		j = rss_get_indirection_to_bucket(i);
5011 		j %= vi->nrxq;
5012 		rxq = &sc->sge.rxq[vi->first_rxq + j];
5013 		rss[i++] = rxq->iq.abs_id;
5014 #else
5015 		for_each_rxq(vi, j, rxq) {
5016 			rss[i++] = rxq->iq.abs_id;
5017 			if (i == vi->rss_size)
5018 				break;
5019 		}
5020 #endif
5021 	}
5022 
5023 	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
5024 	    vi->rss_size);
5025 	if (rc != 0) {
5026 		if_printf(ifp, "rss_config failed: %d\n", rc);
5027 		goto done;
5028 	}
5029 
5030 #ifdef RSS
5031 	hashen = hashconfig_to_hashen(hashconfig);
5032 
5033 	/*
5034 	 * We may have had to enable some hashes even though the global config
5035 	 * wants them disabled.  This is a potential problem that must be
5036 	 * reported to the user.
5037 	 */
5038 	extra = hashen_to_hashconfig(hashen) ^ hashconfig;
5039 
5040 	/*
5041 	 * If we consider only the supported hash types, then the enabled hashes
5042 	 * are a superset of the requested hashes.  In other words, there cannot
5043 	 * be any supported hash that was requested but not enabled, but there
5044 	 * can be hashes that were not requested but had to be enabled.
5045 	 */
5046 	extra &= SUPPORTED_RSS_HASHTYPES;
5047 	MPASS((extra & hashconfig) == 0);
5048 
5049 	if (extra) {
5050 		if_printf(ifp,
5051 		    "global RSS config (0x%x) cannot be accommodated.\n",
5052 		    hashconfig);
5053 	}
5054 	if (extra & RSS_HASHTYPE_RSS_IPV4)
5055 		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
5056 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
5057 		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
5058 	if (extra & RSS_HASHTYPE_RSS_IPV6)
5059 		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
5060 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
5061 		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
5062 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
5063 		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
5064 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
5065 		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
5066 #else
5067 	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
5068 	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
5069 	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5070 	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
5071 #endif
5072 	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
5073 	if (rc != 0) {
5074 		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
5075 		goto done;
5076 	}
5077 
5078 	vi->rss = rss;
5079 	vi->flags |= VI_INIT_DONE;
5080 done:
5081 	if (rc != 0)
5082 		vi_full_uninit(vi);
5083 
5084 	return (rc);
5085 }
5086 
5087 /*
5088  * Idempotent.
5089  */
5090 int
5091 vi_full_uninit(struct vi_info *vi)
5092 {
5093 	struct port_info *pi = vi->pi;
5094 	struct adapter *sc = pi->adapter;
5095 	int i;
5096 	struct sge_rxq *rxq;
5097 	struct sge_txq *txq;
5098 #ifdef TCP_OFFLOAD
5099 	struct sge_ofld_rxq *ofld_rxq;
5100 	struct sge_wrq *ofld_txq;
5101 #endif
5102 
5103 	if (vi->flags & VI_INIT_DONE) {
5104 
5105 		/* Need to quiesce queues.  */
5106 
5107 		/* XXX: Only for the first VI? */
5108 		if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
5109 			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
5110 
5111 		for_each_txq(vi, i, txq) {
5112 			quiesce_txq(sc, txq);
5113 		}
5114 
5115 #ifdef TCP_OFFLOAD
5116 		for_each_ofld_txq(vi, i, ofld_txq) {
5117 			quiesce_wrq(sc, ofld_txq);
5118 		}
5119 #endif
5120 
5121 		for_each_rxq(vi, i, rxq) {
5122 			quiesce_iq(sc, &rxq->iq);
5123 			quiesce_fl(sc, &rxq->fl);
5124 		}
5125 
5126 #ifdef TCP_OFFLOAD
5127 		for_each_ofld_rxq(vi, i, ofld_rxq) {
5128 			quiesce_iq(sc, &ofld_rxq->iq);
5129 			quiesce_fl(sc, &ofld_rxq->fl);
5130 		}
5131 #endif
5132 		free(vi->rss, M_CXGBE);
5133 		free(vi->nm_rss, M_CXGBE);
5134 	}
5135 
5136 	t4_teardown_vi_queues(vi);
5137 	vi->flags &= ~VI_INIT_DONE;
5138 
5139 	return (0);
5140 }
5141 
5142 static void
5143 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
5144 {
5145 	struct sge_eq *eq = &txq->eq;
5146 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
5147 
5148 	(void) sc;	/* unused */
5149 
5150 #ifdef INVARIANTS
5151 	TXQ_LOCK(txq);
5152 	MPASS((eq->flags & EQ_ENABLED) == 0);
5153 	TXQ_UNLOCK(txq);
5154 #endif
5155 
5156 	/* Wait for the mp_ring to empty. */
5157 	while (!mp_ring_is_idle(txq->r)) {
5158 		mp_ring_check_drainage(txq->r, 0);
5159 		pause("rquiesce", 1);
5160 	}
5161 
5162 	/* Then wait for the hardware to finish. */
5163 	while (spg->cidx != htobe16(eq->pidx))
5164 		pause("equiesce", 1);
5165 
5166 	/* Finally, wait for the driver to reclaim all descriptors. */
5167 	while (eq->cidx != eq->pidx)
5168 		pause("dquiesce", 1);
5169 }
5170 
5171 static void
5172 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
5173 {
5174 
5175 	/* XXXTX */
5176 }
5177 
5178 static void
5179 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
5180 {
5181 	(void) sc;	/* unused */
5182 
5183 	/* Synchronize with the interrupt handler */
5184 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
5185 		pause("iqfree", 1);
5186 }
5187 
5188 static void
5189 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
5190 {
5191 	mtx_lock(&sc->sfl_lock);
5192 	FL_LOCK(fl);
5193 	fl->flags |= FL_DOOMED;
5194 	FL_UNLOCK(fl);
5195 	callout_stop(&sc->sfl_callout);
5196 	mtx_unlock(&sc->sfl_lock);
5197 
5198 	KASSERT((fl->flags & FL_STARVING) == 0,
5199 	    ("%s: still starving", __func__));
5200 }
5201 
5202 static int
5203 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
5204     driver_intr_t *handler, void *arg, char *name)
5205 {
5206 	int rc;
5207 
5208 	irq->rid = rid;
5209 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
5210 	    RF_SHAREABLE | RF_ACTIVE);
5211 	if (irq->res == NULL) {
5212 		device_printf(sc->dev,
5213 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
5214 		return (ENOMEM);
5215 	}
5216 
5217 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
5218 	    NULL, handler, arg, &irq->tag);
5219 	if (rc != 0) {
5220 		device_printf(sc->dev,
5221 		    "failed to setup interrupt for rid %d, name %s: %d\n",
5222 		    rid, name, rc);
5223 	} else if (name)
5224 		bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
5225 
5226 	return (rc);
5227 }
5228 
5229 static int
5230 t4_free_irq(struct adapter *sc, struct irq *irq)
5231 {
5232 	if (irq->tag)
5233 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
5234 	if (irq->res)
5235 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
5236 
5237 	bzero(irq, sizeof(*irq));
5238 
5239 	return (0);
5240 }
5241 
5242 static void
5243 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
5244 {
5245 
5246 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
5247 	t4_get_regs(sc, buf, regs->len);
5248 }
5249 
5250 #define	A_PL_INDIR_CMD	0x1f8
5251 
5252 #define	S_PL_AUTOINC	31
5253 #define	M_PL_AUTOINC	0x1U
5254 #define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
5255 #define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
5256 
5257 #define	S_PL_VFID	20
5258 #define	M_PL_VFID	0xffU
5259 #define	V_PL_VFID(x)	((x) << S_PL_VFID)
5260 #define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)
5261 
5262 #define	S_PL_ADDR	0
5263 #define	M_PL_ADDR	0xfffffU
5264 #define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
5265 #define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)
5266 
5267 #define	A_PL_INDIR_DATA	0x1fc
5268 
5269 static uint64_t
5270 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
5271 {
5272 	u32 stats[2];
5273 
5274 	mtx_assert(&sc->reg_lock, MA_OWNED);
5275 	if (sc->flags & IS_VF) {
5276 		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
5277 		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
5278 	} else {
5279 		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5280 		    V_PL_VFID(G_FW_VIID_VIN(viid)) |
5281 		    V_PL_ADDR(VF_MPS_REG(reg)));
5282 		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
5283 		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
5284 	}
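	/*
	 * Each stat is a 64-bit counter split across two consecutive
	 * 32-bit registers, low word first.  With AUTOINC set, the two
	 * back-to-back reads of A_PL_INDIR_DATA above return the _L and
	 * then the _H half.
	 */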
5285 	return (((uint64_t)stats[1]) << 32 | stats[0]);
5286 }
5287 
5288 static void
5289 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
5290     struct fw_vi_stats_vf *stats)
5291 {
5292 
5293 #define GET_STAT(name) \
5294 	read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
5295 
5296 	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
5297 	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
5298 	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
5299 	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
5300 	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
5301 	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
5302 	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
5303 	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
5304 	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
5305 	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
5306 	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
5307 	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
5308 	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
5309 	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
5310 	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
5311 	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);
5312 
5313 #undef GET_STAT
5314 }
5315 
5316 static void
5317 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
5318 {
5319 	int reg;
5320 
5321 	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5322 	    V_PL_VFID(G_FW_VIID_VIN(viid)) |
5323 	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
5324 	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
5325 	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
5326 		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
5327 }
5328 
5329 static void
5330 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
5331 {
5332 	struct timeval tv;
5333 	const struct timeval interval = {0, 250000};	/* 250ms */
5334 
5335 	if (!(vi->flags & VI_INIT_DONE))
5336 		return;
5337 
5338 	getmicrotime(&tv);
5339 	timevalsub(&tv, &interval);
5340 	if (timevalcmp(&tv, &vi->last_refreshed, <))
5341 		return;
5342 
5343 	mtx_lock(&sc->reg_lock);
5344 	t4_get_vi_stats(sc, vi->viid, &vi->stats);
5345 	getmicrotime(&vi->last_refreshed);
5346 	mtx_unlock(&sc->reg_lock);
5347 }
5348 
5349 static void
5350 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
5351 {
5352 	u_int i, v, tnl_cong_drops, bg_map;
5353 	struct timeval tv;
5354 	const struct timeval interval = {0, 250000};	/* 250ms */
5355 
5356 	getmicrotime(&tv);
5357 	timevalsub(&tv, &interval);
5358 	if (timevalcmp(&tv, &pi->last_refreshed, <))
5359 		return;
5360 
5361 	tnl_cong_drops = 0;
5362 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
5363 	bg_map = pi->mps_bg_map;
5364 	while (bg_map) {
5365 		i = ffs(bg_map) - 1;
5366 		mtx_lock(&sc->reg_lock);
5367 		t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
5368 		    A_TP_MIB_TNL_CNG_DROP_0 + i);
5369 		mtx_unlock(&sc->reg_lock);
5370 		tnl_cong_drops += v;
5371 		bg_map &= ~(1 << i);
5372 	}
5373 	pi->tnl_cong_drops = tnl_cong_drops;
5374 	getmicrotime(&pi->last_refreshed);
5375 }
5376 
5377 static void
5378 cxgbe_tick(void *arg)
5379 {
5380 	struct port_info *pi = arg;
5381 	struct adapter *sc = pi->adapter;
5382 
5383 	PORT_LOCK_ASSERT_OWNED(pi);
5384 	cxgbe_refresh_stats(sc, pi);
5385 
5386 	callout_schedule(&pi->tick, hz);
5387 }
5388 
5389 void
5390 vi_tick(void *arg)
5391 {
5392 	struct vi_info *vi = arg;
5393 	struct adapter *sc = vi->pi->adapter;
5394 
5395 	vi_refresh_stats(sc, vi);
5396 
5397 	callout_schedule(&vi->tick, hz);
5398 }
5399 
5400 static void
5401 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
5402 {
5403 	struct ifnet *vlan;
5404 
5405 	if (arg != ifp || ifp->if_type != IFT_ETHER)
5406 		return;
5407 
5408 	vlan = VLAN_DEVAT(ifp, vid);
5409 	VLAN_SETCOOKIE(vlan, ifp);
5410 }
5411 
5412 /*
5413  * Should match fw_caps_config_<foo> enums in t4fw_interface.h
5414  */
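/*
 * These are "%b"-style bit-format strings: the leading \20 selects base-16
 * output and each \<bitno>NAME entry names a 1-based bit, so a capability
 * value of 0x3 decoded with caps_decoder[0] would render roughly as
 * "0x3<IPMI,NCSI>".
 */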
5415 static char *caps_decoder[] = {
5416 	"\20\001IPMI\002NCSI",				/* 0: NBM */
5417 	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
5418 	"\20\001INGRESS\002EGRESS",			/* 2: switch */
5419 	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
5420 	    "\006HASHFILTER\007ETHOFLD",
5421 	"\20\001TOE",					/* 4: TOE */
5422 	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
5423 	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
5424 	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
5425 	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
5426 	    "\007T10DIF"
5427 	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
5428 	"\20\001LOOKASIDE\002TLSKEYS",			/* 7: Crypto */
5429 	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
5430 		    "\004PO_INITIATOR\005PO_TARGET",
5431 };
5432 
5433 void
5434 t4_sysctls(struct adapter *sc)
5435 {
5436 	struct sysctl_ctx_list *ctx;
5437 	struct sysctl_oid *oid;
5438 	struct sysctl_oid_list *children, *c0;
5439 	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
5440 
5441 	ctx = device_get_sysctl_ctx(sc->dev);
5442 
5443 	/*
5444 	 * dev.t4nex.X.
5445 	 */
5446 	oid = device_get_sysctl_tree(sc->dev);
5447 	c0 = children = SYSCTL_CHILDREN(oid);
5448 
5449 	sc->sc_do_rxcopy = 1;
5450 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5451 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5452 
5453 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5454 	    sc->params.nports, "# of ports");
5455 
5456 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5457 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells,
5458 	    sysctl_bitfield_8b, "A", "available doorbells");
5459 
5460 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5461 	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
5462 
5463 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5464 	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
5465 	    sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
5466 	    "interrupt holdoff timer values (us)");
5467 
5468 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5469 	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
5470 	    sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
5471 	    "interrupt holdoff packet counter values");
5472 
5473 	t4_sge_sysctls(sc, ctx, children);
5474 
5475 	sc->lro_timeout = 100;
5476 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5477 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5478 
5479 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
5480 	    &sc->debug_flags, 0, "flags to enable runtime debugging");
5481 
5482 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
5483 	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
5484 
5485 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5486 	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5487 
5488 	if (sc->flags & IS_VF)
5489 		return;
5490 
5491 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5492 	    NULL, chip_rev(sc), "chip hardware revision");
5493 
5494 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
5495 	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
5496 
5497 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
5498 	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
5499 
5500 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
5501 	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
5502 
5503 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
5504 	    CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
5505 
5506 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
5507 	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
5508 
5509 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
5510 	    sc->er_version, 0, "expansion ROM version");
5511 
5512 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
5513 	    sc->bs_version, 0, "bootstrap firmware version");
5514 
5515 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
5516 	    NULL, sc->params.scfg_vers, "serial config version");
5517 
5518 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
5519 	    NULL, sc->params.vpd_vers, "VPD version");
5520 
5521 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5522 	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5523 
5524 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5525 	    sc->cfcsum, "config file checksum");
5526 
5527 #define SYSCTL_CAP(name, n, text) \
5528 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
5529 	    CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \
5530 	    sysctl_bitfield_16b, "A", "available " text " capabilities")
5531 
5532 	SYSCTL_CAP(nbmcaps, 0, "NBM");
5533 	SYSCTL_CAP(linkcaps, 1, "link");
5534 	SYSCTL_CAP(switchcaps, 2, "switch");
5535 	SYSCTL_CAP(niccaps, 3, "NIC");
5536 	SYSCTL_CAP(toecaps, 4, "TCP offload");
5537 	SYSCTL_CAP(rdmacaps, 5, "RDMA");
5538 	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
5539 	SYSCTL_CAP(cryptocaps, 7, "crypto");
5540 	SYSCTL_CAP(fcoecaps, 8, "FCoE");
5541 #undef SYSCTL_CAP
5542 
5543 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5544 	    NULL, sc->tids.nftids, "number of filters");
5545 
5546 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5547 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5548 	    "chip temperature (in Celsius)");
5549 
5550 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING |
5551 	    CTLFLAG_RD, sc, 0, sysctl_loadavg, "A",
5552 	    "microprocessor load averages (debug and custom firmwares only)");
5553 
5554 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD,
5555 	    &sc->params.core_vdd, 0, "core Vdd (in mV)");
5556 
5557 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
5558 	    CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS,
5559 	    sysctl_cpus, "A", "local CPUs");
5560 
5561 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
5562 	    CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS,
5563 	    sysctl_cpus, "A", "preferred CPUs for interrupts");
5564 
5565 	/*
5566 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
5567 	 */
5568 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5569 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5570 	    "logs and miscellaneous information");
5571 	children = SYSCTL_CHILDREN(oid);
5572 
5573 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5574 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5575 	    sysctl_cctrl, "A", "congestion control");
5576 
5577 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5578 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5579 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5580 
5581 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5582 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5583 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5584 
5585 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5586 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5587 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5588 
5589 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5590 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5591 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5592 
5593 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5594 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5595 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5596 
5597 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5598 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5599 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5600 
5601 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5602 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5603 	    chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
5604 	    "A", "CIM logic analyzer");
5605 
5606 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5607 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5608 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5609 
5610 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5611 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5612 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5613 
5614 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5615 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5616 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5617 
5618 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5619 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5620 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5621 
5622 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5623 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5624 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5625 
5626 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5627 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5628 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5629 
5630 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5631 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5632 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5633 
5634 	if (chip_id(sc) > CHELSIO_T4) {
5635 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5636 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5637 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5638 
5639 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5640 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5641 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5642 	}
5643 
5644 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5645 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5646 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5647 
5648 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5649 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5650 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
5651 
5652 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5653 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5654 	    sysctl_cpl_stats, "A", "CPL statistics");
5655 
5656 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5657 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5658 	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5659 
5660 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5661 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5662 	    sysctl_devlog, "A", "firmware's device log");
5663 
5664 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5665 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5666 	    sysctl_fcoe_stats, "A", "FCoE statistics");
5667 
5668 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5669 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5670 	    sysctl_hw_sched, "A", "hardware scheduler");
5671 
5672 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5673 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5674 	    sysctl_l2t, "A", "hardware L2 table");
5675 
5676 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
5677 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5678 	    sysctl_smt, "A", "hardware source MAC table");
5679 
5680 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5681 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5682 	    sysctl_lb_stats, "A", "loopback statistics");
5683 
5684 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5685 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5686 	    sysctl_meminfo, "A", "memory regions");
5687 
5688 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5689 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5690 	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5691 	    "A", "MPS TCAM entries");
5692 
5693 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5694 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5695 	    sysctl_path_mtus, "A", "path MTUs");
5696 
5697 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5698 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5699 	    sysctl_pm_stats, "A", "PM statistics");
5700 
5701 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5702 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5703 	    sysctl_rdma_stats, "A", "RDMA statistics");
5704 
5705 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5706 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5707 	    sysctl_tcp_stats, "A", "TCP statistics");
5708 
5709 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5710 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5711 	    sysctl_tids, "A", "TID information");
5712 
5713 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5714 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5715 	    sysctl_tp_err_stats, "A", "TP error statistics");
5716 
5717 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5718 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5719 	    "TP logic analyzer event capture mask");
5720 
5721 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5722 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5723 	    sysctl_tp_la, "A", "TP logic analyzer");
5724 
5725 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5726 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5727 	    sysctl_tx_rate, "A", "Tx rate");
5728 
5729 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5730 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5731 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5732 
5733 	if (chip_id(sc) >= CHELSIO_T5) {
5734 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5735 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5736 		    sysctl_wcwr_stats, "A", "write combined work requests");
5737 	}
5738 
5739 #ifdef TCP_OFFLOAD
5740 	if (is_offload(sc)) {
5741 		int i;
5742 		char s[4];
5743 
5744 		/*
5745 		 * dev.t4nex.X.toe.
5746 		 */
5747 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5748 		    NULL, "TOE parameters");
5749 		children = SYSCTL_CHILDREN(oid);
5750 
5751 		sc->tt.cong_algorithm = -1;
5752 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
5753 		    CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
5754 		    "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
5755 		    "3 = highspeed)");
5756 
5757 		sc->tt.sndbuf = 256 * 1024;
5758 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5759 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
5760 
5761 		sc->tt.ddp = 0;
5762 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5763 		    &sc->tt.ddp, 0, "DDP allowed");
5764 
5765 		sc->tt.rx_coalesce = 1;
5766 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5767 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5768 
5769 		sc->tt.tls = 0;
5770 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
5771 		    &sc->tt.tls, 0, "Inline TLS allowed");
5772 
5773 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
5774 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
5775 		    "I", "TCP ports that use inline TLS+TOE RX");
5776 
5777 		sc->tt.tx_align = 1;
5778 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5779 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5780 
5781 		sc->tt.tx_zcopy = 0;
5782 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5783 		    CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5784 		    "Enable zero-copy aio_write(2)");
5785 
5786 		sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
5787 		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5788 		    "cop_managed_offloading", CTLFLAG_RW,
5789 		    &sc->tt.cop_managed_offloading, 0,
5790 		    "COP (Connection Offload Policy) controls all TOE offload");
5791 
5792 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5793 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5794 		    "TP timer tick (us)");
5795 
5796 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5797 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5798 		    "TCP timestamp tick (us)");
5799 
5800 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5801 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5802 		    "DACK tick (us)");
5803 
5804 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5805 		    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5806 		    "IU", "DACK timer (us)");
5807 
5808 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5809 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5810 		    sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");
5811 
5812 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5813 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5814 		    sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");
5815 
5816 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5817 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5818 		    sysctl_tp_timer, "LU", "Persist timer min (us)");
5819 
5820 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5821 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5822 		    sysctl_tp_timer, "LU", "Persist timer max (us)");
5823 
5824 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5825 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5826 		    sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
5827 
5828 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
5829 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
5830 		    sysctl_tp_timer, "LU", "Keepalive interval timer (us)");
5831 
5832 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
5833 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
5834 		    sysctl_tp_timer, "LU", "Initial SRTT (us)");
5835 
5836 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
5837 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
5838 		    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
5839 
5840 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
5841 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
5842 		    sysctl_tp_shift_cnt, "IU",
5843 		    "Number of SYN retransmissions before abort");
5844 
5845 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
5846 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
5847 		    sysctl_tp_shift_cnt, "IU",
5848 		    "Number of retransmissions before abort");
5849 
5850 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
5851 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
5852 		    sysctl_tp_shift_cnt, "IU",
5853 		    "Number of keepalive probes before abort");
5854 
5855 		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
5856 		    CTLFLAG_RD, NULL, "TOE retransmit backoffs");
5857 		children = SYSCTL_CHILDREN(oid);
5858 		for (i = 0; i < 16; i++) {
5859 			snprintf(s, sizeof(s), "%u", i);
5860 			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
5861 			    CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
5862 			    "IU", "TOE retransmit backoff");
5863 		}
5864 	}
5865 #endif
5866 }
5867 
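/*
 * Sysctls common to all VIs, added under each VI device's own node.
 */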
5868 void
5869 vi_sysctls(struct vi_info *vi)
5870 {
5871 	struct sysctl_ctx_list *ctx;
5872 	struct sysctl_oid *oid;
5873 	struct sysctl_oid_list *children;
5874 
5875 	ctx = device_get_sysctl_ctx(vi->dev);
5876 
5877 	/*
5878 	 * dev.v?(cxgbe|cxl).X.
5879 	 */
5880 	oid = device_get_sysctl_tree(vi->dev);
5881 	children = SYSCTL_CHILDREN(oid);
5882 
5883 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5884 	    vi->viid, "VI identifier");
5885 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5886 	    &vi->nrxq, 0, "# of rx queues");
5887 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5888 	    &vi->ntxq, 0, "# of tx queues");
5889 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5890 	    &vi->first_rxq, 0, "index of first rx queue");
5891 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5892 	    &vi->first_txq, 0, "index of first tx queue");
5893 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
5894 	    vi->rss_size, "size of RSS indirection table");
5895 
5896 	if (IS_MAIN_VI(vi)) {
5897 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
5898 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5899 		    "Reserve queue 0 for non-flowid packets");
5900 	}
5901 
5902 #ifdef TCP_OFFLOAD
5903 	if (vi->nofldrxq != 0) {
5904 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5905 		    &vi->nofldrxq, 0,
5906 		    "# of rx queues for offloaded TCP connections");
5907 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5908 		    &vi->nofldtxq, 0,
5909 		    "# of tx queues for offloaded TCP connections");
5910 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5911 		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5912 		    "index of first TOE rx queue");
5913 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5914 		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
5915 		    "index of first TOE tx queue");
5916 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
5917 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5918 		    sysctl_holdoff_tmr_idx_ofld, "I",
5919 		    "holdoff timer index for TOE queues");
5920 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
5921 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5922 		    sysctl_holdoff_pktc_idx_ofld, "I",
5923 		    "holdoff packet counter index for TOE queues");
5924 	}
5925 #endif
5926 #ifdef DEV_NETMAP
5927 	if (vi->nnmrxq != 0) {
5928 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5929 		    &vi->nnmrxq, 0, "# of netmap rx queues");
5930 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5931 		    &vi->nnmtxq, 0, "# of netmap tx queues");
5932 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5933 		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
5934 		    "index of first netmap rx queue");
5935 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5936 		    CTLFLAG_RD, &vi->first_nm_txq, 0,
5937 		    "index of first netmap tx queue");
5938 	}
5939 #endif
5940 
5941 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5942 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5943 	    "holdoff timer index");
5944 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5945 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5946 	    "holdoff packet counter index");
5947 
5948 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5949 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5950 	    "rx queue size");
5951 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5952 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
5953 	    "tx queue size");
5954 }
5955 
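/*
 * Port sysctls: link controls, Tx scheduler traffic classes, and MAC
 * statistics.  The per-register stats sysctls read the MPS counters
 * directly and are always current, unlike the pi->stats based ones further
 * below, which may be up to 1s stale.
 */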
5956 static void
5957 cxgbe_sysctls(struct port_info *pi)
5958 {
5959 	struct sysctl_ctx_list *ctx;
5960 	struct sysctl_oid *oid;
5961 	struct sysctl_oid_list *children, *children2;
5962 	struct adapter *sc = pi->adapter;
5963 	int i;
5964 	char name[16];
5965 	static char *tc_flags = "\20\1USER\2SYNC\3ASYNC\4ERR";
5966 
5967 	ctx = device_get_sysctl_ctx(pi->dev);
5968 
5969 	/*
5970 	 * dev.cxgbe.X.
5971 	 */
5972 	oid = device_get_sysctl_tree(pi->dev);
5973 	children = SYSCTL_CHILDREN(oid);
5974 
5975 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5976 	    CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5977 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5978 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5979 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5980 		    "PHY temperature (in Celsius)");
5981 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5982 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5983 		    "PHY firmware version");
5984 	}
5985 
5986 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5987 	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
5988 	    "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5989 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
5990 	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
5991 	    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
5992 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
5993 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
5994 	    "autonegotiation (-1 = not supported)");
5995 
5996 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5997 	    port_top_speed(pi), "max speed (in Gbps)");
5998 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
5999 	    pi->mps_bg_map, "MPS buffer group map");
6000 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
6001 	    NULL, pi->rx_e_chan_map, "TP rx e-channel map");
6002 
6003 	if (sc->flags & IS_VF)
6004 		return;
6005 
6006 	/*
6007 	 * dev.(cxgbe|cxl).X.tc.
6008 	 */
6009 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
6010 	    "Tx scheduler traffic classes (cl_rl)");
6011 	children2 = SYSCTL_CHILDREN(oid);
6012 	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
6013 	    CTLFLAG_RW, &pi->sched_params->pktsize, 0,
6014 	    "pktsize for per-flow cl-rl (0 means up to the driver)");
6015 	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
6016 	    CTLFLAG_RW, &pi->sched_params->burstsize, 0,
6017 	    "burstsize for per-flow cl-rl (0 means up to the driver)");
6018 	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
6019 		struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
6020 
6021 		snprintf(name, sizeof(name), "%d", i);
6022 		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
6023 		    SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
6024 		    "traffic class"));
6025 		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
6026 		    CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags,
6027 		    sysctl_bitfield_8b, "A", "flags");
6028 		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
6029 		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
6030 		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
6031 		    CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
6032 		    sysctl_tc_params, "A", "traffic class parameters");
6033 	}
6034 
6035 	/*
6036 	 * dev.cxgbe.X.stats.
6037 	 */
6038 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
6039 	    NULL, "port statistics");
6040 	children = SYSCTL_CHILDREN(oid);
6041 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
6042 	    &pi->tx_parse_error, 0,
6043 	    "# of tx packets with an invalid length or segment count");
6044 
6045 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
6046 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
6047 	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
6048 	    sysctl_handle_t4_reg64, "QU", desc)
6049 
6050 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
6051 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
6052 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
6053 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
6054 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
6055 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
6056 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
6057 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
6058 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
6059 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
6060 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
6061 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
6062 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
6063 	    "# of tx frames in this range",
6064 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
6065 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
6066 	    "# of tx frames in this range",
6067 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
6068 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
6069 	    "# of tx frames in this range",
6070 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
6071 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
6072 	    "# of tx frames in this range",
6073 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
6074 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
6075 	    "# of tx frames in this range",
6076 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
6077 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
6078 	    "# of tx frames in this range",
6079 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
6080 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
6081 	    "# of tx frames in this range",
6082 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
6083 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
6084 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
6085 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
6086 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
6087 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
6088 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
6089 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
6090 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
6091 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
6092 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
6093 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
6094 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
6095 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
6096 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
6097 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
6098 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
6099 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
6100 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
6101 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
6102 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
6103 
6104 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
6105 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
6106 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
6107 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
6108 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
6109 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
6110 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
6111 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
6112 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
6113 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
6114 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
6115 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
6116 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
6117 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
6118 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
6119 	    "# of frames received with bad FCS",
6120 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
6121 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
6122 	    "# of frames received with length error",
6123 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
6124 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
6125 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
6126 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
6127 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
6128 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
6129 	    "# of rx frames in this range",
6130 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
6131 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
6132 	    "# of rx frames in this range",
6133 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
6134 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
6135 	    "# of rx frames in this range",
6136 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
6137 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
6138 	    "# of rx frames in this range",
6139 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
6140 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
6141 	    "# of rx frames in this range",
6142 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
6143 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
6144 	    "# of rx frames in this range",
6145 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
6146 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
6147 	    "# of rx frames in this range",
6148 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
6149 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
6150 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
6151 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
6152 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
6153 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
6154 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
6155 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
6156 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
6157 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
6158 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
6159 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
6160 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
6161 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
6162 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
6163 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
6164 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
6165 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
6166 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
6167 
6168 #undef SYSCTL_ADD_T4_REG64
6169 
6170 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
6171 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
6172 	    &pi->stats.name, desc)
6173 
6174 	/* We get these from port_stats and they may be stale by up to 1s */
6175 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
6176 	    "# drops due to buffer-group 0 overflows");
6177 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
6178 	    "# drops due to buffer-group 1 overflows");
6179 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
6180 	    "# drops due to buffer-group 2 overflows");
6181 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
6182 	    "# drops due to buffer-group 3 overflows");
6183 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
6184 	    "# of buffer-group 0 truncated packets");
6185 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
6186 	    "# of buffer-group 1 truncated packets");
6187 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
6188 	    "# of buffer-group 2 truncated packets");
6189 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
6190 	    "# of buffer-group 3 truncated packets");
6191 
6192 #undef SYSCTL_ADD_T4_PORTSTAT
6193 
6194 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
6195 	    CTLFLAG_RD, &pi->tx_tls_records,
6196 	    "# of TLS records transmitted");
6197 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
6198 	    CTLFLAG_RD, &pi->tx_tls_octets,
6199 	    "# of payload octets in transmitted TLS records");
6200 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
6201 	    CTLFLAG_RD, &pi->rx_tls_records,
6202 	    "# of TLS records received");
6203 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
6204 	    CTLFLAG_RD, &pi->rx_tls_octets,
6205 	    "# of payload octets in received TLS records");
6206 }
6207 
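/*
 * arg1 points to an array of ints and arg2 is its size in bytes; display
 * the values as a space-separated list.
 */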
6208 static int
6209 sysctl_int_array(SYSCTL_HANDLER_ARGS)
6210 {
6211 	int rc, *i, space = 0;
6212 	struct sbuf sb;
6213 
6214 	sbuf_new_for_sysctl(&sb, NULL, 64, req);
6215 	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
6216 		if (space)
6217 			sbuf_printf(&sb, " ");
6218 		sbuf_printf(&sb, "%d", *i);
6219 		space = 1;
6220 	}
6221 	rc = sbuf_finish(&sb);
6222 	sbuf_delete(&sb);
6223 	return (rc);
6224 }
6225 
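/*
 * Display the 8-bit value at arg2 using the %b decoder string in arg1
 * (doorbells and tc_flags are displayed this way; caps_decoder[] uses the
 * 16-bit variant that follows).
 */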
6226 static int
6227 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
6228 {
6229 	int rc;
6230 	struct sbuf *sb;
6231 
6232 	rc = sysctl_wire_old_buffer(req, 0);
6233 	if (rc != 0)
6234 		return (rc);
6235 
6236 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6237 	if (sb == NULL)
6238 		return (ENOMEM);
6239 
6240 	sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
6241 	rc = sbuf_finish(sb);
6242 	sbuf_delete(sb);
6243 
6244 	return (rc);
6245 }
6246 
6247 static int
6248 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
6249 {
6250 	int rc;
6251 	struct sbuf *sb;
6252 
6253 	rc = sysctl_wire_old_buffer(req, 0);
6254 	if (rc != 0)
6255 		return (rc);
6256 
6257 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6258 	if (sb == NULL)
6259 		return (ENOMEM);
6260 
6261 	sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
6262 	rc = sbuf_finish(sb);
6263 	sbuf_delete(sb);
6264 
6265 	return (rc);
6266 }
6267 
6268 static int
6269 sysctl_btphy(SYSCTL_HANDLER_ARGS)
6270 {
6271 	struct port_info *pi = arg1;
6272 	int op = arg2;
6273 	struct adapter *sc = pi->adapter;
6274 	u_int v;
6275 	int rc;
6276 
6277 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
6278 	if (rc)
6279 		return (rc);
6280 	/* XXX: magic numbers */
6281 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
6282 	    &v);
6283 	end_synchronized_op(sc, 0);
6284 	if (rc)
6285 		return (rc);
6286 	if (op == 0)
6287 		v /= 256;
6288 
6289 	rc = sysctl_handle_int(oidp, &v, 0, req);
6290 	return (rc);
6291 }
6292 
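/*
 * rsrv_noflowq behaves as a bool: any nonzero value reserves txq 0 for
 * packets without a flowid, and only if the VI has more than one txq.
 */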
6293 static int
6294 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
6295 {
6296 	struct vi_info *vi = arg1;
6297 	int rc, val;
6298 
6299 	val = vi->rsrv_noflowq;
6300 	rc = sysctl_handle_int(oidp, &val, 0, req);
6301 	if (rc != 0 || req->newptr == NULL)
6302 		return (rc);
6303 
6304 	if ((val >= 1) && (vi->ntxq > 1))
6305 		vi->rsrv_noflowq = 1;
6306 	else
6307 		vi->rsrv_noflowq = 0;
6308 
6309 	return (rc);
6310 }
6311 
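/*
 * The holdoff timer index can be changed while the queues are live; the new
 * value is pushed out to every rxq's interrupt parameters.  The packet
 * counter index (next handler) can change only before the queues are
 * created.
 */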
6312 static int
6313 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
6314 {
6315 	struct vi_info *vi = arg1;
6316 	struct adapter *sc = vi->pi->adapter;
6317 	int idx, rc, i;
6318 	struct sge_rxq *rxq;
6319 	uint8_t v;
6320 
6321 	idx = vi->tmr_idx;
6322 
6323 	rc = sysctl_handle_int(oidp, &idx, 0, req);
6324 	if (rc != 0 || req->newptr == NULL)
6325 		return (rc);
6326 
6327 	if (idx < 0 || idx >= SGE_NTIMERS)
6328 		return (EINVAL);
6329 
6330 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6331 	    "t4tmr");
6332 	if (rc)
6333 		return (rc);
6334 
6335 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
6336 	for_each_rxq(vi, i, rxq) {
6337 #ifdef atomic_store_rel_8
6338 		atomic_store_rel_8(&rxq->iq.intr_params, v);
6339 #else
6340 		rxq->iq.intr_params = v;
6341 #endif
6342 	}
6343 	vi->tmr_idx = idx;
6344 
6345 	end_synchronized_op(sc, LOCK_HELD);
6346 	return (0);
6347 }
6348 
6349 static int
6350 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
6351 {
6352 	struct vi_info *vi = arg1;
6353 	struct adapter *sc = vi->pi->adapter;
6354 	int idx, rc;
6355 
6356 	idx = vi->pktc_idx;
6357 
6358 	rc = sysctl_handle_int(oidp, &idx, 0, req);
6359 	if (rc != 0 || req->newptr == NULL)
6360 		return (rc);
6361 
6362 	if (idx < -1 || idx >= SGE_NCOUNTERS)
6363 		return (EINVAL);
6364 
6365 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6366 	    "t4pktc");
6367 	if (rc)
6368 		return (rc);
6369 
6370 	if (vi->flags & VI_INIT_DONE)
6371 		rc = EBUSY; /* cannot be changed once the queues are created */
6372 	else
6373 		vi->pktc_idx = idx;
6374 
6375 	end_synchronized_op(sc, LOCK_HELD);
6376 	return (rc);
6377 }
6378 
6379 static int
6380 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
6381 {
6382 	struct vi_info *vi = arg1;
6383 	struct adapter *sc = vi->pi->adapter;
6384 	int qsize, rc;
6385 
6386 	qsize = vi->qsize_rxq;
6387 
6388 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
6389 	if (rc != 0 || req->newptr == NULL)
6390 		return (rc);
6391 
6392 	if (qsize < 128 || (qsize & 7))
6393 		return (EINVAL);
6394 
6395 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6396 	    "t4rxqs");
6397 	if (rc)
6398 		return (rc);
6399 
6400 	if (vi->flags & VI_INIT_DONE)
6401 		rc = EBUSY; /* cannot be changed once the queues are created */
6402 	else
6403 		vi->qsize_rxq = qsize;
6404 
6405 	end_synchronized_op(sc, LOCK_HELD);
6406 	return (rc);
6407 }
6408 
6409 static int
6410 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
6411 {
6412 	struct vi_info *vi = arg1;
6413 	struct adapter *sc = vi->pi->adapter;
6414 	int qsize, rc;
6415 
6416 	qsize = vi->qsize_txq;
6417 
6418 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
6419 	if (rc != 0 || req->newptr == NULL)
6420 		return (rc);
6421 
6422 	if (qsize < 128 || qsize > 65536)
6423 		return (EINVAL);
6424 
6425 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6426 	    "t4txqs");
6427 	if (rc)
6428 		return (rc);
6429 
6430 	if (vi->flags & VI_INIT_DONE)
6431 		rc = EBUSY; /* cannot be changed once the queues are created */
6432 	else
6433 		vi->qsize_txq = qsize;
6434 
6435 	end_synchronized_op(sc, LOCK_HELD);
6436 	return (rc);
6437 }
6438 
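/*
 * The pause (and, below, the FEC) settings are read and written as a single
 * digit whose bits select the requested options; changes are applied with
 * t4_link_l1cfg.
 */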
6439 static int
6440 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
6441 {
6442 	struct port_info *pi = arg1;
6443 	struct adapter *sc = pi->adapter;
6444 	struct link_config *lc = &pi->link_cfg;
6445 	int rc;
6446 
6447 	if (req->newptr == NULL) {
6448 		struct sbuf *sb;
6449 		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
6450 
6451 		rc = sysctl_wire_old_buffer(req, 0);
6452 		if (rc != 0)
6453 			return (rc);
6454 
6455 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6456 		if (sb == NULL)
6457 			return (ENOMEM);
6458 
6459 		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
6460 		rc = sbuf_finish(sb);
6461 		sbuf_delete(sb);
6462 	} else {
6463 		char s[2];
6464 		int n;
6465 
6466 		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
6467 		s[1] = 0;
6468 
6469 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6470 		if (rc != 0)
6471 			return (rc);
6472 
6473 		if (s[1] != 0)
6474 			return (EINVAL);
6475 		if (s[0] < '0' || s[0] > '9')
6476 			return (EINVAL);	/* not a number */
6477 		n = s[0] - '0';
6478 		if (n & ~(PAUSE_TX | PAUSE_RX))
6479 			return (EINVAL);	/* some other bit is set too */
6480 
6481 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6482 		    "t4PAUSE");
6483 		if (rc)
6484 			return (rc);
6485 		PORT_LOCK(pi);
6486 		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
6487 			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
6488 			lc->requested_fc |= n;
6489 			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6490 			if (rc == 0) {
6491 				lc->fc = lc->requested_fc;
6492 				set_current_media(pi, &pi->media);
6493 			}
6494 		}
6495 		PORT_UNLOCK(pi);
6496 		end_synchronized_op(sc, 0);
6497 	}
6498 
6499 	return (rc);
6500 }
6501 
6502 static int
6503 sysctl_fec(SYSCTL_HANDLER_ARGS)
6504 {
6505 	struct port_info *pi = arg1;
6506 	struct adapter *sc = pi->adapter;
6507 	struct link_config *lc = &pi->link_cfg;
6508 	int rc;
6509 
6510 	if (req->newptr == NULL) {
6511 		struct sbuf *sb;
6512 		static char *bits = "\20\1RS\2BASER_RS\3RESERVED";
6513 
6514 		rc = sysctl_wire_old_buffer(req, 0);
6515 		if (rc != 0)
6516 			return (rc);
6517 
6518 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6519 		if (sb == NULL)
6520 			return (ENOMEM);
6521 
6522 		sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
6523 		rc = sbuf_finish(sb);
6524 		sbuf_delete(sb);
6525 	} else {
6526 		char s[2];
6527 		int n;
6528 
6529 		s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
6530 		s[1] = 0;
6531 
6532 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6533 		if (rc != 0)
6534 			return (rc);
6535 
6536 		if (s[1] != 0)
6537 			return (EINVAL);
6538 		if (s[0] < '0' || s[0] > '9')
6539 			return (EINVAL);	/* not a number */
6540 		n = s[0] - '0';
6541 		if (n & ~M_FW_PORT_CAP_FEC)
6542 			return (EINVAL);	/* some other bit is set too */
6543 		if (!powerof2(n))
6544 			return (EINVAL);	/* one bit can be set at most */
6545 
6546 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6547 		    "t4fec");
6548 		if (rc)
6549 			return (rc);
6550 		PORT_LOCK(pi);
6551 		if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
6552 			lc->requested_fec = n &
6553 			    G_FW_PORT_CAP_FEC(lc->supported);
6554 			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6555 			if (rc == 0) {
6556 				lc->fec = lc->requested_fec;
6557 			}
6558 		}
6559 		PORT_UNLOCK(pi);
6560 		end_synchronized_op(sc, 0);
6561 	}
6562 
6563 	return (rc);
6564 }
6565 
6566 static int
6567 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
6568 {
6569 	struct port_info *pi = arg1;
6570 	struct adapter *sc = pi->adapter;
6571 	struct link_config *lc = &pi->link_cfg;
6572 	int rc, val, old;
6573 
6574 	if (lc->supported & FW_PORT_CAP_ANEG)
6575 		val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
6576 	else
6577 		val = -1;
6578 	rc = sysctl_handle_int(oidp, &val, 0, req);
6579 	if (rc != 0 || req->newptr == NULL)
6580 		return (rc);
6581 	if (val == 0)
6582 		val = AUTONEG_DISABLE;
6583 	else if (val == 1)
6584 		val = AUTONEG_ENABLE;
6585 	else
6586 		return (EINVAL);
6587 
6588 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6589 	    "t4aneg");
6590 	if (rc)
6591 		return (rc);
6592 	PORT_LOCK(pi);
6593 	if ((lc->supported & FW_PORT_CAP_ANEG) == 0) {
6594 		rc = ENOTSUP;
6595 		goto done;
6596 	}
6597 	if (lc->requested_aneg == val) {
6598 		rc = 0;	/* no change, do nothing. */
6599 		goto done;
6600 	}
6601 	old = lc->requested_aneg;
6602 	lc->requested_aneg = val;
6603 	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6604 	if (rc != 0)
6605 		lc->requested_aneg = old;
6606 	else
6607 		set_current_media(pi, &pi->media);
6608 done:
6609 	PORT_UNLOCK(pi);
6610 	end_synchronized_op(sc, 0);
6611 	return (rc);
6612 }
6613 
6614 static int
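/*
 * Generic handler that reports the value of the 64-bit register at offset
 * arg2.
 */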
6615 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
6616 {
6617 	struct adapter *sc = arg1;
6618 	int reg = arg2;
6619 	uint64_t val;
6620 
6621 	val = t4_read_reg64(sc, reg);
6622 
6623 	return (sysctl_handle_64(oidp, &val, 0, req));
6624 }
6625 
6626 static int
6627 sysctl_temperature(SYSCTL_HANDLER_ARGS)
6628 {
6629 	struct adapter *sc = arg1;
6630 	int rc, t;
6631 	uint32_t param, val;
6632 
6633 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
6634 	if (rc)
6635 		return (rc);
6636 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6637 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
6638 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
6639 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6640 	end_synchronized_op(sc, 0);
6641 	if (rc)
6642 		return (rc);
6643 
6644 	/* unknown is returned as 0 but we display -1 in that case */
6645 	t = val == 0 ? -1 : val;
6646 
6647 	rc = sysctl_handle_int(oidp, &t, 0, req);
6648 	return (rc);
6649 }
6650 
6651 static int
6652 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
6653 {
6654 	struct adapter *sc = arg1;
6655 	struct sbuf *sb;
6656 	int rc;
6657 	uint32_t param, val;
6658 
6659 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
6660 	if (rc)
6661 		return (rc);
6662 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6663 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
6664 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6665 	end_synchronized_op(sc, 0);
6666 	if (rc)
6667 		return (rc);
6668 
6669 	rc = sysctl_wire_old_buffer(req, 0);
6670 	if (rc != 0)
6671 		return (rc);
6672 
6673 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6674 	if (sb == NULL)
6675 		return (ENOMEM);
6676 
6677 	if (val == 0xffffffff) {
6678 		/* Only debug and custom firmwares report load averages. */
6679 		sbuf_printf(sb, "not available");
6680 	} else {
6681 		sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
6682 		    (val >> 16) & 0xff);
6683 	}
6684 	rc = sbuf_finish(sb);
6685 	sbuf_delete(sb);
6686 
6687 	return (rc);
6688 }
6689 
6690 static int
6691 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
6692 {
6693 	struct adapter *sc = arg1;
6694 	struct sbuf *sb;
6695 	int rc, i;
6696 	uint16_t incr[NMTUS][NCCTRL_WIN];
6697 	static const char *dec_fac[] = {
6698 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
6699 		"0.9375"
6700 	};
6701 
6702 	rc = sysctl_wire_old_buffer(req, 0);
6703 	if (rc != 0)
6704 		return (rc);
6705 
6706 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6707 	if (sb == NULL)
6708 		return (ENOMEM);
6709 
6710 	t4_read_cong_tbl(sc, incr);
6711 
6712 	for (i = 0; i < NCCTRL_WIN; ++i) {
6713 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
6714 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6715 		    incr[5][i], incr[6][i], incr[7][i]);
6716 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6717 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6718 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6719 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6720 	}
6721 
6722 	rc = sbuf_finish(sb);
6723 	sbuf_delete(sb);
6724 
6725 	return (rc);
6726 }
6727 
6728 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6729 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
6730 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
6731 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
6732 };
6733 
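/*
 * arg2 selects the CIM queue to dump: values below CIM_NUM_IBQ are inbound
 * queues and the rest are outbound queues, in qname[] order.
 */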
6734 static int
6735 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6736 {
6737 	struct adapter *sc = arg1;
6738 	struct sbuf *sb;
6739 	int rc, i, n, qid = arg2;
6740 	uint32_t *buf, *p;
6741 	char *qtype;
6742 	u_int cim_num_obq = sc->chip_params->cim_num_obq;
6743 
6744 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6745 	    ("%s: bad qid %d\n", __func__, qid));
6746 
6747 	if (qid < CIM_NUM_IBQ) {
6748 		/* inbound queue */
6749 		qtype = "IBQ";
6750 		n = 4 * CIM_IBQ_SIZE;
6751 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6752 		rc = t4_read_cim_ibq(sc, qid, buf, n);
6753 	} else {
6754 		/* outbound queue */
6755 		qtype = "OBQ";
6756 		qid -= CIM_NUM_IBQ;
6757 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6758 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6759 		rc = t4_read_cim_obq(sc, qid, buf, n);
6760 	}
6761 
6762 	if (rc < 0) {
6763 		rc = -rc;
6764 		goto done;
6765 	}
6766 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
6767 
6768 	rc = sysctl_wire_old_buffer(req, 0);
6769 	if (rc != 0)
6770 		goto done;
6771 
6772 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6773 	if (sb == NULL) {
6774 		rc = ENOMEM;
6775 		goto done;
6776 	}
6777 
6778 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
6779 	for (i = 0, p = buf; i < n; i += 16, p += 4)
6780 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6781 		    p[2], p[3]);
6782 
6783 	rc = sbuf_finish(sb);
6784 	sbuf_delete(sb);
6785 done:
6786 	free(buf, M_CXGBE);
6787 	return (rc);
6788 }
6789 
6790 static int
6791 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6792 {
6793 	struct adapter *sc = arg1;
6794 	u_int cfg;
6795 	struct sbuf *sb;
6796 	uint32_t *buf, *p;
6797 	int rc;
6798 
6799 	MPASS(chip_id(sc) <= CHELSIO_T5);
6800 
6801 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6802 	if (rc != 0)
6803 		return (rc);
6804 
6805 	rc = sysctl_wire_old_buffer(req, 0);
6806 	if (rc != 0)
6807 		return (rc);
6808 
6809 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6810 	if (sb == NULL)
6811 		return (ENOMEM);
6812 
6813 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6814 	    M_ZERO | M_WAITOK);
6815 
6816 	rc = -t4_cim_read_la(sc, buf, NULL);
6817 	if (rc != 0)
6818 		goto done;
6819 
6820 	sbuf_printf(sb, "Status   Data      PC%s",
6821 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
6822 	    "     LS0Stat  LS0Addr             LS0Data");
6823 
6824 	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6825 		if (cfg & F_UPDBGLACAPTPCONLY) {
6826 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
6827 			    p[6], p[7]);
6828 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
6829 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6830 			    p[4] & 0xff, p[5] >> 8);
6831 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
6832 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6833 			    p[1] & 0xf, p[2] >> 4);
6834 		} else {
6835 			sbuf_printf(sb,
6836 			    "\n  %02x   %x%07x %x%07x %08x %08x "
6837 			    "%08x%08x%08x%08x",
6838 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6839 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6840 			    p[6], p[7]);
6841 		}
6842 	}
6843 
6844 	rc = sbuf_finish(sb);
6845 	sbuf_delete(sb);
6846 done:
6847 	free(buf, M_CXGBE);
6848 	return (rc);
6849 }
6850 
6851 static int
6852 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6853 {
6854 	struct adapter *sc = arg1;
6855 	u_int cfg;
6856 	struct sbuf *sb;
6857 	uint32_t *buf, *p;
6858 	int rc;
6859 
6860 	MPASS(chip_id(sc) > CHELSIO_T5);
6861 
6862 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6863 	if (rc != 0)
6864 		return (rc);
6865 
6866 	rc = sysctl_wire_old_buffer(req, 0);
6867 	if (rc != 0)
6868 		return (rc);
6869 
6870 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6871 	if (sb == NULL)
6872 		return (ENOMEM);
6873 
6874 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6875 	    M_ZERO | M_WAITOK);
6876 
6877 	rc = -t4_cim_read_la(sc, buf, NULL);
6878 	if (rc != 0)
6879 		goto done;
6880 
6881 	sbuf_printf(sb, "Status   Inst    Data      PC%s",
6882 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
6883 	    "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");
6884 
6885 	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6886 		if (cfg & F_UPDBGLACAPTPCONLY) {
6887 			sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
6888 			    p[3] & 0xff, p[2], p[1], p[0]);
6889 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
6890 			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6891 			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6892 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
6893 			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6894 			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6895 			    p[6] >> 16);
6896 		} else {
6897 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
6898 			    "%08x %08x %08x %08x %08x %08x",
6899 			    (p[9] >> 16) & 0xff,
6900 			    p[9] & 0xffff, p[8] >> 16,
6901 			    p[8] & 0xffff, p[7] >> 16,
6902 			    p[7] & 0xffff, p[6] >> 16,
6903 			    p[2], p[1], p[0], p[5], p[4], p[3]);
6904 		}
6905 	}
6906 
6907 	rc = sbuf_finish(sb);
6908 	sbuf_delete(sb);
6909 done:
6910 	free(buf, M_CXGBE);
6911 	return (rc);
6912 }
6913 
6914 static int
6915 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6916 {
6917 	struct adapter *sc = arg1;
6918 	u_int i;
6919 	struct sbuf *sb;
6920 	uint32_t *buf, *p;
6921 	int rc;
6922 
6923 	rc = sysctl_wire_old_buffer(req, 0);
6924 	if (rc != 0)
6925 		return (rc);
6926 
6927 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6928 	if (sb == NULL)
6929 		return (ENOMEM);
6930 
6931 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6932 	    M_ZERO | M_WAITOK);
6933 
6934 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6935 	p = buf;
6936 
6937 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6938 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6939 		    p[1], p[0]);
6940 	}
6941 
6942 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
6943 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6944 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
6945 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6946 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6947 		    (p[1] >> 2) | ((p[2] & 3) << 30),
6948 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6949 		    p[0] & 1);
6950 	}
6951 
6952 	rc = sbuf_finish(sb);
6953 	sbuf_delete(sb);
6954 	free(buf, M_CXGBE);
6955 	return (rc);
6956 }
6957 
6958 static int
6959 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6960 {
6961 	struct adapter *sc = arg1;
6962 	u_int i;
6963 	struct sbuf *sb;
6964 	uint32_t *buf, *p;
6965 	int rc;
6966 
6967 	rc = sysctl_wire_old_buffer(req, 0);
6968 	if (rc != 0)
6969 		return (rc);
6970 
6971 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6972 	if (sb == NULL)
6973 		return (ENOMEM);
6974 
6975 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6976 	    M_ZERO | M_WAITOK);
6977 
6978 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6979 	p = buf;
6980 
6981 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
6982 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6983 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
6984 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6985 		    p[4], p[3], p[2], p[1], p[0]);
6986 	}
6987 
6988 	sbuf_printf(sb, "\n\nCntl ID               Data");
6989 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6990 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
6991 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6992 	}
6993 
6994 	rc = sbuf_finish(sb);
6995 	sbuf_delete(sb);
6996 	free(buf, M_CXGBE);
6997 	return (rc);
6998 }
6999 
7000 static int
7001 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
7002 {
7003 	struct adapter *sc = arg1;
7004 	struct sbuf *sb;
7005 	int rc, i;
7006 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7007 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7008 	uint16_t thres[CIM_NUM_IBQ];
7009 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
7010 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
7011 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
7012 
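	/*
	 * The IBQ/OBQ pointer registers moved between T4 and T5+; pick the
	 * correct base addresses before reading the queue state.
	 */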
7013 	cim_num_obq = sc->chip_params->cim_num_obq;
7014 	if (is_t4(sc)) {
7015 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
7016 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
7017 	} else {
7018 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
7019 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
7020 	}
7021 	nq = CIM_NUM_IBQ + cim_num_obq;
7022 
7023 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
7024 	if (rc == 0)
7025 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
7026 	if (rc != 0)
7027 		return (rc);
7028 
7029 	t4_read_cimq_cfg(sc, base, size, thres);
7030 
7031 	rc = sysctl_wire_old_buffer(req, 0);
7032 	if (rc != 0)
7033 		return (rc);
7034 
7035 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7036 	if (sb == NULL)
7037 		return (ENOMEM);
7038 
7039 	sbuf_printf(sb,
7040 	    "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail");
7041 
7042 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
7043 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
7044 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
7045 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7046 		    G_QUEREMFLITS(p[2]) * 16);
7047 	for ( ; i < nq; i++, p += 4, wr += 2)
7048 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
7049 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
7050 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7051 		    G_QUEREMFLITS(p[2]) * 16);
7052 
7053 	rc = sbuf_finish(sb);
7054 	sbuf_delete(sb);
7055 
7056 	return (rc);
7057 }
7058 
7059 static int
7060 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
7061 {
7062 	struct adapter *sc = arg1;
7063 	struct sbuf *sb;
7064 	int rc;
7065 	struct tp_cpl_stats stats;
7066 
7067 	rc = sysctl_wire_old_buffer(req, 0);
7068 	if (rc != 0)
7069 		return (rc);
7070 
7071 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7072 	if (sb == NULL)
7073 		return (ENOMEM);
7074 
7075 	mtx_lock(&sc->reg_lock);
7076 	t4_tp_get_cpl_stats(sc, &stats, 0);
7077 	mtx_unlock(&sc->reg_lock);
7078 
7079 	if (sc->chip_params->nchan > 2) {
7080 		sbuf_printf(sb, "                 channel 0  channel 1"
7081 		    "  channel 2  channel 3");
7082 		sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
7083 		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
7084 		sbuf_printf(sb, "\nCPL responses:   %10u %10u %10u %10u",
7085 		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
7086 	} else {
7087 		sbuf_printf(sb, "                 channel 0  channel 1");
7088 		sbuf_printf(sb, "\nCPL requests:   %10u %10u",
7089 		    stats.req[0], stats.req[1]);
7090 		sbuf_printf(sb, "\nCPL responses:   %10u %10u",
7091 		    stats.rsp[0], stats.rsp[1]);
7092 	}
7093 
7094 	rc = sbuf_finish(sb);
7095 	sbuf_delete(sb);
7096 
7097 	return (rc);
7098 }
7099 
7100 static int
7101 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
7102 {
7103 	struct adapter *sc = arg1;
7104 	struct sbuf *sb;
7105 	int rc;
7106 	struct tp_usm_stats stats;
7107 
7108 	rc = sysctl_wire_old_buffer(req, 0);
7109 	if (rc != 0)
7110 		return (rc);
7111 
7112 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7113 	if (sb == NULL)
7114 		return (ENOMEM);
7115 
7116 	t4_get_usm_stats(sc, &stats, 1);
7117 
7118 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
7119 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
7120 	sbuf_printf(sb, "Drops:  %u", stats.drops);
7121 
7122 	rc = sbuf_finish(sb);
7123 	sbuf_delete(sb);
7124 
7125 	return (rc);
7126 }
7127 
7128 static const char * const devlog_level_strings[] = {
7129 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
7130 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
7131 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
7132 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
7133 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
7134 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
7135 };
7136 
7137 static const char * const devlog_facility_strings[] = {
7138 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
7139 	[FW_DEVLOG_FACILITY_CF]		= "CF",
7140 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
7141 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
7142 	[FW_DEVLOG_FACILITY_RES]	= "RES",
7143 	[FW_DEVLOG_FACILITY_HW]		= "HW",
7144 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
7145 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
7146 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
7147 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
7148 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
7149 	[FW_DEVLOG_FACILITY_VI]		= "VI",
7150 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
7151 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
7152 	[FW_DEVLOG_FACILITY_TM]		= "TM",
7153 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
7154 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
7155 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
7156 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
7157 	[FW_DEVLOG_FACILITY_RI]		= "RI",
7158 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
7159 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
7160 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
7161 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
7162 	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
7163 };
7164 
7165 static int
7166 sysctl_devlog(SYSCTL_HANDLER_ARGS)
7167 {
7168 	struct adapter *sc = arg1;
7169 	struct devlog_params *dparams = &sc->params.devlog;
7170 	struct fw_devlog_e *buf, *e;
7171 	int i, j, rc, nentries, first = 0;
7172 	struct sbuf *sb;
7173 	uint64_t ftstamp = UINT64_MAX;
7174 
7175 	if (dparams->addr == 0)
7176 		return (ENXIO);
7177 
7178 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
7179 	if (buf == NULL)
7180 		return (ENOMEM);
7181 
7182 	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
7183 	if (rc != 0)
7184 		goto done;
7185 
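	/*
	 * The firmware devlog is a ring buffer; convert the entries to host
	 * byte order and find the oldest one (smallest timestamp) so the
	 * dump below can start there.
	 */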
7186 	nentries = dparams->size / sizeof(struct fw_devlog_e);
7187 	for (i = 0; i < nentries; i++) {
7188 		e = &buf[i];
7189 
7190 		if (e->timestamp == 0)
7191 			break;	/* end */
7192 
7193 		e->timestamp = be64toh(e->timestamp);
7194 		e->seqno = be32toh(e->seqno);
7195 		for (j = 0; j < 8; j++)
7196 			e->params[j] = be32toh(e->params[j]);
7197 
7198 		if (e->timestamp < ftstamp) {
7199 			ftstamp = e->timestamp;
7200 			first = i;
7201 		}
7202 	}
7203 
7204 	if (buf[first].timestamp == 0)
7205 		goto done;	/* nothing in the log */
7206 
7207 	rc = sysctl_wire_old_buffer(req, 0);
7208 	if (rc != 0)
7209 		goto done;
7210 
7211 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7212 	if (sb == NULL) {
7213 		rc = ENOMEM;
7214 		goto done;
7215 	}
7216 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
7217 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
7218 
7219 	i = first;
7220 	do {
7221 		e = &buf[i];
7222 		if (e->timestamp == 0)
7223 			break;	/* end */
7224 
7225 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
7226 		    e->seqno, e->timestamp,
7227 		    (e->level < nitems(devlog_level_strings) ?
7228 			devlog_level_strings[e->level] : "UNKNOWN"),
7229 		    (e->facility < nitems(devlog_facility_strings) ?
7230 			devlog_facility_strings[e->facility] : "UNKNOWN"));
7231 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
7232 		    e->params[2], e->params[3], e->params[4],
7233 		    e->params[5], e->params[6], e->params[7]);
7234 
7235 		if (++i == nentries)
7236 			i = 0;
7237 	} while (i != first);
7238 
7239 	rc = sbuf_finish(sb);
7240 	sbuf_delete(sb);
7241 done:
7242 	free(buf, M_CXGBE);
7243 	return (rc);
7244 }
7245 
7246 static int
7247 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
7248 {
7249 	struct adapter *sc = arg1;
7250 	struct sbuf *sb;
7251 	int rc;
7252 	struct tp_fcoe_stats stats[MAX_NCHAN];
7253 	int i, nchan = sc->chip_params->nchan;
7254 
7255 	rc = sysctl_wire_old_buffer(req, 0);
7256 	if (rc != 0)
7257 		return (rc);
7258 
7259 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7260 	if (sb == NULL)
7261 		return (ENOMEM);
7262 
7263 	for (i = 0; i < nchan; i++)
7264 		t4_get_fcoe_stats(sc, i, &stats[i], 1);
7265 
7266 	if (nchan > 2) {
7267 		sbuf_printf(sb, "                   channel 0        channel 1"
7268 		    "        channel 2        channel 3");
7269 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
7270 		    stats[0].octets_ddp, stats[1].octets_ddp,
7271 		    stats[2].octets_ddp, stats[3].octets_ddp);
7272 		sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
7273 		    stats[0].frames_ddp, stats[1].frames_ddp,
7274 		    stats[2].frames_ddp, stats[3].frames_ddp);
7275 		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
7276 		    stats[0].frames_drop, stats[1].frames_drop,
7277 		    stats[2].frames_drop, stats[3].frames_drop);
7278 	} else {
7279 		sbuf_printf(sb, "                   channel 0        channel 1");
7280 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
7281 		    stats[0].octets_ddp, stats[1].octets_ddp);
7282 		sbuf_printf(sb, "\nframesDDP:  %16u %16u",
7283 		    stats[0].frames_ddp, stats[1].frames_ddp);
7284 		sbuf_printf(sb, "\nframesDrop: %16u %16u",
7285 		    stats[0].frames_drop, stats[1].frames_drop);
7286 	}
7287 
7288 	rc = sbuf_finish(sb);
7289 	sbuf_delete(sb);
7290 
7291 	return (rc);
7292 }
7293 
7294 static int
7295 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
7296 {
7297 	struct adapter *sc = arg1;
7298 	struct sbuf *sb;
7299 	int rc, i;
7300 	unsigned int map, kbps, ipg, mode;
7301 	unsigned int pace_tab[NTX_SCHED];
7302 
7303 	rc = sysctl_wire_old_buffer(req, 0);
7304 	if (rc != 0)
7305 		return (rc);
7306 
7307 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7308 	if (sb == NULL)
7309 		return (ENOMEM);
7310 
7311 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
7312 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
7313 	t4_read_pace_tbl(sc, pace_tab);
7314 
7315 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
7316 	    "Class IPG (0.1 ns)   Flow IPG (us)");
7317 
7318 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
7319 		t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
7320 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
7321 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
7322 		if (kbps)
7323 			sbuf_printf(sb, "%9u     ", kbps);
7324 		else
7325 			sbuf_printf(sb, " disabled     ");
7326 
7327 		if (ipg)
7328 			sbuf_printf(sb, "%13u        ", ipg);
7329 		else
7330 			sbuf_printf(sb, "     disabled        ");
7331 
7332 		if (pace_tab[i])
7333 			sbuf_printf(sb, "%10u", pace_tab[i]);
7334 		else
7335 			sbuf_printf(sb, "  disabled");
7336 	}
7337 
7338 	rc = sbuf_finish(sb);
7339 	sbuf_delete(sb);
7340 
7341 	return (rc);
7342 }
7343 
7344 static int
7345 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
7346 {
7347 	struct adapter *sc = arg1;
7348 	struct sbuf *sb;
7349 	int rc, i, j;
7350 	uint64_t *p0, *p1;
7351 	struct lb_port_stats s[2];
7352 	static const char *stat_name[] = {
7353 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
7354 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
7355 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
7356 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
7357 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
7358 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
7359 		"BG2FramesTrunc:", "BG3FramesTrunc:"
7360 	};
7361 
7362 	rc = sysctl_wire_old_buffer(req, 0);
7363 	if (rc != 0)
7364 		return (rc);
7365 
7366 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7367 	if (sb == NULL)
7368 		return (ENOMEM);
7369 
7370 	memset(s, 0, sizeof(s));
7371 
7372 	for (i = 0; i < sc->chip_params->nchan; i += 2) {
7373 		t4_get_lb_stats(sc, i, &s[0]);
7374 		t4_get_lb_stats(sc, i + 1, &s[1]);
7375 
7376 		p0 = &s[0].octets;
7377 		p1 = &s[1].octets;
7378 		sbuf_printf(sb, "%s                       Loopback %u"
7379 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
7380 
7381 		for (j = 0; j < nitems(stat_name); j++)
7382 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
7383 				   *p0++, *p1++);
7384 	}
7385 
7386 	rc = sbuf_finish(sb);
7387 	sbuf_delete(sb);
7388 
7389 	return (rc);
7390 }
7391 
7392 static int
7393 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
7394 {
7395 	int rc = 0;
7396 	struct port_info *pi = arg1;
7397 	struct link_config *lc = &pi->link_cfg;
7398 	struct sbuf *sb;
7399 
7400 	rc = sysctl_wire_old_buffer(req, 0);
7401 	if (rc != 0)
7402 		return (rc);
7403 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7404 	if (sb == NULL)
7405 		return (ENOMEM);
7406 
7407 	if (lc->link_ok || lc->link_down_rc == 255)
7408 		sbuf_printf(sb, "n/a");
7409 	else
7410 		sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7411 
7412 	rc = sbuf_finish(sb);
7413 	sbuf_delete(sb);
7414 
7415 	return (rc);
7416 }
7417 
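/*
 * A memory region for sysctl_meminfo: [base, limit] plus an index into the
 * region[] name table; an out-of-range idx hides the entry from the output.
 */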
7418 struct mem_desc {
7419 	unsigned int base;
7420 	unsigned int limit;
7421 	unsigned int idx;
7422 };
7423 
7424 static int
7425 mem_desc_cmp(const void *a, const void *b)
7426 {
7427 	return ((const struct mem_desc *)a)->base -
7428 	       ((const struct mem_desc *)b)->base;
7429 }
7430 
7431 static void
7432 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7433     unsigned int to)
7434 {
7435 	unsigned int size;
7436 
7437 	if (from == to)
7438 		return;
7439 
7440 	size = to - from + 1;
7441 	if (size == 0)
7442 		return;
7443 
7444 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7445 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
7446 }
7447 
7448 static int
7449 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7450 {
7451 	struct adapter *sc = arg1;
7452 	struct sbuf *sb;
7453 	int rc, i, n;
7454 	uint32_t lo, hi, used, alloc;
7455 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7456 	static const char *region[] = {
7457 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
7458 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
7459 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
7460 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
7461 		"RQUDP region:", "PBL region:", "TXPBL region:",
7462 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
7463 		"On-chip queues:", "TLS keys:",
7464 	};
7465 	struct mem_desc avail[4];
7466 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
7467 	struct mem_desc *md = mem;
7468 
7469 	rc = sysctl_wire_old_buffer(req, 0);
7470 	if (rc != 0)
7471 		return (rc);
7472 
7473 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7474 	if (sb == NULL)
7475 		return (ENOMEM);
7476 
7477 	for (i = 0; i < nitems(mem); i++) {
7478 		mem[i].limit = 0;
7479 		mem[i].idx = i;
7480 	}
7481 
7482 	/* Find and sort the populated memory ranges */
7483 	i = 0;
7484 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
7485 	if (lo & F_EDRAM0_ENABLE) {
7486 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
7487 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
7488 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
7489 		avail[i].idx = 0;
7490 		i++;
7491 	}
7492 	if (lo & F_EDRAM1_ENABLE) {
7493 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
7494 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
7495 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
7496 		avail[i].idx = 1;
7497 		i++;
7498 	}
7499 	if (lo & F_EXT_MEM_ENABLE) {
7500 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
7501 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
7502 		avail[i].limit = avail[i].base +
7503 		    (G_EXT_MEM_SIZE(hi) << 20);
7504 		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
7505 		i++;
7506 	}
7507 	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
7508 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
7509 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
7510 		avail[i].limit = avail[i].base +
7511 		    (G_EXT_MEM1_SIZE(hi) << 20);
7512 		avail[i].idx = 4;
7513 		i++;
7514 	}
7515 	if (!i) {                                  /* no memory available */
7516 		sbuf_delete(sb);	/* else the sbuf would be leaked */
     		return (0);
     	}
7517 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
7518 
7519 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
7520 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
7521 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
7522 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7523 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
7524 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
7525 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
7526 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
7527 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
7528 
7529 	/* the next few have explicit upper bounds */
7530 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
7531 	md->limit = md->base - 1 +
7532 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
7533 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
7534 	md++;
7535 
7536 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
7537 	md->limit = md->base - 1 +
7538 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
7539 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
7540 	md++;
7541 
7542 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7543 		if (chip_id(sc) <= CHELSIO_T5)
7544 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
7545 		else
7546 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
7547 		md->limit = 0;
7548 	} else {
7549 		md->base = 0;
7550 		md->idx = nitems(region);  /* hide it */
7551 	}
7552 	md++;
7553 
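/* Each ULP region has explicit lower and upper limit registers. */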
7554 #define ulp_region(reg) \
7555 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
7556 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
7557 
7558 	ulp_region(RX_ISCSI);
7559 	ulp_region(RX_TDDP);
7560 	ulp_region(TX_TPT);
7561 	ulp_region(RX_STAG);
7562 	ulp_region(RX_RQ);
7563 	ulp_region(RX_RQUDP);
7564 	ulp_region(RX_PBL);
7565 	ulp_region(TX_PBL);
7566 #undef ulp_region
7567 
7568 	md->base = 0;
7569 	md->idx = nitems(region);
7570 	if (!is_t4(sc)) {
7571 		uint32_t size = 0;
7572 		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
7573 		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
7574 
7575 		if (is_t5(sc)) {
7576 			if (sge_ctrl & F_VFIFO_ENABLE)
7577 				size = G_DBVFIFO_SIZE(fifo_size);
7578 		} else
7579 			size = G_T6_DBVFIFO_SIZE(fifo_size);
7580 
7581 		if (size) {
7582 			md->base = G_BASEADDR(t4_read_reg(sc,
7583 			    A_SGE_DBVFIFO_BADDR));
7584 			md->limit = md->base + (size << 2) - 1;
7585 		}
7586 	}
7587 	md++;
7588 
7589 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
7590 	md->limit = 0;
7591 	md++;
7592 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
7593 	md->limit = 0;
7594 	md++;
7595 
7596 	md->base = sc->vres.ocq.start;
7597 	if (sc->vres.ocq.size)
7598 		md->limit = md->base + sc->vres.ocq.size - 1;
7599 	else
7600 		md->idx = nitems(region);  /* hide it */
7601 	md++;
7602 
7603 	md->base = sc->vres.key.start;
7604 	if (sc->vres.key.size)
7605 		md->limit = md->base + sc->vres.key.size - 1;
7606 	else
7607 		md->idx = nitems(region);  /* hide it */
7608 	md++;
7609 
7610 	/* add any address-space holes, there can be up to 3 */
7611 	for (n = 0; n < i - 1; n++)
7612 		if (avail[n].limit < avail[n + 1].base)
7613 			(md++)->base = avail[n].limit;
7614 	if (avail[n].limit)
7615 		(md++)->base = avail[n].limit;
7616 
7617 	n = md - mem;
7618 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
7619 
7620 	for (lo = 0; lo < i; lo++)
7621 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
7622 				avail[lo].limit - 1);
7623 
7624 	sbuf_printf(sb, "\n");
7625 	for (i = 0; i < n; i++) {
7626 		if (mem[i].idx >= nitems(region))
7627 			continue;                        /* skip holes */
7628 		if (!mem[i].limit)
7629 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
7630 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
7631 				mem[i].limit);
7632 	}
7633 
7634 	sbuf_printf(sb, "\n");
7635 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
7636 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
7637 	mem_region_show(sb, "uP RAM:", lo, hi);
7638 
7639 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
7640 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
7641 	mem_region_show(sb, "uP Extmem2:", lo, hi);
7642 
7643 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
7644 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
7645 		   G_PMRXMAXPAGE(lo),
7646 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
7647 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
7648 
7649 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
7650 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
7651 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
7652 		   G_PMTXMAXPAGE(lo),
7653 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
7654 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
7655 	sbuf_printf(sb, "%u p-structs\n",
7656 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
7657 
7658 	for (i = 0; i < 4; i++) {
7659 		if (chip_id(sc) > CHELSIO_T5)
7660 			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
7661 		else
7662 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
7663 		if (is_t5(sc)) {
7664 			used = G_T5_USED(lo);
7665 			alloc = G_T5_ALLOC(lo);
7666 		} else {
7667 			used = G_USED(lo);
7668 			alloc = G_ALLOC(lo);
7669 		}
7670 		/* For T6 these are MAC buffer groups */
7671 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
7672 		    i, used, alloc);
7673 	}
7674 	for (i = 0; i < sc->chip_params->nchan; i++) {
7675 		if (chip_id(sc) > CHELSIO_T5)
7676 			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
7677 		else
7678 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
7679 		if (is_t5(sc)) {
7680 			used = G_T5_USED(lo);
7681 			alloc = G_T5_ALLOC(lo);
7682 		} else {
7683 			used = G_USED(lo);
7684 			alloc = G_ALLOC(lo);
7685 		}
7686 		/* For T6 these are MAC buffer groups */
7687 		sbuf_printf(sb,
7688 		    "\nLoopback %d using %u pages out of %u allocated",
7689 		    i, used, alloc);
7690 	}
7691 
7692 	rc = sbuf_finish(sb);
7693 	sbuf_delete(sb);
7694 
7695 	return (rc);
7696 }
7697 
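/*
 * Convert a TCAM (x, y) pair to a value/mask pair: a bit participates in the
 * match (its mask bit is set) when either x or y is set, and the value
 * itself comes from y.  Callers treat (x & y) != 0 as an unused entry.
 */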
7698 static inline void
7699 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
7700 {
7701 	*mask = x | y;
7702 	y = htobe64(y);
7703 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
7704 }
7705 
7706 static int
7707 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
7708 {
7709 	struct adapter *sc = arg1;
7710 	struct sbuf *sb;
7711 	int rc, i;
7712 
7713 	MPASS(chip_id(sc) <= CHELSIO_T5);
7714 
7715 	rc = sysctl_wire_old_buffer(req, 0);
7716 	if (rc != 0)
7717 		return (rc);
7718 
7719 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7720 	if (sb == NULL)
7721 		return (ENOMEM);
7722 
7723 	sbuf_printf(sb,
7724 	    "Idx  Ethernet address     Mask     Vld Ports PF"
7725 	    "  VF              Replication             P0 P1 P2 P3  ML");
7726 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7727 		uint64_t tcamx, tcamy, mask;
7728 		uint32_t cls_lo, cls_hi;
7729 		uint8_t addr[ETHER_ADDR_LEN];
7730 
7731 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
7732 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
7733 		if (tcamx & tcamy)
7734 			continue;
7735 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
7736 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7737 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7738 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
7739 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
7740 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
7741 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
7742 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
7743 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
7744 
7745 		if (cls_lo & F_REPLICATE) {
7746 			struct fw_ldst_cmd ldst_cmd;
7747 
7748 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7749 			ldst_cmd.op_to_addrspace =
7750 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7751 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
7752 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7753 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7754 			ldst_cmd.u.mps.rplc.fid_idx =
7755 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7756 				V_FW_LDST_CMD_IDX(i));
7757 
7758 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7759 			    "t4mps");
7760 			if (rc)
7761 				break;
7762 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7763 			    sizeof(ldst_cmd), &ldst_cmd);
7764 			end_synchronized_op(sc, 0);
7765 
7766 			if (rc != 0) {
7767 				sbuf_printf(sb, "%36d", rc);
7768 				rc = 0;
7769 			} else {
7770 				sbuf_printf(sb, " %08x %08x %08x %08x",
7771 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7772 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7773 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7774 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7775 			}
7776 		} else
7777 			sbuf_printf(sb, "%36s", "");
7778 
7779 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7780 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7781 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7782 	}
7783 
7784 	if (rc)
7785 		(void) sbuf_finish(sb);
7786 	else
7787 		rc = sbuf_finish(sb);
7788 	sbuf_delete(sb);
7789 
7790 	return (rc);
7791 }
7792 
7793 static int
7794 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7795 {
7796 	struct adapter *sc = arg1;
7797 	struct sbuf *sb;
7798 	int rc, i;
7799 
7800 	MPASS(chip_id(sc) > CHELSIO_T5);
7801 
7802 	rc = sysctl_wire_old_buffer(req, 0);
7803 	if (rc != 0)
7804 		return (rc);
7805 
7806 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7807 	if (sb == NULL)
7808 		return (ENOMEM);
7809 
7810 	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
7811 	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
7812 	    "                           Replication"
7813 	    "                                    P0 P1 P2 P3  ML\n");
7814 
7815 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7816 		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
7817 		uint16_t ivlan;
7818 		uint64_t tcamx, tcamy, val, mask;
7819 		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
7820 		uint8_t addr[ETHER_ADDR_LEN];
7821 
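		/*
		 * The T6 MPS TCAM is read indirectly: CTLTCAMSEL selects one
		 * of two banks of 256 entries and CTLXYBITSEL selects the y
		 * (0) or x (1) half of the entry.
		 */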
7822 		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
7823 		if (i < 256)
7824 			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
7825 		else
7826 			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
7827 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7828 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7829 		tcamy = G_DMACH(val) << 32;
7830 		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7831 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7832 		lookup_type = G_DATALKPTYPE(data2);
7833 		port_num = G_DATAPORTNUM(data2);
7834 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
7835 			/* Inner header VNI */
7836 			vniy = ((data2 & F_DATAVIDH2) << 23) |
7837 				       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7838 			dip_hit = data2 & F_DATADIPHIT;
7839 			vlan_vld = 0;
7840 		} else {
7841 			vniy = 0;
7842 			dip_hit = 0;
7843 			vlan_vld = data2 & F_DATAVIDH2;
7844 			ivlan = G_VIDL(val);
7845 		}
7846 
7847 		ctl |= V_CTLXYBITSEL(1);
7848 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7849 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7850 		tcamx = G_DMACH(val) << 32;
7851 		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7852 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7853 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
7854 			/* Inner header VNI mask */
7855 			vnix = ((data2 & F_DATAVIDH2) << 23) |
7856 			       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7857 		} else
7858 			vnix = 0;
7859 
7860 		if (tcamx & tcamy)
7861 			continue;
7862 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
7863 
7864 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7865 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7866 
7867 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
7868 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7869 			    "%012jx %06x %06x    -    -   %3c"
7870 			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
7871 			    addr[1], addr[2], addr[3], addr[4], addr[5],
7872 			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
7873 			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7874 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7875 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7876 		} else {
7877 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7878 			    "%012jx    -       -   ", i, addr[0], addr[1],
7879 			    addr[2], addr[3], addr[4], addr[5],
7880 			    (uintmax_t)mask);
7881 
7882 			if (vlan_vld)
7883 				sbuf_printf(sb, "%4u   Y     ", ivlan);
7884 			else
7885 				sbuf_printf(sb, "  -    N     ");
7886 
7887 			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
7888 			    lookup_type ? 'I' : 'O', port_num,
7889 			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7890 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7891 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7892 		}
7893 
7895 		if (cls_lo & F_T6_REPLICATE) {
7896 			struct fw_ldst_cmd ldst_cmd;
7897 
7898 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7899 			ldst_cmd.op_to_addrspace =
7900 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7901 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
7902 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7903 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7904 			ldst_cmd.u.mps.rplc.fid_idx =
7905 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7906 				V_FW_LDST_CMD_IDX(i));
7907 
7908 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7909 			    "t6mps");
7910 			if (rc)
7911 				break;
7912 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7913 			    sizeof(ldst_cmd), &ldst_cmd);
7914 			end_synchronized_op(sc, 0);
7915 
7916 			if (rc != 0) {
7917 				sbuf_printf(sb, "%72d", rc);
7918 				rc = 0;
7919 			} else {
7920 				sbuf_printf(sb, " %08x %08x %08x %08x"
7921 				    " %08x %08x %08x %08x",
7922 				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7923 				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7924 				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7925 				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7926 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7927 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7928 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7929 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7930 			}
7931 		} else
7932 			sbuf_printf(sb, "%72s", "");
7933 
7934 		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7935 		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
7936 		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
7937 		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
7938 	}
7939 
7940 	if (rc)
7941 		(void) sbuf_finish(sb);
7942 	else
7943 		rc = sbuf_finish(sb);
7944 	sbuf_delete(sb);
7945 
7946 	return (rc);
7947 }
7948 
7949 static int
7950 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7951 {
7952 	struct adapter *sc = arg1;
7953 	struct sbuf *sb;
7954 	int rc;
7955 	uint16_t mtus[NMTUS];
7956 
7957 	rc = sysctl_wire_old_buffer(req, 0);
7958 	if (rc != 0)
7959 		return (rc);
7960 
7961 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7962 	if (sb == NULL)
7963 		return (ENOMEM);
7964 
7965 	t4_read_mtu_tbl(sc, mtus, NULL);
7966 
7967 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7968 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7969 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7970 	    mtus[14], mtus[15]);
7971 
7972 	rc = sbuf_finish(sb);
7973 	sbuf_delete(sb);
7974 
7975 	return (rc);
7976 }
7977 
7978 static int
7979 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7980 {
7981 	struct adapter *sc = arg1;
7982 	struct sbuf *sb;
7983 	int rc, i;
7984 	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
7985 	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
7986 	static const char *tx_stats[MAX_PM_NSTATS] = {
7987 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
7988 		"Tx FIFO wait", NULL, "Tx latency"
7989 	};
7990 	static const char *rx_stats[MAX_PM_NSTATS] = {
7991 		"Read:", "Write bypass:", "Write mem:", "Flush:",
7992 		"Rx FIFO wait", NULL, "Rx latency"
7993 	};
7994 
7995 	rc = sysctl_wire_old_buffer(req, 0);
7996 	if (rc != 0)
7997 		return (rc);
7998 
7999 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8000 	if (sb == NULL)
8001 		return (ENOMEM);
8002 
8003 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
8004 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
8005 
8006 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
8007 	for (i = 0; i < 4; i++) {
8008 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8009 		    tx_cyc[i]);
8010 	}
8011 
8012 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
8013 	for (i = 0; i < 4; i++) {
8014 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8015 		    rx_cyc[i]);
8016 	}
8017 
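	/*
	 * i is 4 here after the loops above; on T6 rows 4 ("FIFO wait") and
	 * 6 ("latency") of the stat name tables carry extra counters.
	 */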
8018 	if (chip_id(sc) > CHELSIO_T5) {
8019 		sbuf_printf(sb,
8020 		    "\n              Total wait      Total occupancy");
8021 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8022 		    tx_cyc[i]);
8023 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8024 		    rx_cyc[i]);
8025 
8026 		i += 2;
8027 		MPASS(i < nitems(tx_stats));
8028 
8029 		sbuf_printf(sb,
8030 		    "\n                   Reads           Total wait");
8031 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8032 		    tx_cyc[i]);
8033 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8034 		    rx_cyc[i]);
8035 	}
8036 
8037 	rc = sbuf_finish(sb);
8038 	sbuf_delete(sb);
8039 
8040 	return (rc);
8041 }
8042 
8043 static int
8044 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
8045 {
8046 	struct adapter *sc = arg1;
8047 	struct sbuf *sb;
8048 	int rc;
8049 	struct tp_rdma_stats stats;
8050 
8051 	rc = sysctl_wire_old_buffer(req, 0);
8052 	if (rc != 0)
8053 		return (rc);
8054 
8055 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8056 	if (sb == NULL)
8057 		return (ENOMEM);
8058 
8059 	mtx_lock(&sc->reg_lock);
8060 	t4_tp_get_rdma_stats(sc, &stats, 0);
8061 	mtx_unlock(&sc->reg_lock);
8062 
8063 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
8064 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
8065 
8066 	rc = sbuf_finish(sb);
8067 	sbuf_delete(sb);
8068 
8069 	return (rc);
8070 }
8071 
8072 static int
8073 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
8074 {
8075 	struct adapter *sc = arg1;
8076 	struct sbuf *sb;
8077 	int rc;
8078 	struct tp_tcp_stats v4, v6;
8079 
8080 	rc = sysctl_wire_old_buffer(req, 0);
8081 	if (rc != 0)
8082 		return (rc);
8083 
8084 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8085 	if (sb == NULL)
8086 		return (ENOMEM);
8087 
8088 	mtx_lock(&sc->reg_lock);
8089 	t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
8090 	mtx_unlock(&sc->reg_lock);
8091 
8092 	sbuf_printf(sb,
8093 	    "                                IP                 IPv6\n");
8094 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
8095 	    v4.tcp_out_rsts, v6.tcp_out_rsts);
8096 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
8097 	    v4.tcp_in_segs, v6.tcp_in_segs);
8098 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
8099 	    v4.tcp_out_segs, v6.tcp_out_segs);
8100 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
8101 	    v4.tcp_retrans_segs, v6.tcp_retrans_segs);
8102 
8103 	rc = sbuf_finish(sb);
8104 	sbuf_delete(sb);
8105 
8106 	return (rc);
8107 }
8108 
8109 static int
8110 sysctl_tids(SYSCTL_HANDLER_ARGS)
8111 {
8112 	struct adapter *sc = arg1;
8113 	struct sbuf *sb;
8114 	int rc;
8115 	struct tid_info *t = &sc->tids;
8116 
8117 	rc = sysctl_wire_old_buffer(req, 0);
8118 	if (rc != 0)
8119 		return (rc);
8120 
8121 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8122 	if (sb == NULL)
8123 		return (ENOMEM);
8124 
8125 	if (t->natids) {
8126 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
8127 		    t->atids_in_use);
8128 	}
8129 
8130 	if (t->ntids) {
8131 		sbuf_printf(sb, "TID range: ");
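		/*
		 * With the LE hash enabled the TID range comes in two pieces:
		 * 0 to b - 1 below the server region and hb to ntids - 1 for
		 * hash TIDs.
		 */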
8132 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8133 			uint32_t b, hb;
8134 
8135 			if (chip_id(sc) <= CHELSIO_T5) {
8136 				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
8137 				hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
8138 			} else {
8139 				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
8140 				hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
8141 			}
8142 
8143 			if (b)
8144 				sbuf_printf(sb, "0-%u, ", b - 1);
8145 			sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
8146 		} else
8147 			sbuf_printf(sb, "0-%u", t->ntids - 1);
8148 		sbuf_printf(sb, ", in use: %u\n",
8149 		    atomic_load_acq_int(&t->tids_in_use));
8150 	}
8151 
8152 	if (t->nstids) {
8153 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
8154 		    t->stid_base + t->nstids - 1, t->stids_in_use);
8155 	}
8156 
8157 	if (t->nftids) {
8158 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
8159 		    t->ftid_base + t->nftids - 1);
8160 	}
8161 
8162 	if (t->netids) {
8163 		sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
8164 		    t->etid_base + t->netids - 1, t->etids_in_use);
8165 	}
8166 
8167 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
8168 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
8169 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
8170 
8171 	rc = sbuf_finish(sb);
8172 	sbuf_delete(sb);
8173 
8174 	return (rc);
8175 }
8176 
8177 static int
8178 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
8179 {
8180 	struct adapter *sc = arg1;
8181 	struct sbuf *sb;
8182 	int rc;
8183 	struct tp_err_stats stats;
8184 
8185 	rc = sysctl_wire_old_buffer(req, 0);
8186 	if (rc != 0)
8187 		return (rc);
8188 
8189 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8190 	if (sb == NULL)
8191 		return (ENOMEM);
8192 
8193 	mtx_lock(&sc->reg_lock);
8194 	t4_tp_get_err_stats(sc, &stats, 0);
8195 	mtx_unlock(&sc->reg_lock);
8196 
8197 	if (sc->chip_params->nchan > 2) {
8198 		sbuf_printf(sb, "                 channel 0  channel 1"
8199 		    "  channel 2  channel 3\n");
8200 		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
8201 		    stats.mac_in_errs[0], stats.mac_in_errs[1],
8202 		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
8203 		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
8204 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
8205 		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
8206 		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
8207 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
8208 		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
8209 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
8210 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
8211 		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
8212 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
8213 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
8214 		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
8215 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
8216 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
8217 		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
8218 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
8219 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
8220 		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
8221 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
8222 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
8223 		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
8224 	} else {
8225 		sbuf_printf(sb, "                 channel 0  channel 1\n");
8226 		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
8227 		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
8228 		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
8229 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
8230 		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
8231 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
8232 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
8233 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
8234 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
8235 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
8236 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
8237 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
8238 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
8239 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
8240 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
8241 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
8242 	}
8243 
8244 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
8245 	    stats.ofld_no_neigh, stats.ofld_cong_defer);
8246 
8247 	rc = sbuf_finish(sb);
8248 	sbuf_delete(sb);
8249 
8250 	return (rc);
8251 }
8252 
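/*
 * The TP logic analyzer mask occupies the top 16 bits of TP_DBG_LA_CONFIG;
 * it is exposed to userland shifted down and written back shifted up.
 */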
8253 static int
8254 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
8255 {
8256 	struct adapter *sc = arg1;
8257 	struct tp_params *tpp = &sc->params.tp;
8258 	u_int mask;
8259 	int rc;
8260 
8261 	mask = tpp->la_mask >> 16;
8262 	rc = sysctl_handle_int(oidp, &mask, 0, req);
8263 	if (rc != 0 || req->newptr == NULL)
8264 		return (rc);
8265 	if (mask > 0xffff)
8266 		return (EINVAL);
8267 	tpp->la_mask = mask << 16;
8268 	t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
8269 
8270 	return (0);
8271 }
8272 
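/*
 * One bit-field of a 64-bit trace word: 'width' bits starting at bit
 * 'start'.  Tables of these are terminated by a NULL name and rendered by
 * field_desc_show() below.
 */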
8273 struct field_desc {
8274 	const char *name;
8275 	u_int start;
8276 	u_int width;
8277 };
8278 
8279 static void
8280 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
8281 {
8282 	char buf[32];
8283 	int line_size = 0;
8284 
8285 	while (f->name) {
8286 		uint64_t mask = (1ULL << f->width) - 1;
8287 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
8288 		    ((uintmax_t)v >> f->start) & mask);
8289 
8290 		if (line_size + len >= 79) {
8291 			line_size = 8;
8292 			sbuf_printf(sb, "\n        ");
8293 		}
8294 		sbuf_printf(sb, "%s ", buf);
8295 		line_size += len + 1;
8296 		f++;
8297 	}
8298 	sbuf_printf(sb, "\n");
8299 }
8300 
8301 static const struct field_desc tp_la0[] = {
8302 	{ "RcfOpCodeOut", 60, 4 },
8303 	{ "State", 56, 4 },
8304 	{ "WcfState", 52, 4 },
8305 	{ "RcfOpcSrcOut", 50, 2 },
8306 	{ "CRxError", 49, 1 },
8307 	{ "ERxError", 48, 1 },
8308 	{ "SanityFailed", 47, 1 },
8309 	{ "SpuriousMsg", 46, 1 },
8310 	{ "FlushInputMsg", 45, 1 },
8311 	{ "FlushInputCpl", 44, 1 },
8312 	{ "RssUpBit", 43, 1 },
8313 	{ "RssFilterHit", 42, 1 },
8314 	{ "Tid", 32, 10 },
8315 	{ "InitTcb", 31, 1 },
8316 	{ "LineNumber", 24, 7 },
8317 	{ "Emsg", 23, 1 },
8318 	{ "EdataOut", 22, 1 },
8319 	{ "Cmsg", 21, 1 },
8320 	{ "CdataOut", 20, 1 },
8321 	{ "EreadPdu", 19, 1 },
8322 	{ "CreadPdu", 18, 1 },
8323 	{ "TunnelPkt", 17, 1 },
8324 	{ "RcfPeerFin", 16, 1 },
8325 	{ "RcfReasonOut", 12, 4 },
8326 	{ "TxCchannel", 10, 2 },
8327 	{ "RcfTxChannel", 8, 2 },
8328 	{ "RxEchannel", 6, 2 },
8329 	{ "RcfRxChannel", 5, 1 },
8330 	{ "RcfDataOutSrdy", 4, 1 },
8331 	{ "RxDvld", 3, 1 },
8332 	{ "RxOoDvld", 2, 1 },
8333 	{ "RxCongestion", 1, 1 },
8334 	{ "TxCongestion", 0, 1 },
8335 	{ NULL }
8336 };
8337 
8338 static const struct field_desc tp_la1[] = {
8339 	{ "CplCmdIn", 56, 8 },
8340 	{ "CplCmdOut", 48, 8 },
8341 	{ "ESynOut", 47, 1 },
8342 	{ "EAckOut", 46, 1 },
8343 	{ "EFinOut", 45, 1 },
8344 	{ "ERstOut", 44, 1 },
8345 	{ "SynIn", 43, 1 },
8346 	{ "AckIn", 42, 1 },
8347 	{ "FinIn", 41, 1 },
8348 	{ "RstIn", 40, 1 },
8349 	{ "DataIn", 39, 1 },
8350 	{ "DataInVld", 38, 1 },
8351 	{ "PadIn", 37, 1 },
8352 	{ "RxBufEmpty", 36, 1 },
8353 	{ "RxDdp", 35, 1 },
8354 	{ "RxFbCongestion", 34, 1 },
8355 	{ "TxFbCongestion", 33, 1 },
8356 	{ "TxPktSumSrdy", 32, 1 },
8357 	{ "RcfUlpType", 28, 4 },
8358 	{ "Eread", 27, 1 },
8359 	{ "Ebypass", 26, 1 },
8360 	{ "Esave", 25, 1 },
8361 	{ "Static0", 24, 1 },
8362 	{ "Cread", 23, 1 },
8363 	{ "Cbypass", 22, 1 },
8364 	{ "Csave", 21, 1 },
8365 	{ "CPktOut", 20, 1 },
8366 	{ "RxPagePoolFull", 18, 2 },
8367 	{ "RxLpbkPkt", 17, 1 },
8368 	{ "TxLpbkPkt", 16, 1 },
8369 	{ "RxVfValid", 15, 1 },
8370 	{ "SynLearned", 14, 1 },
8371 	{ "SetDelEntry", 13, 1 },
8372 	{ "SetInvEntry", 12, 1 },
8373 	{ "CpcmdDvld", 11, 1 },
8374 	{ "CpcmdSave", 10, 1 },
8375 	{ "RxPstructsFull", 8, 2 },
8376 	{ "EpcmdDvld", 7, 1 },
8377 	{ "EpcmdFlush", 6, 1 },
8378 	{ "EpcmdTrimPrefix", 5, 1 },
8379 	{ "EpcmdTrimPostfix", 4, 1 },
8380 	{ "ERssIp4Pkt", 3, 1 },
8381 	{ "ERssIp6Pkt", 2, 1 },
8382 	{ "ERssTcpUdpPkt", 1, 1 },
8383 	{ "ERssFceFipPkt", 0, 1 },
8384 	{ NULL }
8385 };
8386 
8387 static const struct field_desc tp_la2[] = {
8388 	{ "CplCmdIn", 56, 8 },
8389 	{ "MpsVfVld", 55, 1 },
8390 	{ "MpsPf", 52, 3 },
8391 	{ "MpsVf", 44, 8 },
8392 	{ "SynIn", 43, 1 },
8393 	{ "AckIn", 42, 1 },
8394 	{ "FinIn", 41, 1 },
8395 	{ "RstIn", 40, 1 },
8396 	{ "DataIn", 39, 1 },
8397 	{ "DataInVld", 38, 1 },
8398 	{ "PadIn", 37, 1 },
8399 	{ "RxBufEmpty", 36, 1 },
8400 	{ "RxDdp", 35, 1 },
8401 	{ "RxFbCongestion", 34, 1 },
8402 	{ "TxFbCongestion", 33, 1 },
8403 	{ "TxPktSumSrdy", 32, 1 },
8404 	{ "RcfUlpType", 28, 4 },
8405 	{ "Eread", 27, 1 },
8406 	{ "Ebypass", 26, 1 },
8407 	{ "Esave", 25, 1 },
8408 	{ "Static0", 24, 1 },
8409 	{ "Cread", 23, 1 },
8410 	{ "Cbypass", 22, 1 },
8411 	{ "Csave", 21, 1 },
8412 	{ "CPktOut", 20, 1 },
8413 	{ "RxPagePoolFull", 18, 2 },
8414 	{ "RxLpbkPkt", 17, 1 },
8415 	{ "TxLpbkPkt", 16, 1 },
8416 	{ "RxVfValid", 15, 1 },
8417 	{ "SynLearned", 14, 1 },
8418 	{ "SetDelEntry", 13, 1 },
8419 	{ "SetInvEntry", 12, 1 },
8420 	{ "CpcmdDvld", 11, 1 },
8421 	{ "CpcmdSave", 10, 1 },
8422 	{ "RxPstructsFull", 8, 2 },
8423 	{ "EpcmdDvld", 7, 1 },
8424 	{ "EpcmdFlush", 6, 1 },
8425 	{ "EpcmdTrimPrefix", 5, 1 },
8426 	{ "EpcmdTrimPostfix", 4, 1 },
8427 	{ "ERssIp4Pkt", 3, 1 },
8428 	{ "ERssIp6Pkt", 2, 1 },
8429 	{ "ERssTcpUdpPkt", 1, 1 },
8430 	{ "ERssFceFipPkt", 0, 1 },
8431 	{ NULL }
8432 };
8433 
8434 static void
8435 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8436 {
8437 
8438 	field_desc_show(sb, *p, tp_la0);
8439 }
8440 
8441 static void
8442 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8443 {
8444 
8445 	if (idx)
8446 		sbuf_printf(sb, "\n");
8447 	field_desc_show(sb, p[0], tp_la0);
8448 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8449 		field_desc_show(sb, p[1], tp_la0);
8450 }
8451 
8452 static void
8453 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
8454 {
8455 
8456 	if (idx)
8457 		sbuf_printf(sb, "\n");
8458 	field_desc_show(sb, p[0], tp_la0);
8459 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8460 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
8461 }
8462 
8463 static int
8464 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
8465 {
8466 	struct adapter *sc = arg1;
8467 	struct sbuf *sb;
8468 	uint64_t *buf, *p;
8469 	int rc;
8470 	u_int i, inc;
8471 	void (*show_func)(struct sbuf *, uint64_t *, int);
8472 
8473 	rc = sysctl_wire_old_buffer(req, 0);
8474 	if (rc != 0)
8475 		return (rc);
8476 
8477 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8478 	if (sb == NULL)
8479 		return (ENOMEM);
8480 
8481 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
8482 
8483 	t4_tp_read_la(sc, buf, NULL);
8484 	p = buf;
8485 
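	/*
	 * DBGLAMODE 2 and 3 capture two 64-bit words per trace entry, so
	 * step through the buffer in pairs; the other modes use one word.
	 */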
8486 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
8487 	case 2:
8488 		inc = 2;
8489 		show_func = tp_la_show2;
8490 		break;
8491 	case 3:
8492 		inc = 2;
8493 		show_func = tp_la_show3;
8494 		break;
8495 	default:
8496 		inc = 1;
8497 		show_func = tp_la_show;
8498 	}
8499 
8500 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
8501 		(*show_func)(sb, p, i);
8502 
8503 	rc = sbuf_finish(sb);
8504 	sbuf_delete(sb);
8505 	free(buf, M_CXGBE);
8506 	return (rc);
8507 }
8508 
8509 static int
8510 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
8511 {
8512 	struct adapter *sc = arg1;
8513 	struct sbuf *sb;
8514 	int rc;
8515 	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
8516 
8517 	rc = sysctl_wire_old_buffer(req, 0);
8518 	if (rc != 0)
8519 		return (rc);
8520 
8521 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8522 	if (sb == NULL)
8523 		return (ENOMEM);
8524 
8525 	t4_get_chan_txrate(sc, nrate, orate);
8526 
8527 	if (sc->chip_params->nchan > 2) {
8528 		sbuf_printf(sb, "              channel 0   channel 1"
8529 		    "   channel 2   channel 3\n");
8530 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
8531 		    nrate[0], nrate[1], nrate[2], nrate[3]);
8532 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
8533 		    orate[0], orate[1], orate[2], orate[3]);
8534 	} else {
8535 		sbuf_printf(sb, "              channel 0   channel 1\n");
8536 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
8537 		    nrate[0], nrate[1]);
8538 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
8539 		    orate[0], orate[1]);
8540 	}
8541 
8542 	rc = sbuf_finish(sb);
8543 	sbuf_delete(sb);
8544 
8545 	return (rc);
8546 }
8547 
8548 static int
8549 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
8550 {
8551 	struct adapter *sc = arg1;
8552 	struct sbuf *sb;
8553 	uint32_t *buf, *p;
8554 	int rc, i;
8555 
8556 	rc = sysctl_wire_old_buffer(req, 0);
8557 	if (rc != 0)
8558 		return (rc);
8559 
8560 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8561 	if (sb == NULL)
8562 		return (ENOMEM);
8563 
8564 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
8565 	    M_ZERO | M_WAITOK);
8566 
8567 	t4_ulprx_read_la(sc, buf);
8568 	p = buf;
8569 
8570 	sbuf_printf(sb, "      Pcmd        Type   Message"
8571 	    "                Data");
8572 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
8573 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
8574 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
8575 	}
8576 
8577 	rc = sbuf_finish(sb);
8578 	sbuf_delete(sb);
8579 	free(buf, M_CXGBE);
8580 	return (rc);
8581 }
8582 
8583 static int
8584 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
8585 {
8586 	struct adapter *sc = arg1;
8587 	struct sbuf *sb;
8588 	int rc, v;
8589 
8590 	MPASS(chip_id(sc) >= CHELSIO_T5);
8591 
8592 	rc = sysctl_wire_old_buffer(req, 0);
8593 	if (rc != 0)
8594 		return (rc);
8595 
8596 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8597 	if (sb == NULL)
8598 		return (ENOMEM);
8599 
8600 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
8601 	if (G_STATSOURCE_T5(v) == 7) {
8602 		int mode;
8603 
8604 		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
8605 		if (mode == 0) {
8606 			sbuf_printf(sb, "total %d, incomplete %d",
8607 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
8608 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
8609 		} else if (mode == 1) {
8610 			sbuf_printf(sb, "total %d, data overflow %d",
8611 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
8612 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
8613 		} else {
8614 			sbuf_printf(sb, "unknown mode %d", mode);
8615 		}
8616 	}
8617 	rc = sbuf_finish(sb);
8618 	sbuf_delete(sb);
8619 
8620 	return (rc);
8621 }
8622 
8623 static int
8624 sysctl_cpus(SYSCTL_HANDLER_ARGS)
8625 {
8626 	struct adapter *sc = arg1;
8627 	enum cpu_sets op = arg2;
8628 	cpuset_t cpuset;
8629 	struct sbuf *sb;
8630 	int i, rc;
8631 
8632 	MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
8633 
8634 	CPU_ZERO(&cpuset);
8635 	rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
8636 	if (rc != 0)
8637 		return (rc);
8638 
8639 	rc = sysctl_wire_old_buffer(req, 0);
8640 	if (rc != 0)
8641 		return (rc);
8642 
8643 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8644 	if (sb == NULL)
8645 		return (ENOMEM);
8646 
8647 	CPU_FOREACH(i)
8648 		if (CPU_ISSET(i, &cpuset))
     			sbuf_printf(sb, "%d ", i);
8649 	rc = sbuf_finish(sb);
8650 	sbuf_delete(sb);
8651 
8652 	return (rc);
8653 }
8654 
8655 #ifdef TCP_OFFLOAD
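/*
 * Report or replace the list of TCP ports for TLS RX offload; writing a
 * single -1 clears the list.
 */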
8656 static int
8657 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
8658 {
8659 	struct adapter *sc = arg1;
8660 	int *old_ports, *new_ports;
8661 	int i, new_count, rc;
8662 
8663 	if (req->newptr == NULL && req->oldptr == NULL)
8664 		return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
8665 		    sizeof(sc->tt.tls_rx_ports[0])));
8666 
8667 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
8668 	if (rc)
8669 		return (rc);
8670 
8671 	if (sc->tt.num_tls_rx_ports == 0) {
8672 		i = -1;
8673 		rc = SYSCTL_OUT(req, &i, sizeof(i));
8674 	} else
8675 		rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
8676 		    sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
8677 	if (rc == 0 && req->newptr != NULL) {
8678 		new_count = req->newlen / sizeof(new_ports[0]);
8679 		new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
8680 		    M_WAITOK);
8681 		rc = SYSCTL_IN(req, new_ports, new_count *
8682 		    sizeof(new_ports[0]));
8683 		if (rc)
8684 			goto err;
8685 
8686 		/* Allow setting to a single '-1' to clear the list. */
8687 		if (new_count == 1 && new_ports[0] == -1) {
8688 			ADAPTER_LOCK(sc);
8689 			old_ports = sc->tt.tls_rx_ports;
8690 			sc->tt.tls_rx_ports = NULL;
8691 			sc->tt.num_tls_rx_ports = 0;
8692 			ADAPTER_UNLOCK(sc);
8693 			free(old_ports, M_CXGBE);
8694 		} else {
8695 			for (i = 0; i < new_count; i++) {
8696 				if (new_ports[i] < 1 ||
8697 				    new_ports[i] > IPPORT_MAX) {
8698 					rc = EINVAL;
8699 					goto err;
8700 				}
8701 			}
8702 
8703 			ADAPTER_LOCK(sc);
8704 			old_ports = sc->tt.tls_rx_ports;
8705 			sc->tt.tls_rx_ports = new_ports;
8706 			sc->tt.num_tls_rx_ports = new_count;
8707 			ADAPTER_UNLOCK(sc);
8708 			free(old_ports, M_CXGBE);
8709 			new_ports = NULL;
8710 		}
8711 	err:
8712 		free(new_ports, M_CXGBE);
8713 	}
8714 	end_synchronized_op(sc, 0);
8715 	return (rc);
8716 }
8717 
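/*
 * Format val / factor as a decimal string without floating point.  Trailing
 * zeroes are stripped from the remainder, so unit_conv(buf, len, 25, 10)
 * yields "2.5"; note that leading zeroes in the fraction are not preserved.
 */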
8718 static void
8719 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8720 {
8721 	u_int rem = val % factor;
8722 
8723 	if (rem == 0)
8724 		snprintf(buf, len, "%u", val / factor);
8725 	else {
8726 		while (rem % 10 == 0)
8727 			rem /= 10;
8728 		snprintf(buf, len, "%u.%u", val / factor, rem);
8729 	}
8730 }
8731 
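/*
 * Report a TP timer tick in microseconds: one tick is the core clock period
 * (in ps) scaled by 2^resolution, with the resolution field selected by
 * arg2.
 */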
8732 static int
8733 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
8734 {
8735 	struct adapter *sc = arg1;
8736 	char buf[16];
8737 	u_int res, re;
8738 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8739 
8740 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8741 	switch (arg2) {
8742 	case 0:
8743 		/* timer_tick */
8744 		re = G_TIMERRESOLUTION(res);
8745 		break;
8746 	case 1:
8747 		/* TCP timestamp tick */
8748 		re = G_TIMESTAMPRESOLUTION(res);
8749 		break;
8750 	case 2:
8751 		/* DACK tick */
8752 		re = G_DELAYEDACKRESOLUTION(res);
8753 		break;
8754 	default:
8755 		return (EDOOFUS);
8756 	}
8757 
8758 	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
8759 
8760 	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
8761 }
8762 
8763 static int
8764 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
8765 {
8766 	struct adapter *sc = arg1;
8767 	u_int res, dack_re, v;
8768 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8769 
8770 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8771 	dack_re = G_DELAYEDACKRESOLUTION(res);
8772 	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8773 
8774 	return (sysctl_handle_int(oidp, &v, 0, req));
8775 }
8776 
8777 static int
8778 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8779 {
8780 	struct adapter *sc = arg1;
8781 	int reg = arg2;
8782 	u_int tre;
8783 	u_long tp_tick_us, v;
8784 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8785 
8786 	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8787 	    reg == A_TP_PERS_MIN  || reg == A_TP_PERS_MAX ||
8788 	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8789 	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8790 
8791 	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8792 	tp_tick_us = (cclk_ps << tre) / 1000000;
8793 
8794 	if (reg == A_TP_INIT_SRTT)
8795 		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8796 	else
8797 		v = tp_tick_us * t4_read_reg(sc, reg);
8798 
8799 	return (sysctl_handle_long(oidp, &v, 0, req));
8800 }
8801 
8802 /*
8803  * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
8804  * passed to this function.
8805  */
8806 static int
8807 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
8808 {
8809 	struct adapter *sc = arg1;
8810 	int idx = arg2;
8811 	u_int v;
8812 
8813 	MPASS(idx >= 0 && idx <= 24);
8814 
8815 	v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
8816 
8817 	return (sysctl_handle_int(oidp, &v, 0, req));
8818 }
8819 
8820 static int
8821 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
8822 {
8823 	struct adapter *sc = arg1;
8824 	int idx = arg2;
8825 	u_int shift, v, r;
8826 
8827 	MPASS(idx >= 0 && idx < 16);
8828 
8829 	r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
8830 	shift = (idx & 3) << 3;
8831 	v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
8832 
8833 	return (sysctl_handle_int(oidp, &v, 0, req));
8834 }
8835 
8836 static int
8837 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
8838 {
8839 	struct vi_info *vi = arg1;
8840 	struct adapter *sc = vi->pi->adapter;
8841 	int idx, rc, i;
8842 	struct sge_ofld_rxq *ofld_rxq;
8843 	uint8_t v;
8844 
8845 	idx = vi->ofld_tmr_idx;
8846 
8847 	rc = sysctl_handle_int(oidp, &idx, 0, req);
8848 	if (rc != 0 || req->newptr == NULL)
8849 		return (rc);
8850 
8851 	if (idx < 0 || idx >= SGE_NTIMERS)
8852 		return (EINVAL);
8853 
8854 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8855 	    "t4otmr");
8856 	if (rc)
8857 		return (rc);
8858 
8859 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
8860 	for_each_ofld_rxq(vi, i, ofld_rxq) {
8861 #ifdef atomic_store_rel_8
8862 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
8863 #else
8864 		ofld_rxq->iq.intr_params = v;
8865 #endif
8866 	}
8867 	vi->ofld_tmr_idx = idx;
8868 
8869 	end_synchronized_op(sc, LOCK_HELD);
8870 	return (0);
8871 }
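
/*
 * The #ifdef above caters to platforms without an 8-bit release store:
 * iq.intr_params is read again by the rx interrupt path, so a release
 * store is used where the primitive exists and a plain byte store
 * (still atomic on all supported CPUs) is used otherwise.
 */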
8872 
8873 static int
8874 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
8875 {
8876 	struct vi_info *vi = arg1;
8877 	struct adapter *sc = vi->pi->adapter;
8878 	int idx, rc;
8879 
8880 	idx = vi->ofld_pktc_idx;
8881 
8882 	rc = sysctl_handle_int(oidp, &idx, 0, req);
8883 	if (rc != 0 || req->newptr == NULL)
8884 		return (rc);
8885 
8886 	if (idx < -1 || idx >= SGE_NCOUNTERS)
8887 		return (EINVAL);
8888 
8889 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8890 	    "t4opktc");
8891 	if (rc)
8892 		return (rc);
8893 
8894 	if (vi->flags & VI_INIT_DONE)
8895 		rc = EBUSY; /* cannot be changed once the queues are created */
8896 	else
8897 		vi->ofld_pktc_idx = idx;
8898 
8899 	end_synchronized_op(sc, LOCK_HELD);
8900 	return (rc);
8901 }
8902 #endif
8903 
8904 static int
8905 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8906 {
8907 	int rc;
8908 
8909 	if (cntxt->cid > M_CTXTQID)
8910 		return (EINVAL);
8911 
8912 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8913 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8914 		return (EINVAL);
8915 
8916 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8917 	if (rc)
8918 		return (rc);
8919 
8920 	if (sc->flags & FW_OK) {
8921 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8922 		    &cntxt->data[0]);
8923 		if (rc == 0)
8924 			goto done;
8925 	}
8926 
8927 	/*
8928 	 * Read via firmware failed or wasn't even attempted.  Read directly via
8929 	 * the backdoor.
8930 	 */
8931 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8932 done:
8933 	end_synchronized_op(sc, 0);
8934 	return (rc);
8935 }
8936 
8937 static int
8938 load_fw(struct adapter *sc, struct t4_data *fw)
8939 {
8940 	int rc;
8941 	uint8_t *fw_data;
8942 
8943 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8944 	if (rc)
8945 		return (rc);
8946 
8947 	/*
8948 	 * The firmware, with the sole exception of the memory parity error
8949 	 * handler, runs from memory and not flash.  It is almost always safe to
8950 	 * install new firmware on a running system.  Set the DF_LOAD_FW_ANYTIME
8951 	 * bit (bit 1) in hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
8952 	 */
8953 	if (sc->flags & FULL_INIT_DONE &&
8954 	    (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
8955 		rc = EBUSY;
8956 		goto done;
8957 	}
8958 
8959 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
8960 	if (fw_data == NULL) {
8961 		rc = ENOMEM;
8962 		goto done;
8963 	}
8964 
8965 	rc = copyin(fw->data, fw_data, fw->len);
8966 	if (rc == 0)
8967 		rc = -t4_load_fw(sc, fw_data, fw->len);
8968 
8969 	free(fw_data, M_CXGBE);
8970 done:
8971 	end_synchronized_op(sc, 0);
8972 	return (rc);
8973 }
8974 
8975 static int
8976 load_cfg(struct adapter *sc, struct t4_data *cfg)
8977 {
8978 	int rc;
8979 	uint8_t *cfg_data = NULL;
8980 
8981 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
8982 	if (rc)
8983 		return (rc);
8984 
8985 	if (cfg->len == 0) {
8986 		/* clear */
8987 		rc = -t4_load_cfg(sc, NULL, 0);
8988 		goto done;
8989 	}
8990 
8991 	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
8992 	if (cfg_data == NULL) {
8993 		rc = ENOMEM;
8994 		goto done;
8995 	}
8996 
8997 	rc = copyin(cfg->data, cfg_data, cfg->len);
8998 	if (rc == 0)
8999 		rc = -t4_load_cfg(sc, cfg_data, cfg->len);
9000 
9001 	free(cfg_data, M_CXGBE);
9002 done:
9003 	end_synchronized_op(sc, 0);
9004 	return (rc);
9005 }
9006 
9007 static int
9008 load_boot(struct adapter *sc, struct t4_bootrom *br)
9009 {
9010 	int rc;
9011 	uint8_t *br_data = NULL;
9012 	u_int offset;
9013 
9014 	if (br->len > 1024 * 1024)
9015 		return (EFBIG);
9016 
9017 	if (br->pf_offset == 0) {
9018 		/* pfidx */
9019 		if (br->pfidx_addr > 7)
9020 			return (EINVAL);
9021 		offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
9022 		    A_PCIE_PF_EXPROM_OFST)));
9023 	} else if (br->pf_offset == 1) {
9024 		/* offset */
9025 		offset = G_OFFSET(br->pfidx_addr);
9026 	} else {
9027 		return (EINVAL);
9028 	}
9029 
9030 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
9031 	if (rc)
9032 		return (rc);
9033 
9034 	if (br->len == 0) {
9035 		/* clear */
9036 		rc = -t4_load_boot(sc, NULL, offset, 0);
9037 		goto done;
9038 	}
9039 
9040 	br_data = malloc(br->len, M_CXGBE, M_WAITOK);
9041 	if (br_data == NULL) {
9042 		rc = ENOMEM;
9043 		goto done;
9044 	}
9045 
9046 	rc = copyin(br->data, br_data, br->len);
9047 	if (rc == 0)
9048 		rc = -t4_load_boot(sc, br_data, offset, br->len);
9049 
9050 	free(br_data, M_CXGBE);
9051 done:
9052 	end_synchronized_op(sc, 0);
9053 	return (rc);
9054 }
9055 
9056 static int
9057 load_bootcfg(struct adapter *sc, struct t4_data *bc)
9058 {
9059 	int rc;
9060 	uint8_t *bc_data = NULL;
9061 
9062 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9063 	if (rc)
9064 		return (rc);
9065 
9066 	if (bc->len == 0) {
9067 		/* clear */
9068 		rc = -t4_load_bootcfg(sc, NULL, 0);
9069 		goto done;
9070 	}
9071 
9072 	bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
9073 	if (bc_data == NULL) {
9074 		rc = ENOMEM;
9075 		goto done;
9076 	}
9077 
9078 	rc = copyin(bc->data, bc_data, bc->len);
9079 	if (rc == 0)
9080 		rc = -t4_load_bootcfg(sc, bc_data, bc->len);
9081 
9082 	free(bc_data, M_CXGBE);
9083 done:
9084 	end_synchronized_op(sc, 0);
9085 	return (rc);
9086 }
9087 
9088 static int
9089 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
9090 {
9091 	int rc;
9092 	struct cudbg_init *cudbg;
9093 	void *handle, *buf;
9094 
9095 	/* buf is large, don't block if no memory is available */
9096 	buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
9097 	if (buf == NULL)
9098 		return (ENOMEM);
9099 
9100 	handle = cudbg_alloc_handle();
9101 	if (handle == NULL) {
9102 		rc = ENOMEM;
9103 		goto done;
9104 	}
9105 
9106 	cudbg = cudbg_get_init(handle);
9107 	cudbg->adap = sc;
9108 	cudbg->print = (cudbg_print_cb)printf;
9109 
9110 #ifndef notyet
9111 	device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
9112 	    __func__, dump->wr_flash, dump->len, dump->data);
9113 #endif
9114 
9115 	if (dump->wr_flash)
9116 		cudbg->use_flash = 1;
9117 	MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
9118 	memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
9119 
9120 	rc = cudbg_collect(handle, buf, &dump->len);
9121 	if (rc != 0)
9122 		goto done;
9123 
9124 	rc = copyout(buf, dump->data, dump->len);
9125 done:
9126 	cudbg_free_handle(handle);
9127 	free(buf, M_CXGBE);
9128 	return (rc);
9129 }
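
/*
 * Note that dump->len is in/out above: cudbg_collect() trims it to the
 * number of bytes actually collected and only that much is copied back
 * to the user buffer.  dump->bitmap selects the debug entities that
 * cudbg should gather.
 */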
9130 
9131 static void
9132 free_offload_policy(struct t4_offload_policy *op)
9133 {
9134 	struct offload_rule *r;
9135 	int i;
9136 
9137 	if (op == NULL)
9138 		return;
9139 
9140 	r = &op->rule[0];
9141 	for (i = 0; i < op->nrules; i++, r++) {
9142 		free(r->bpf_prog.bf_insns, M_CXGBE);
9143 	}
9144 	free(op->rule, M_CXGBE);
9145 	free(op, M_CXGBE);
9146 }
9147 
9148 static int
9149 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
9150 {
9151 	int i, rc, len;
9152 	struct t4_offload_policy *op, *old;
9153 	struct bpf_program *bf;
9154 	const struct offload_settings *s;
9155 	struct offload_rule *r;
9156 	void *u;
9157 
9158 	if (!is_offload(sc))
9159 		return (ENODEV);
9160 
9161 	if (uop->nrules == 0) {
9162 		/* Delete installed policies. */
9163 		op = NULL;
9164 		goto set_policy;
9165 	} else if (uop->nrules > 256) { /* arbitrary */
9166 		return (E2BIG);
9167 	}
9168 
9169 	/* Copy userspace offload policy to kernel */
9170 	op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
9171 	op->nrules = uop->nrules;
9172 	len = op->nrules * sizeof(struct offload_rule);
9173 	op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9174 	rc = copyin(uop->rule, op->rule, len);
9175 	if (rc) {
9176 		free(op->rule, M_CXGBE);
9177 		free(op, M_CXGBE);
9178 		return (rc);
9179 	}
9180 
9181 	r = &op->rule[0];
9182 	for (i = 0; i < op->nrules; i++, r++) {
9183 
9184 		/* Validate open_type */
9185 		if (r->open_type != OPEN_TYPE_LISTEN &&
9186 		    r->open_type != OPEN_TYPE_ACTIVE &&
9187 		    r->open_type != OPEN_TYPE_PASSIVE &&
9188 		    r->open_type != OPEN_TYPE_DONTCARE) {
9189 error:
9190 			/*
9191 			 * Rules 0 to i - 1 have malloc'd filters that need
9192 			 * to be freed.  Rules i to nrules - 1 still hold
9193 			 * userspace pointers and must be left alone.
9194 			 */
9195 			op->nrules = i;
9196 			free_offload_policy(op);
9197 			return (rc);
9198 		}
9199 
9200 		/* Validate settings */
9201 		s = &r->settings;
9202 		if ((s->offload != 0 && s->offload != 1) ||
9203 		    s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
9204 		    s->sched_class < -1 ||
9205 		    s->sched_class >= sc->chip_params->nsched_cls) {
9206 			rc = EINVAL;
9207 			goto error;
9208 		}
9209 
9210 		bf = &r->bpf_prog;
9211 		u = bf->bf_insns;	/* userspace ptr */
9212 		bf->bf_insns = NULL;
9213 		if (bf->bf_len == 0) {
9214 			/* legal, matches everything */
9215 			continue;
9216 		}
9217 		len = bf->bf_len * sizeof(*bf->bf_insns);
9218 		bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9219 		rc = copyin(u, bf->bf_insns, len);
9220 		if (rc == 0 && !bpf_validate(bf->bf_insns, bf->bf_len))
9221 			rc = EINVAL;
9222 		if (rc != 0) {
9223 			/* The error path frees rules 0 to i - 1 only. */
9224 			free(bf->bf_insns, M_CXGBE);
9225 			goto error;
9226 		}
9227 	}
9228 set_policy:
9229 	rw_wlock(&sc->policy_lock);
9230 	old = sc->policy;
9231 	sc->policy = op;
9232 	rw_wunlock(&sc->policy_lock);
9233 	free_offload_policy(old);
9234 
9235 	return (0);
9236 }
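
/*
 * Publication protocol used above: the replacement policy is fully
 * copied in and validated before it becomes visible, the pointer swap
 * happens under the policy_lock write lock, and the old policy is freed
 * only after the lock is dropped, by which time no reader (all of whom
 * hold the read lock while using sc->policy) can still reference it.
 */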
9237 
9238 #define MAX_READ_BUF_SIZE (128 * 1024)
9239 static int
9240 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
9241 {
9242 	uint32_t addr, remaining, n;
9243 	uint32_t *buf;
9244 	int rc;
9245 	uint8_t *dst;
9246 
9247 	rc = validate_mem_range(sc, mr->addr, mr->len);
9248 	if (rc != 0)
9249 		return (rc);
9250 
9251 	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
9252 	addr = mr->addr;
9253 	remaining = mr->len;
9254 	dst = (void *)mr->data;
9255 
9256 	while (remaining) {
9257 		n = min(remaining, MAX_READ_BUF_SIZE);
9258 		read_via_memwin(sc, 2, addr, buf, n);
9259 
9260 		rc = copyout(buf, dst, n);
9261 		if (rc != 0)
9262 			break;
9263 
9264 		dst += n;
9265 		remaining -= n;
9266 		addr += n;
9267 	}
9268 
9269 	free(buf, M_CXGBE);
9270 	return (rc);
9271 }
9272 #undef MAX_READ_BUF_SIZE
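
/*
 * The chunking above bounds kernel memory use: however large mr->len
 * is, at most 128KB is allocated, and each chunk is copied out before
 * the next one is read through memory window 2.
 */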
9273 
9274 static int
9275 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
9276 {
9277 	int rc;
9278 
9279 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
9280 		return (EINVAL);
9281 
9282 	if (i2cd->len > sizeof(i2cd->data))
9283 		return (EFBIG);
9284 
9285 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
9286 	if (rc)
9287 		return (rc);
9288 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
9289 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
9290 	end_synchronized_op(sc, 0);
9291 
9292 	return (rc);
9293 }
9294 
9295 int
9296 t4_os_find_pci_capability(struct adapter *sc, int cap)
9297 {
9298 	int i;
9299 
9300 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
9301 }
9302 
9303 int
9304 t4_os_pci_save_state(struct adapter *sc)
9305 {
9306 	device_t dev;
9307 	struct pci_devinfo *dinfo;
9308 
9309 	dev = sc->dev;
9310 	dinfo = device_get_ivars(dev);
9311 
9312 	pci_cfg_save(dev, dinfo, 0);
9313 	return (0);
9314 }
9315 
9316 int
9317 t4_os_pci_restore_state(struct adapter *sc)
9318 {
9319 	device_t dev;
9320 	struct pci_devinfo *dinfo;
9321 
9322 	dev = sc->dev;
9323 	dinfo = device_get_ivars(dev);
9324 
9325 	pci_cfg_restore(dev, dinfo);
9326 	return (0);
9327 }
9328 
9329 void
9330 t4_os_portmod_changed(struct port_info *pi)
9331 {
9332 	struct adapter *sc = pi->adapter;
9333 	struct vi_info *vi;
9334 	struct ifnet *ifp;
9335 	static const char *mod_str[] = {
9336 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
9337 	};
9338 
9339 	MPASS((pi->flags & FIXED_IFMEDIA) == 0);
9340 
9341 	vi = &pi->vi[0];
9342 	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
9343 		PORT_LOCK(pi);
9344 		build_medialist(pi, &pi->media);
9345 		apply_l1cfg(pi);
9346 		PORT_UNLOCK(pi);
9347 		end_synchronized_op(sc, LOCK_HELD);
9348 	}
9349 
9350 	ifp = vi->ifp;
9351 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
9352 		if_printf(ifp, "transceiver unplugged.\n");
9353 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
9354 		if_printf(ifp, "unknown transceiver inserted.\n");
9355 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
9356 		if_printf(ifp, "unsupported transceiver inserted.\n");
9357 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
9358 		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
9359 		    port_top_speed(pi), mod_str[pi->mod_type]);
9360 	} else {
9361 		if_printf(ifp, "transceiver (type %d) inserted.\n",
9362 		    pi->mod_type);
9363 	}
9364 }
9365 
9366 void
9367 t4_os_link_changed(struct port_info *pi)
9368 {
9369 	struct vi_info *vi;
9370 	struct ifnet *ifp;
9371 	struct link_config *lc;
9372 	int v;
9373 
9374 	PORT_LOCK_ASSERT_OWNED(pi);
9375 
9376 	for_each_vi(pi, v, vi) {
9377 		ifp = vi->ifp;
9378 		if (ifp == NULL)
9379 			continue;
9380 
9381 		lc = &pi->link_cfg;
9382 		if (lc->link_ok) {
9383 			ifp->if_baudrate = IF_Mbps(lc->speed);
9384 			if_link_state_change(ifp, LINK_STATE_UP);
9385 		} else {
9386 			if_link_state_change(ifp, LINK_STATE_DOWN);
9387 		}
9388 	}
9389 }
9390 
9391 void
9392 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
9393 {
9394 	struct adapter *sc;
9395 
9396 	sx_slock(&t4_list_lock);
9397 	SLIST_FOREACH(sc, &t4_list, link) {
9398 		/*
9399 		 * func should not make any assumptions about what state sc is
9400 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
9401 		 */
9402 		func(sc, arg);
9403 	}
9404 	sx_sunlock(&t4_list_lock);
9405 }
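
/*
 * Example usage of t4_iterate() (hypothetical callback, shown for
 * illustration only):
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *
 *	t4_iterate(count_adapters, &n);
 */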
9406 
9407 static int
9408 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
9409     struct thread *td)
9410 {
9411 	int rc;
9412 	struct adapter *sc = dev->si_drv1;
9413 
9414 	rc = priv_check(td, PRIV_DRIVER);
9415 	if (rc != 0)
9416 		return (rc);
9417 
9418 	switch (cmd) {
9419 	case CHELSIO_T4_GETREG: {
9420 		struct t4_reg *edata = (struct t4_reg *)data;
9421 
9422 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9423 			return (EFAULT);
9424 
9425 		if (edata->size == 4)
9426 			edata->val = t4_read_reg(sc, edata->addr);
9427 		else if (edata->size == 8)
9428 			edata->val = t4_read_reg64(sc, edata->addr);
9429 		else
9430 			return (EINVAL);
9431 
9432 		break;
9433 	}
9434 	case CHELSIO_T4_SETREG: {
9435 		struct t4_reg *edata = (struct t4_reg *)data;
9436 
9437 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9438 			return (EFAULT);
9439 
9440 		if (edata->size == 4) {
9441 			if (edata->val & 0xffffffff00000000)
9442 				return (EINVAL);
9443 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
9444 		} else if (edata->size == 8)
9445 			t4_write_reg64(sc, edata->addr, edata->val);
9446 		else
9447 			return (EINVAL);
9448 		break;
9449 	}
9450 	case CHELSIO_T4_REGDUMP: {
9451 		struct t4_regdump *regs = (struct t4_regdump *)data;
9452 		int reglen = t4_get_regs_len(sc);
9453 		uint8_t *buf;
9454 
9455 		if (regs->len < reglen) {
9456 			regs->len = reglen; /* hint to the caller */
9457 			return (ENOBUFS);
9458 		}
9459 
9460 		regs->len = reglen;
9461 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
9462 		get_regs(sc, regs, buf);
9463 		rc = copyout(buf, regs->data, reglen);
9464 		free(buf, M_CXGBE);
9465 		break;
9466 	}
9467 	case CHELSIO_T4_GET_FILTER_MODE:
9468 		rc = get_filter_mode(sc, (uint32_t *)data);
9469 		break;
9470 	case CHELSIO_T4_SET_FILTER_MODE:
9471 		rc = set_filter_mode(sc, *(uint32_t *)data);
9472 		break;
9473 	case CHELSIO_T4_GET_FILTER:
9474 		rc = get_filter(sc, (struct t4_filter *)data);
9475 		break;
9476 	case CHELSIO_T4_SET_FILTER:
9477 		rc = set_filter(sc, (struct t4_filter *)data);
9478 		break;
9479 	case CHELSIO_T4_DEL_FILTER:
9480 		rc = del_filter(sc, (struct t4_filter *)data);
9481 		break;
9482 	case CHELSIO_T4_GET_SGE_CONTEXT:
9483 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
9484 		break;
9485 	case CHELSIO_T4_LOAD_FW:
9486 		rc = load_fw(sc, (struct t4_data *)data);
9487 		break;
9488 	case CHELSIO_T4_GET_MEM:
9489 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
9490 		break;
9491 	case CHELSIO_T4_GET_I2C:
9492 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
9493 		break;
9494 	case CHELSIO_T4_CLEAR_STATS: {
9495 		int i, v, bg_map;
9496 		u_int port_id = *(uint32_t *)data;
9497 		struct port_info *pi;
9498 		struct vi_info *vi;
9499 
9500 		if (port_id >= sc->params.nports)
9501 			return (EINVAL);
9502 		pi = sc->port[port_id];
9503 		if (pi == NULL)
9504 			return (EIO);
9505 
9506 		/* MAC stats */
9507 		t4_clr_port_stats(sc, pi->tx_chan);
9508 		pi->tx_parse_error = 0;
9509 		pi->tnl_cong_drops = 0;
9510 		mtx_lock(&sc->reg_lock);
9511 		for_each_vi(pi, v, vi) {
9512 			if (vi->flags & VI_INIT_DONE)
9513 				t4_clr_vi_stats(sc, vi->viid);
9514 		}
9515 		bg_map = pi->mps_bg_map;
9516 		v = 0;	/* reuse */
9517 		while (bg_map) {
9518 			i = ffs(bg_map) - 1;
9519 			t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
9520 			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
9521 			bg_map &= ~(1 << i);
9522 		}
9523 		mtx_unlock(&sc->reg_lock);
9524 
9525 		/*
9526 		 * Since this command accepts a port, clear stats for
9527 		 * all VIs on this port.
9528 		 */
9529 		for_each_vi(pi, v, vi) {
9530 			if (vi->flags & VI_INIT_DONE) {
9531 				struct sge_rxq *rxq;
9532 				struct sge_txq *txq;
9533 				struct sge_wrq *wrq;
9534 
9535 				for_each_rxq(vi, i, rxq) {
9536 #if defined(INET) || defined(INET6)
9537 					rxq->lro.lro_queued = 0;
9538 					rxq->lro.lro_flushed = 0;
9539 #endif
9540 					rxq->rxcsum = 0;
9541 					rxq->vlan_extraction = 0;
9542 				}
9543 
9544 				for_each_txq(vi, i, txq) {
9545 					txq->txcsum = 0;
9546 					txq->tso_wrs = 0;
9547 					txq->vlan_insertion = 0;
9548 					txq->imm_wrs = 0;
9549 					txq->sgl_wrs = 0;
9550 					txq->txpkt_wrs = 0;
9551 					txq->txpkts0_wrs = 0;
9552 					txq->txpkts1_wrs = 0;
9553 					txq->txpkts0_pkts = 0;
9554 					txq->txpkts1_pkts = 0;
9555 					mp_ring_reset_stats(txq->r);
9556 				}
9557 
9558 #ifdef TCP_OFFLOAD
9559 				/* nothing to clear for each ofld_rxq */
9560 
9561 				for_each_ofld_txq(vi, i, wrq) {
9562 					wrq->tx_wrs_direct = 0;
9563 					wrq->tx_wrs_copied = 0;
9564 				}
9565 #endif
9566 
9567 				if (IS_MAIN_VI(vi)) {
9568 					wrq = &sc->sge.ctrlq[pi->port_id];
9569 					wrq->tx_wrs_direct = 0;
9570 					wrq->tx_wrs_copied = 0;
9571 				}
9572 			}
9573 		}
9574 		break;
9575 	}
9576 	case CHELSIO_T4_SCHED_CLASS:
9577 		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
9578 		break;
9579 	case CHELSIO_T4_SCHED_QUEUE:
9580 		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
9581 		break;
9582 	case CHELSIO_T4_GET_TRACER:
9583 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
9584 		break;
9585 	case CHELSIO_T4_SET_TRACER:
9586 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
9587 		break;
9588 	case CHELSIO_T4_LOAD_CFG:
9589 		rc = load_cfg(sc, (struct t4_data *)data);
9590 		break;
9591 	case CHELSIO_T4_LOAD_BOOT:
9592 		rc = load_boot(sc, (struct t4_bootrom *)data);
9593 		break;
9594 	case CHELSIO_T4_LOAD_BOOTCFG:
9595 		rc = load_bootcfg(sc, (struct t4_data *)data);
9596 		break;
9597 	case CHELSIO_T4_CUDBG_DUMP:
9598 		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
9599 		break;
9600 	case CHELSIO_T4_SET_OFLD_POLICY:
9601 		rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
9602 		break;
9603 	default:
9604 		rc = ENOTTY;
9605 	}
9606 
9607 	return (rc);
9608 }
9609 
9610 void
9611 t4_db_full(struct adapter *sc)
9612 {
9613 
9614 	CXGBE_UNIMPLEMENTED(__func__);
9615 }
9616 
9617 void
9618 t4_db_dropped(struct adapter *sc)
9619 {
9620 
9621 	CXGBE_UNIMPLEMENTED(__func__);
9622 }
9623 
9624 #ifdef TCP_OFFLOAD
9625 static int
9626 toe_capability(struct vi_info *vi, int enable)
9627 {
9628 	int rc;
9629 	struct port_info *pi = vi->pi;
9630 	struct adapter *sc = pi->adapter;
9631 
9632 	ASSERT_SYNCHRONIZED_OP(sc);
9633 
9634 	if (!is_offload(sc))
9635 		return (ENODEV);
9636 
9637 	if (enable) {
9638 		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
9639 			/* TOE is already enabled. */
9640 			return (0);
9641 		}
9642 
9643 		/*
9644 		 * We need the port's queues around so that we're able to send
9645 		 * and receive CPLs to/from the TOE even if the ifnet for this
9646 		 * port has never been UP'd administratively.
9647 		 */
9648 		if (!(vi->flags & VI_INIT_DONE)) {
9649 			rc = vi_full_init(vi);
9650 			if (rc)
9651 				return (rc);
9652 		}
9653 		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
9654 			rc = vi_full_init(&pi->vi[0]);
9655 			if (rc)
9656 				return (rc);
9657 		}
9658 
9659 		if (isset(&sc->offload_map, pi->port_id)) {
9660 			/* TOE is enabled on another VI of this port. */
9661 			pi->uld_vis++;
9662 			return (0);
9663 		}
9664 
9665 		if (!uld_active(sc, ULD_TOM)) {
9666 			rc = t4_activate_uld(sc, ULD_TOM);
9667 			if (rc == EAGAIN) {
9668 				log(LOG_WARNING,
9669 				    "You must kldload t4_tom.ko before trying "
9670 				    "to enable TOE on a cxgbe interface.\n");
9671 			}
9672 			if (rc != 0)
9673 				return (rc);
9674 			KASSERT(sc->tom_softc != NULL,
9675 			    ("%s: TOM activated but softc NULL", __func__));
9676 			KASSERT(uld_active(sc, ULD_TOM),
9677 			    ("%s: TOM activated but flag not set", __func__));
9678 		}
9679 
9680 		/* Activate iWARP and iSCSI too, if the modules are loaded. */
9681 		if (!uld_active(sc, ULD_IWARP))
9682 			(void) t4_activate_uld(sc, ULD_IWARP);
9683 		if (!uld_active(sc, ULD_ISCSI))
9684 			(void) t4_activate_uld(sc, ULD_ISCSI);
9685 
9686 		pi->uld_vis++;
9687 		setbit(&sc->offload_map, pi->port_id);
9688 	} else {
9689 		pi->uld_vis--;
9690 
9691 		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
9692 			return (0);
9693 
9694 		KASSERT(uld_active(sc, ULD_TOM),
9695 		    ("%s: TOM never initialized?", __func__));
9696 		clrbit(&sc->offload_map, pi->port_id);
9697 	}
9698 
9699 	return (0);
9700 }
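
/*
 * Accounting used above: pi->uld_vis counts the VIs on the port with
 * TOE enabled, and sc->offload_map has one bit per port that is set on
 * the first enable and cleared on the last disable, so per-port offload
 * state toggles only at those two transitions.
 */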
9701 
9702 /*
9703  * Add an upper layer driver to the global list.
9704  */
9705 int
9706 t4_register_uld(struct uld_info *ui)
9707 {
9708 	int rc = 0;
9709 	struct uld_info *u;
9710 
9711 	sx_xlock(&t4_uld_list_lock);
9712 	SLIST_FOREACH(u, &t4_uld_list, link) {
9713 		if (u->uld_id == ui->uld_id) {
9714 			rc = EEXIST;
9715 			goto done;
9716 		}
9717 	}
9718 
9719 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9720 	ui->refcount = 0;
9721 done:
9722 	sx_xunlock(&t4_uld_list_lock);
9723 	return (rc);
9724 }
9725 
9726 int
9727 t4_unregister_uld(struct uld_info *ui)
9728 {
9729 	int rc = EINVAL;
9730 	struct uld_info *u;
9731 
9732 	sx_xlock(&t4_uld_list_lock);
9733 
9734 	SLIST_FOREACH(u, &t4_uld_list, link) {
9735 		if (u == ui) {
9736 			if (ui->refcount > 0) {
9737 				rc = EBUSY;
9738 				goto done;
9739 			}
9740 
9741 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9742 			rc = 0;
9743 			goto done;
9744 		}
9745 	}
9746 done:
9747 	sx_xunlock(&t4_uld_list_lock);
9748 	return (rc);
9749 }
9750 
9751 int
9752 t4_activate_uld(struct adapter *sc, int id)
9753 {
9754 	int rc;
9755 	struct uld_info *ui;
9756 
9757 	ASSERT_SYNCHRONIZED_OP(sc);
9758 
9759 	if (id < 0 || id > ULD_MAX)
9760 		return (EINVAL);
9761 	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
9762 
9763 	sx_slock(&t4_uld_list_lock);
9764 
9765 	SLIST_FOREACH(ui, &t4_uld_list, link) {
9766 		if (ui->uld_id == id) {
9767 			if (!(sc->flags & FULL_INIT_DONE)) {
9768 				rc = adapter_full_init(sc);
9769 				if (rc != 0)
9770 					break;
9771 			}
9772 
9773 			rc = ui->activate(sc);
9774 			if (rc == 0) {
9775 				setbit(&sc->active_ulds, id);
9776 				ui->refcount++;
9777 			}
9778 			break;
9779 		}
9780 	}
9781 
9782 	sx_sunlock(&t4_uld_list_lock);
9783 
9784 	return (rc);
9785 }
9786 
9787 int
9788 t4_deactivate_uld(struct adapter *sc, int id)
9789 {
9790 	int rc;
9791 	struct uld_info *ui;
9792 
9793 	ASSERT_SYNCHRONIZED_OP(sc);
9794 
9795 	if (id < 0 || id > ULD_MAX)
9796 		return (EINVAL);
9797 	rc = ENXIO;
9798 
9799 	sx_slock(&t4_uld_list_lock);
9800 
9801 	SLIST_FOREACH(ui, &t4_uld_list, link) {
9802 		if (ui->uld_id == id) {
9803 			rc = ui->deactivate(sc);
9804 			if (rc == 0) {
9805 				clrbit(&sc->active_ulds, id);
9806 				ui->refcount--;
9807 			}
9808 			break;
9809 		}
9810 	}
9811 
9812 	sx_sunlock(&t4_uld_list_lock);
9813 
9814 	return (rc);
9815 }
9816 
9817 int
9818 uld_active(struct adapter *sc, int uld_id)
9819 {
9820 
9821 	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9822 
9823 	return (isset(&sc->active_ulds, uld_id));
9824 }
9825 #endif
9826 
9827 /*
9828  * t  = ptr to tunable.
9829  * nc = number of CPUs.
9830  * c  = compiled-in default for that tunable.
9831  */
9832 static void
9833 calculate_nqueues(int *t, int nc, const int c)
9834 {
9835 	int nq;
9836 
9837 	if (*t > 0)
9838 		return;
9839 	nq = *t < 0 ? -*t : c;
9840 	*t = min(nc, nq);
9841 }
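
/*
 * Examples (illustrative): on an 8-CPU system with c = 16, a tunable
 * left at 0 becomes min(8, 16) = 8, a tunable set to -4 becomes
 * min(8, 4) = 4, and a tunable explicitly set to a positive value is
 * left untouched.
 */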
9842 
9843 /*
9844  * Come up with reasonable defaults for some of the tunables, provided they're
9845  * not set by the user (in which case we'll use the values as is).
9846  */
9847 static void
9848 tweak_tunables(void)
9849 {
9850 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
9851 
9852 	if (t4_ntxq < 1) {
9853 #ifdef RSS
9854 		t4_ntxq = rss_getnumbuckets();
9855 #else
9856 		calculate_nqueues(&t4_ntxq, nc, NTXQ);
9857 #endif
9858 	}
9859 
9860 	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
9861 
9862 	if (t4_nrxq < 1) {
9863 #ifdef RSS
9864 		t4_nrxq = rss_getnumbuckets();
9865 #else
9866 		calculate_nqueues(&t4_nrxq, nc, NRXQ);
9867 #endif
9868 	}
9869 
9870 	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
9871 
9872 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
9873 	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
9874 	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
9875 #endif
9876 #ifdef TCP_OFFLOAD
9877 	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
9878 	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
9879 
9880 	if (t4_toecaps_allowed == -1)
9881 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9882 
9883 	if (t4_rdmacaps_allowed == -1) {
9884 		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
9885 		    FW_CAPS_CONFIG_RDMA_RDMAC;
9886 	}
9887 
9888 	if (t4_iscsicaps_allowed == -1) {
9889 		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
9890 		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
9891 		    FW_CAPS_CONFIG_ISCSI_T10DIF;
9892 	}
9893 
9894 	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
9895 		t4_tmr_idx_ofld = TMR_IDX_OFLD;
9896 
9897 	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
9898 		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
9899 #else
9900 	if (t4_toecaps_allowed == -1)
9901 		t4_toecaps_allowed = 0;
9902 
9903 	if (t4_rdmacaps_allowed == -1)
9904 		t4_rdmacaps_allowed = 0;
9905 
9906 	if (t4_iscsicaps_allowed == -1)
9907 		t4_iscsicaps_allowed = 0;
9908 #endif
9909 
9910 #ifdef DEV_NETMAP
9911 	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
9912 	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
9913 #endif
9914 
9915 	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
9916 		t4_tmr_idx = TMR_IDX;
9917 
9918 	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
9919 		t4_pktc_idx = PKTC_IDX;
9920 
9921 	if (t4_qsize_txq < 128)
9922 		t4_qsize_txq = 128;
9923 
9924 	if (t4_qsize_rxq < 128)
9925 		t4_qsize_rxq = 128;
9926 	while (t4_qsize_rxq & 7)
9927 		t4_qsize_rxq++;
9928 
9929 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
9930 
9931 	/*
9932 	 * Number of VIs to create per-port.  The first VI is the "main" regular
9933 	 * VI for the port.  The rest are additional virtual interfaces on the
9934 	 * same physical port.  Note that the main VI does not have native
9935 	 * netmap support but the extra VIs do.
9936 	 *
9937 	 * Limit the number of VIs per port to the number of available
9938 	 * MAC addresses per port.
9939 	 */
9940 	if (t4_num_vis < 1)
9941 		t4_num_vis = 1;
9942 	if (t4_num_vis > nitems(vi_mac_funcs)) {
9943 		t4_num_vis = nitems(vi_mac_funcs);
9944 		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
9945 	}
9946 
9947 	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
9948 		pcie_relaxed_ordering = 1;
9949 #if defined(__i386__) || defined(__amd64__)
9950 		if (cpu_vendor_id == CPU_VENDOR_INTEL)
9951 			pcie_relaxed_ordering = 0;
9952 #endif
9953 	}
9954 }
9955 
9956 #ifdef DDB
9957 static void
9958 t4_dump_tcb(struct adapter *sc, int tid)
9959 {
9960 	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
9961 
9962 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
9963 	save = t4_read_reg(sc, reg);
9964 	base = sc->memwin[2].mw_base;
9965 
9966 	/* Dump TCB for the tid */
9967 	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
9968 	tcb_addr += tid * TCB_SIZE;
9969 
9970 	if (is_t4(sc)) {
9971 		pf = 0;
9972 		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
9973 	} else {
9974 		pf = V_PFNUM(sc->pf);
9975 		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
9976 	}
9977 	t4_write_reg(sc, reg, win_pos | pf);
9978 	t4_read_reg(sc, reg);
9979 
9980 	off = tcb_addr - win_pos;
9981 	for (i = 0; i < 4; i++) {
9982 		uint32_t buf[8];
9983 		for (j = 0; j < 8; j++, off += 4)
9984 			buf[j] = htonl(t4_read_reg(sc, base + off));
9985 
9986 		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
9987 		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
9988 		    buf[7]);
9989 	}
9990 
9991 	t4_write_reg(sc, reg, save);
9992 	t4_read_reg(sc, reg);
9993 }
9994 
9995 static void
9996 t4_dump_devlog(struct adapter *sc)
9997 {
9998 	struct devlog_params *dparams = &sc->params.devlog;
9999 	struct fw_devlog_e e;
10000 	int i, first, j, m, nentries, rc;
10001 	uint64_t ftstamp = UINT64_MAX;
10002 
10003 	if (dparams->start == 0) {
10004 		db_printf("devlog params not valid\n");
10005 		return;
10006 	}
10007 
10008 	nentries = dparams->size / sizeof(struct fw_devlog_e);
10009 	m = fwmtype_to_hwmtype(dparams->memtype);
10010 
10011 	/* Find the first entry. */
10012 	first = -1;
10013 	for (i = 0; i < nentries && !db_pager_quit; i++) {
10014 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10015 		    sizeof(e), (void *)&e);
10016 		if (rc != 0)
10017 			break;
10018 
10019 		if (e.timestamp == 0)
10020 			break;
10021 
10022 		e.timestamp = be64toh(e.timestamp);
10023 		if (e.timestamp < ftstamp) {
10024 			ftstamp = e.timestamp;
10025 			first = i;
10026 		}
10027 	}
10028 
10029 	if (first == -1)
10030 		return;
10031 
10032 	i = first;
10033 	do {
10034 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10035 		    sizeof(e), (void *)&e);
10036 		if (rc != 0)
10037 			return;
10038 
10039 		if (e.timestamp == 0)
10040 			return;
10041 
10042 		e.timestamp = be64toh(e.timestamp);
10043 		e.seqno = be32toh(e.seqno);
10044 		for (j = 0; j < 8; j++)
10045 			e.params[j] = be32toh(e.params[j]);
10046 
10047 		db_printf("%10d  %15ju  %8s  %8s  ",
10048 		    e.seqno, e.timestamp,
10049 		    (e.level < nitems(devlog_level_strings) ?
10050 			devlog_level_strings[e.level] : "UNKNOWN"),
10051 		    (e.facility < nitems(devlog_facility_strings) ?
10052 			devlog_facility_strings[e.facility] : "UNKNOWN"));
10053 		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
10054 		    e.params[3], e.params[4], e.params[5], e.params[6],
10055 		    e.params[7]);
10056 
10057 		if (++i == nentries)
10058 			i = 0;
10059 	} while (i != first && !db_pager_quit);
10060 }
10061 
10062 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
10063 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
10064 
10065 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
10066 {
10067 	device_t dev;
10068 	int t;
10069 	bool valid;
10070 
10071 	valid = false;
10072 	t = db_read_token();
10073 	if (t == tIDENT) {
10074 		dev = device_lookup_by_name(db_tok_string);
10075 		valid = true;
10076 	}
10077 	db_skip_to_eol();
10078 	if (!valid) {
10079 		db_printf("usage: show t4 devlog <nexus>\n");
10080 		return;
10081 	}
10082 
10083 	if (dev == NULL) {
10084 		db_printf("device not found\n");
10085 		return;
10086 	}
10087 
10088 	t4_dump_devlog(device_get_softc(dev));
10089 }
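
/*
 * Example (the device name is illustrative): at the ddb prompt,
 * "show t4 devlog t5nex0" dumps the firmware device log of the t5nex0
 * nexus.
 */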
10090 
10091 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
10092 {
10093 	device_t dev;
10094 	int radix, tid, t;
10095 	bool valid;
10096 
10097 	valid = false;
10098 	radix = db_radix;
10099 	db_radix = 10;
10100 	t = db_read_token();
10101 	if (t == tIDENT) {
10102 		dev = device_lookup_by_name(db_tok_string);
10103 		t = db_read_token();
10104 		if (t == tNUMBER) {
10105 			tid = db_tok_number;
10106 			valid = true;
10107 		}
10108 	}
10109 	db_radix = radix;
10110 	db_skip_to_eol();
10111 	if (!valid) {
10112 		db_printf("usage: show t4 tcb <nexus> <tid>\n");
10113 		return;
10114 	}
10115 
10116 	if (dev == NULL) {
10117 		db_printf("device not found\n");
10118 		return;
10119 	}
10120 	if (tid < 0) {
10121 		db_printf("invalid tid\n");
10122 		return;
10123 	}
10124 
10125 	t4_dump_tcb(device_get_softc(dev), tid);
10126 }
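
/*
 * Example (names are illustrative): "show t4 tcb t5nex0 123" dumps the
 * 128-byte TCB of tid 123 on t5nex0, 32 bytes per line.
 */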
10127 #endif
10128 
10129 /*
10130  * Borrowed from cesa_prep_aes_key().
10131  *
10132  * NB: The crypto engine wants the words in the decryption key in reverse
10133  * order.
10134  */
10135 void
10136 t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
10137 {
10138 	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
10139 	uint32_t *dkey;
10140 	int i;
10141 
10142 	rijndaelKeySetupEnc(ek, enc_key, kbits);
10143 	dkey = dec_key;
10144 	dkey += (kbits / 8) / 4;
10145 
10146 	switch (kbits) {
10147 	case 128:
10148 		for (i = 0; i < 4; i++)
10149 			*--dkey = htobe32(ek[4 * 10 + i]);
10150 		break;
10151 	case 192:
10152 		for (i = 0; i < 2; i++)
10153 			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
10154 		for (i = 0; i < 4; i++)
10155 			*--dkey = htobe32(ek[4 * 12 + i]);
10156 		break;
10157 	case 256:
10158 		for (i = 0; i < 4; i++)
10159 			*--dkey = htobe32(ek[4 * 13 + i]);
10160 		for (i = 0; i < 4; i++)
10161 			*--dkey = htobe32(ek[4 * 14 + i]);
10162 		break;
10163 	}
10164 	MPASS(dkey == dec_key);
10165 }
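
/*
 * Illustration of the 128-bit case above: rijndaelKeySetupEnc()
 * produces 44 words ek[0..43] and the last round key, ek[40..43], is
 * written back to front, so dec_key[0] ends up holding ek[43] and
 * dec_key[3] holds ek[40], each byte-swapped to big endian.
 */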
10166 
10167 static struct sx mlu;	/* mod load unload */
10168 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
10169 
10170 static int
10171 mod_event(module_t mod, int cmd, void *arg)
10172 {
10173 	int rc = 0;
10174 	static int loaded = 0;
10175 
10176 	switch (cmd) {
10177 	case MOD_LOAD:
10178 		sx_xlock(&mlu);
10179 		if (loaded++ == 0) {
10180 			t4_sge_modload();
10181 			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10182 			    t4_filter_rpl, CPL_COOKIE_FILTER);
10183 			t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
10184 			    do_l2t_write_rpl, CPL_COOKIE_FILTER);
10185 			t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
10186 			    t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
10187 			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10188 			    t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
10189 			t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
10190 			    t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
10191 			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
10192 			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
10193 			t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
10194 			    do_smt_write_rpl);
10195 			sx_init(&t4_list_lock, "T4/T5 adapters");
10196 			SLIST_INIT(&t4_list);
10197 #ifdef TCP_OFFLOAD
10198 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
10199 			SLIST_INIT(&t4_uld_list);
10200 #endif
10201 			t4_tracer_modload();
10202 			tweak_tunables();
10203 		}
10204 		sx_xunlock(&mlu);
10205 		break;
10206 
10207 	case MOD_UNLOAD:
10208 		sx_xlock(&mlu);
10209 		if (--loaded == 0) {
10210 			int tries;
10211 
10212 			sx_slock(&t4_list_lock);
10213 			if (!SLIST_EMPTY(&t4_list)) {
10214 				rc = EBUSY;
10215 				sx_sunlock(&t4_list_lock);
10216 				goto done_unload;
10217 			}
10218 #ifdef TCP_OFFLOAD
10219 			sx_slock(&t4_uld_list_lock);
10220 			if (!SLIST_EMPTY(&t4_uld_list)) {
10221 				rc = EBUSY;
10222 				sx_sunlock(&t4_uld_list_lock);
10223 				sx_sunlock(&t4_list_lock);
10224 				goto done_unload;
10225 			}
10226 #endif
10227 			tries = 0;
10228 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
10229 				uprintf("%ju clusters with custom free routine "
10230 				    "still in use.\n", t4_sge_extfree_refs());
10231 				pause("t4unload", 2 * hz);
10232 			}
10233 #ifdef TCP_OFFLOAD
10234 			sx_sunlock(&t4_uld_list_lock);
10235 #endif
10236 			sx_sunlock(&t4_list_lock);
10237 
10238 			if (t4_sge_extfree_refs() == 0) {
10239 				t4_tracer_modunload();
10240 #ifdef TCP_OFFLOAD
10241 				sx_destroy(&t4_uld_list_lock);
10242 #endif
10243 				sx_destroy(&t4_list_lock);
10244 				t4_sge_modunload();
10245 				loaded = 0;
10246 			} else {
10247 				rc = EBUSY;
10248 				loaded++;	/* undo earlier decrement */
10249 			}
10250 		}
10251 done_unload:
10252 		sx_xunlock(&mlu);
10253 		break;
10254 	}
10255 
10256 	return (rc);
10257 }
10258 
10259 static devclass_t t4_devclass, t5_devclass, t6_devclass;
10260 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
10261 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
10262 
10263 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
10264 MODULE_VERSION(t4nex, 1);
10265 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
10266 #ifdef DEV_NETMAP
10267 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
10268 #endif /* DEV_NETMAP */
10269 
10270 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
10271 MODULE_VERSION(t5nex, 1);
10272 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
10273 #ifdef DEV_NETMAP
10274 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
10275 #endif /* DEV_NETMAP */
10276 
10277 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
10278 MODULE_VERSION(t6nex, 1);
10279 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
10280 #ifdef DEV_NETMAP
10281 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
10282 #endif /* DEV_NETMAP */
10283 
10284 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
10285 MODULE_VERSION(cxgbe, 1);
10286 
10287 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
10288 MODULE_VERSION(cxl, 1);
10289 
10290 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
10291 MODULE_VERSION(cc, 1);
10292 
10293 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
10294 MODULE_VERSION(vcxgbe, 1);
10295 
10296 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
10297 MODULE_VERSION(vcxl, 1);
10298 
10299 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
10300 MODULE_VERSION(vcc, 1);
10301