/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include <netinet/in.h>
#include <netinet/ip.h>
#ifdef KERN_TLS
#include <netinet/tcp_seq.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_clip.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
#include "t4_smt.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location_str(device_t, device_t, char *, size_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
static int cxgbe_snd_tag_modify(struct m_snd_tag *,
    union if_snd_tag_modify_params *);
static int cxgbe_snd_tag_query(struct m_snd_tag *,
    union if_snd_tag_query_params *);
static void cxgbe_snd_tag_free(struct m_snd_tag *);
#endif

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
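
/*
 * Illustrative sketch (not driver code): a caller that walks the adapter
 * list and needs per-adapter state would follow the lock order documented
 * above, e.g.:
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(sc, &t4_list, link) {
 *		ADAPTER_LOCK(sc);
 *		... inspect or update *sc ...
 *		ADAPTER_UNLOCK(sc);
 *	}
 *	sx_sunlock(&t4_list_lock);
 *
 * Taking t4_list_lock while already holding an ADAPTER_LOCK would invert
 * the documented order and risk a deadlock against t4_add_adapter().
 */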

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
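
/*
 * Example of the -n convention (a sketch; the real logic lives in
 * tweak_tunables()): t4_ntxq below starts out as -NTXQ (-16).  If the
 * admin did not override hw.cxgbe.ntxq, the driver later replaces the
 * sentinel with a value capped at n, roughly:
 *
 *	if (t4_ntxq < 1)
 *		t4_ntxq = min(mp_ncpus, -t4_ntxq);
 *
 * A positive value set via loader.conf is used as-is.
 */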
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE parameters");

/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
    "Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

#define NRXQ 8
int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
    "Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
    "Number of TX queues per VI");

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
    "Number of RX queues per VI");

static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
    0, "Reserve TX queue 0 of each VI for non-flowid packets");

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
    "Number of offload TX queues per port");

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
    "Number of offload RX queues per port");

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
    "Number of offload TX queues per VI");

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
    "Number of offload RX queues per VI");

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
    &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
    &t4_pktc_idx_ofld, 0, "Holdoff packet counter index for offload queues");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
    &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
    &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
    &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");
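
/*
 * Example (illustrative): the keepalive knobs take microseconds, so
 * loader.conf entries for 10 minutes of idle time, 75 second probes, and
 * 8 probes before abort would look like this:
 *
 *	hw.cxgbe.toe.keepalive_idle="600000000"
 *	hw.cxgbe.toe.keepalive_interval="75000000"
 *	hw.cxgbe.toe.keepalive_count="8"
 *
 * Leaving a knob at 0 keeps the chip/firmware default for that parameter.
 */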

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
    &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
    &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
    &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[15], 0, "");
#endif

#ifdef DEV_NETMAP
#define NN_MAIN_VI	(1 << 0)	/* Native netmap on the main VI */
#define NN_EXTRA_VI	(1 << 1)	/* Native netmap on the extra VI(s) */
static int t4_native_netmap = NN_EXTRA_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
    0, "Native netmap support.  bit 0 = main VI, bit 1 = extra VIs");

#define NNMTXQ 8
static int t4_nnmtxq = -NNMTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
    "Number of netmap TX queues");

#define NNMRXQ 8
static int t4_nnmrxq = -NNMRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
    "Number of netmap RX queues");

#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
    "Number of netmap TX queues per VI");

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
    "Number of netmap RX queues per VI");
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
    0, "Holdoff timer index");
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
    0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
    "Number of descriptors in each TX queue");

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
    "Number of descriptors in each RX queue");

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
    0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");

/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF	"default"
#define BUILTIN_CF	"built-in"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
    sizeof(t4_cfg_file), "Firmware configuration file");

/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *                 negotiated settings will override rx_pause/tx_pause.
 *                 Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
    &t4_pause_settings, 0,
    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");

/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5).
 *  0 to disable FEC.
 */
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
    "Link autonegotiation");

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).  '-n' is the same as 'n' except the firmware
 * version used in the checks is read from the firmware bundled with the driver.
 */
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
    "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
    &t4_nbmcaps_allowed, 0, "Default NBM capabilities");

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
    &t4_linkcaps_allowed, 0, "Default link capabilities");

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
    &t4_switchcaps_allowed, 0, "Default switch capabilities");

#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
	FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
	FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
    &t4_niccaps_allowed, 0, "Default NIC capabilities");

static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
    &t4_toecaps_allowed, 0, "Default TCP offload capabilities");

static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
    &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");

static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
    &t4_cryptocaps_allowed, 0, "Default crypto capabilities");

static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
    &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");

static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
    &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");

static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
    0, "Use WC instead of UC for BAR2");

static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
    "Number of VIs per port");

/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 * 0: disable RO.
 * 1: enable RO.
 * 2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
    &pcie_relaxed_ordering, 0,
    "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");

static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RDTUN,
    &t4_panic_on_fatal_err, 0, "panic on fatal errors");

#ifdef TCP_OFFLOAD
/*
 * TOE tunables.
 */
static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
    &t4_cop_managed_offloading, 0,
    "COP (Connection Offload Policy) controls all TOE offload");
#endif

#ifdef KERN_TLS
/*
 * This enables KERN_TLS for all adapters if set.
 */
static int t4_kern_tls = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
    "Enable KERN_TLS mode for all supported adapters");

SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) KERN_TLS parameters");

static int t4_tls_inline_keys = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
    &t4_tls_inline_keys, 0,
    "Always pass TLS keys in work requests (1) or attempt to store TLS keys "
    "in card memory.");

static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
    0, "Attempt to combine TCB field updates with TLS record work requests.");
#endif

/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */
	uint16_t nnmtxq;	/* # of netmap txq's */
	uint16_t nnmrxq;	/* # of netmap rxq's */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void t4_init_atid_table(struct adapter *);
static void t4_free_atid_table(struct adapter *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
static void t4_async_event(void *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 x 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
	{0x5418,  "Chelsio T540-BT"},		/* 4 x 10GBaseT */
	{0x5419,  "Chelsio T540-LP-BT"},	/* 4 x 10GBaseT */
	{0x541a,  "Chelsio T540-SO-BT"},	/* 4 x 10GBaseT, nomem */
	{0x541b,  "Chelsio T540-SO-CR"},	/* 4 x 10G, nomem */

	/* Custom */
	{0x5483, "Custom T540-CR"},
	{0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Custom T6225-CR"},
	{0x6481, "Custom T62100-CR"},
	{0x6482, "Custom T6225-CR"},
	{0x6483, "Custom T62100-CR"},
	{0x6484, "Custom T64100-CR"},
	{0x6485, "Custom T6240-SO"},
	{0x6486, "Custom T6225-SO-CR"},
	{0x6487, "Custom T6225-CR"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
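
/*
 * A sketch of why the CTASSERTs above matter (illustrative, not driver
 * code): service_iq_fl() receives a struct sge_iq pointer and locates the
 * associated freelist purely by layout, so something equivalent to
 *
 *	struct sge_rxq *rxq = __containerof(iq, struct sge_rxq, iq);
 *	struct sge_fl *fl = &rxq->fl;
 *
 * must compute the same fl address whether the iq is actually embedded in
 * a struct sge_rxq or a struct sge_ofld_rxq.
 */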

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
	const char *parent, *name;
	long value;
	int line, unit;

	line = 0;
	parent = device_get_nameunit(sc->dev);
	name = sc->names->ifnet_name;
	while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
		if (resource_long_value(name, unit, "port", &value) == 0 &&
		    value == pi->port_id)
			return (unit);
	}
	return (-1);
}
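
/*
 * Example (illustrative): t4_ifnet_unit() lets an admin pin ifnet units to
 * physical ports with device hints.  These loader.conf lines would make
 * port 1 of the adapter at t4nex0 attach as cxgbe5:
 *
 *	hint.cxgbe.5.at="t4nex0"
 *	hint.cxgbe.5.port="1"
 *
 * Without a matching hint the function returns -1 and device_add_child()
 * picks the next free unit.
 */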

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
	int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	sc->policy = NULL;
	rw_init(&sc->policy_lock, "connection offload policy");

	callout_init(&sc->ktls_tick, 1);

#ifdef TCP_OFFLOAD
	TASK_INIT(&sc->async_event_task, 0, t4_async_event, sc);
#endif

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Contact the firmware and try to become the master driver. */
	rc = contact_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */
	MPASS(sc->flags & FW_OK);

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	if (sc->flags & MASTER_PF) {
		rc = partition_resources(sc);
		if (rc != 0)
			goto done; /* error message displayed already */
		t4_intr_clear(sc);
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		PORT_LOCK(pi);
		init_link_config(pi);
		fixup_link_config(pi);
		build_medialist(pi);
		if (fixed_ifmedia(pi))
			pi->flags |= FIXED_IFMEDIA;
		PORT_UNLOCK(pi);

		pi->dev = device_add_child(dev, sc->names->ifnet_name,
		    t4_ifnet_unit(sc, pi));
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports;		/* ctrl queues: 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
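
	/*
	 * Worked example (illustrative): a 2-port adapter with num_vis = 1,
	 * iaq.ntxq = 16 and iaq.nrxq = 8 ends up with s->ntxq = 32,
	 * s->nrxq = 16, s->niq = 16 + 1 = 17 (firmware event queue), and
	 * s->neq = 32 + 16 + 2 = 50 (one ctrl queue per port), before any
	 * offload or netmap queues are added below.
	 */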
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1)
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		s->neq += s->nofldtxq;

		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		if (num_vis > 1)
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
		s->neq += s->nofldrxq;	/* free list */
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = 0;
	s->nnmtxq = 0;
	if (t4_native_netmap & NN_MAIN_VI) {
		s->nnmrxq += nports * iaq.nnmrxq;
		s->nnmtxq += nports * iaq.nnmtxq;
	}
	if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
		s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_smt(sc, M_WAITOK);
	t4_init_tx_sched(sc);
	t4_init_atid_table(sc);
#ifdef RATELIMIT
	t4_init_etid_table(sc);
#endif
#ifdef INET6
	t4_init_clip_table(sc);
#endif
	if (sc->vres.key.size != 0)
		sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
		    sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	ofld_tqidx = 0;
#endif
#ifdef TCP_OFFLOAD
	ofld_rqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->adapter = sc;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

			ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
			vi->first_nm_rxq = nm_rqidx;
			vi->first_nm_txq = nm_tqidx;
			if (j == 0) {
				vi->nnmrxq = iaq.nnmrxq;
				vi->nnmtxq = iaq.nnmtxq;
			} else {
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
			}
			nm_rqidx += vi->nnmrxq;
			nm_tqidx += vi->nnmtxq;
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen)
{
	struct adapter *sc;
	struct port_info *pi;
	int i;

	sc = device_get_softc(bus);
	buf[0] = '\0';
	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL && pi->dev == dev) {
			snprintf(buf, buflen, "port=%d", pi->port_id);
			break;
		}
	}
	return (0);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	sx_xlock(&t4_list_lock);
	SLIST_REMOVE(&t4_list, sc, adapter, link);
	sx_xunlock(&t4_list_lock);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

#ifdef TCP_OFFLOAD
	taskqueue_drain(taskqueue_thread, &sc->async_event_task);
#endif

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
	t4_free_atid_table(sc);
#ifdef RATELIMIT
	t4_free_etid_table(sc);
#endif
	if (sc->key_map)
		vmem_destroy(sc->key_map);
#ifdef INET6
	t4_destroy_clip_table(sc);
#endif

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.tid_tab, M_CXGBE);
	free(sc->tt.tls_rx_ports, M_CXGBE);
	t4_destroy_dma_tag(sc);

	callout_drain(&sc->ktls_tick);
	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	mtx_destroy(&sc->sfl_lock);
	mtx_destroy(&sc->reg_lock);
	mtx_destroy(&sc->sc_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
    IFCAP_HWRXTSTMP | IFCAP_NOMAP)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;
	struct pfil_head_args pa;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc_dev(IFT_ETHER, dev);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
	ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
	ifp->if_snd_tag_query = cxgbe_snd_tag_query;
	ifp->if_snd_tag_free = cxgbe_snd_tag_free;
1744 #endif
1745 #ifdef RATELIMIT
1746 	ifp->if_ratelimit_query = cxgbe_ratelimit_query;
1747 #endif
1748 
1749 	ifp->if_capabilities = T4_CAP;
1750 	ifp->if_capenable = T4_CAP_ENABLE;
1751 #ifdef TCP_OFFLOAD
1752 	if (vi->nofldrxq != 0 && (vi->adapter->flags & KERN_TLS_OK) == 0)
1753 		ifp->if_capabilities |= IFCAP_TOE;
1754 #endif
1755 #ifdef RATELIMIT
1756 	if (is_ethoffload(vi->adapter) && vi->nofldtxq != 0) {
1757 		ifp->if_capabilities |= IFCAP_TXRTLMT;
1758 		ifp->if_capenable |= IFCAP_TXRTLMT;
1759 	}
1760 #endif
1761 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1762 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1763 
1764 	ifp->if_hw_tsomax = IP_MAXPACKET;
1765 	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
1766 #ifdef RATELIMIT
1767 	if (is_ethoffload(vi->adapter) && vi->nofldtxq != 0)
1768 		ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO;
1769 #endif
1770 	ifp->if_hw_tsomaxsegsize = 65536;
1771 #ifdef KERN_TLS
1772 	if (vi->adapter->flags & KERN_TLS_OK) {
1773 		ifp->if_capabilities |= IFCAP_TXTLS;
1774 		ifp->if_capenable |= IFCAP_TXTLS;
1775 	}
1776 #endif
1777 
1778 	ether_ifattach(ifp, vi->hw_addr);
1779 #ifdef DEV_NETMAP
1780 	if (vi->nnmrxq != 0)
1781 		cxgbe_nm_attach(vi);
1782 #endif
1783 	sb = sbuf_new_auto();
1784 	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1785 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1786 	switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
1787 	case IFCAP_TOE:
1788 		sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
1789 		break;
1790 	case IFCAP_TOE | IFCAP_TXRTLMT:
1791 		sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
1792 		break;
1793 	case IFCAP_TXRTLMT:
1794 		sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
1795 		break;
1796 	}
1797 #endif
1798 #ifdef TCP_OFFLOAD
1799 	if (ifp->if_capabilities & IFCAP_TOE)
1800 		sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
1801 #endif
1802 #ifdef DEV_NETMAP
1803 	if (ifp->if_capabilities & IFCAP_NETMAP)
1804 		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1805 		    vi->nnmtxq, vi->nnmrxq);
1806 #endif
1807 	sbuf_finish(sb);
1808 	device_printf(dev, "%s\n", sbuf_data(sb));
1809 	sbuf_delete(sb);
1810 
1811 	vi_sysctls(vi);
1812 
1813 	pa.pa_version = PFIL_VERSION;
1814 	pa.pa_flags = PFIL_IN;
1815 	pa.pa_type = PFIL_TYPE_ETHERNET;
1816 	pa.pa_headname = ifp->if_xname;
1817 	vi->pfil = pfil_head_register(&pa);
1818 
1819 	return (0);
1820 }
1821 
1822 static int
1823 cxgbe_attach(device_t dev)
1824 {
1825 	struct port_info *pi = device_get_softc(dev);
1826 	struct adapter *sc = pi->adapter;
1827 	struct vi_info *vi;
1828 	int i, rc;
1829 
1830 	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1831 
1832 	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1833 	if (rc)
1834 		return (rc);
1835 
1836 	for_each_vi(pi, i, vi) {
1837 		if (i == 0)
1838 			continue;
1839 		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1840 		if (vi->dev == NULL) {
1841 			device_printf(dev, "failed to add VI %d\n", i);
1842 			continue;
1843 		}
1844 		device_set_softc(vi->dev, vi);
1845 	}
1846 
1847 	cxgbe_sysctls(pi);
1848 
1849 	bus_generic_attach(dev);
1850 
1851 	return (0);
1852 }
1853 
1854 static void
1855 cxgbe_vi_detach(struct vi_info *vi)
1856 {
1857 	struct ifnet *ifp = vi->ifp;
1858 
1859 	if (vi->pfil != NULL) {
1860 		pfil_head_unregister(vi->pfil);
1861 		vi->pfil = NULL;
1862 	}
1863 
1864 	ether_ifdetach(ifp);
1865 
1866 	/* Let detach proceed even if these fail. */
1867 #ifdef DEV_NETMAP
1868 	if (ifp->if_capabilities & IFCAP_NETMAP)
1869 		cxgbe_nm_detach(vi);
1870 #endif
1871 	cxgbe_uninit_synchronized(vi);
1872 	callout_drain(&vi->tick);
1873 	vi_full_uninit(vi);
1874 
1875 	if_free(vi->ifp);
1876 	vi->ifp = NULL;
1877 }
1878 
1879 static int
1880 cxgbe_detach(device_t dev)
1881 {
1882 	struct port_info *pi = device_get_softc(dev);
1883 	struct adapter *sc = pi->adapter;
1884 	int rc;
1885 
1886 	/* Detach the extra VIs first. */
1887 	rc = bus_generic_detach(dev);
1888 	if (rc)
1889 		return (rc);
1890 	device_delete_children(dev);
1891 
1892 	doom_vi(sc, &pi->vi[0]);
1893 
1894 	if (pi->flags & HAS_TRACEQ) {
1895 		sc->traceq = -1;	/* cloner should not create ifnet */
1896 		t4_tracer_port_detach(sc);
1897 	}
1898 
1899 	cxgbe_vi_detach(&pi->vi[0]);
1900 	callout_drain(&pi->tick);
1901 	ifmedia_removeall(&pi->media);
1902 
1903 	end_synchronized_op(sc, 0);
1904 
1905 	return (0);
1906 }
1907 
1908 static void
1909 cxgbe_init(void *arg)
1910 {
1911 	struct vi_info *vi = arg;
1912 	struct adapter *sc = vi->adapter;
1913 
1914 	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1915 		return;
1916 	cxgbe_init_synchronized(vi);
1917 	end_synchronized_op(sc, 0);
1918 }
1919 
1920 static int
1921 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1922 {
1923 	int rc = 0, mtu, flags;
1924 	struct vi_info *vi = ifp->if_softc;
1925 	struct port_info *pi = vi->pi;
1926 	struct adapter *sc = pi->adapter;
1927 	struct ifreq *ifr = (struct ifreq *)data;
1928 	uint32_t mask;
1929 
1930 	switch (cmd) {
1931 	case SIOCSIFMTU:
1932 		mtu = ifr->ifr_mtu;
1933 		if (mtu < ETHERMIN || mtu > MAX_MTU)
1934 			return (EINVAL);
1935 
1936 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1937 		if (rc)
1938 			return (rc);
1939 		ifp->if_mtu = mtu;
1940 		if (vi->flags & VI_INIT_DONE) {
1941 			t4_update_fl_bufsize(ifp);
1942 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1943 				rc = update_mac_settings(ifp, XGMAC_MTU);
1944 		}
1945 		end_synchronized_op(sc, 0);
1946 		break;
1947 
1948 	case SIOCSIFFLAGS:
1949 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
1950 		if (rc)
1951 			return (rc);
1952 
1953 		if (ifp->if_flags & IFF_UP) {
1954 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1955 				flags = vi->if_flags;
1956 				if ((ifp->if_flags ^ flags) &
1957 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1958 					rc = update_mac_settings(ifp,
1959 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1960 				}
1961 			} else {
1962 				rc = cxgbe_init_synchronized(vi);
1963 			}
1964 			vi->if_flags = ifp->if_flags;
1965 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1966 			rc = cxgbe_uninit_synchronized(vi);
1967 		}
1968 		end_synchronized_op(sc, 0);
1969 		break;
1970 
1971 	case SIOCADDMULTI:
1972 	case SIOCDELMULTI:
1973 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
1974 		if (rc)
1975 			return (rc);
1976 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1977 			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1978 		end_synchronized_op(sc, 0);
1979 		break;
1980 
1981 	case SIOCSIFCAP:
1982 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1983 		if (rc)
1984 			return (rc);
1985 
1986 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1987 		if (mask & IFCAP_TXCSUM) {
1988 			ifp->if_capenable ^= IFCAP_TXCSUM;
1989 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1990 
1991 			if (IFCAP_TSO4 & ifp->if_capenable &&
1992 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1993 				mask &= ~IFCAP_TSO4;
1994 				ifp->if_capenable &= ~IFCAP_TSO4;
1995 				if_printf(ifp,
1996 				    "tso4 disabled due to -txcsum.\n");
1997 			}
1998 		}
1999 		if (mask & IFCAP_TXCSUM_IPV6) {
2000 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2001 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2002 
2003 			if (IFCAP_TSO6 & ifp->if_capenable &&
2004 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2005 				mask &= ~IFCAP_TSO6;
2006 				ifp->if_capenable &= ~IFCAP_TSO6;
2007 				if_printf(ifp,
2008 				    "tso6 disabled due to -txcsum6.\n");
2009 			}
2010 		}
2011 		if (mask & IFCAP_RXCSUM)
2012 			ifp->if_capenable ^= IFCAP_RXCSUM;
2013 		if (mask & IFCAP_RXCSUM_IPV6)
2014 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2015 
2016 		/*
2017 		 * Note that we leave CSUM_TSO alone (it is always set).  The
2018 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
2019 		 * sending a TSO request our way, so it's sufficient to toggle
2020 		 * IFCAP_TSOx only.
2021 		 */
2022 		if (mask & IFCAP_TSO4) {
2023 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2024 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2025 				if_printf(ifp, "enable txcsum first.\n");
2026 				rc = EAGAIN;
2027 				goto fail;
2028 			}
2029 			ifp->if_capenable ^= IFCAP_TSO4;
2030 		}
2031 		if (mask & IFCAP_TSO6) {
2032 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2033 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2034 				if_printf(ifp, "enable txcsum6 first.\n");
2035 				rc = EAGAIN;
2036 				goto fail;
2037 			}
2038 			ifp->if_capenable ^= IFCAP_TSO6;
2039 		}
2040 		if (mask & IFCAP_LRO) {
2041 #if defined(INET) || defined(INET6)
2042 			int i;
2043 			struct sge_rxq *rxq;
2044 
2045 			ifp->if_capenable ^= IFCAP_LRO;
2046 			for_each_rxq(vi, i, rxq) {
2047 				if (ifp->if_capenable & IFCAP_LRO)
2048 					rxq->iq.flags |= IQ_LRO_ENABLED;
2049 				else
2050 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
2051 			}
2052 #endif
2053 		}
2054 #ifdef TCP_OFFLOAD
2055 		if (mask & IFCAP_TOE) {
2056 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
2057 
2058 			rc = toe_capability(vi, enable);
2059 			if (rc != 0)
2060 				goto fail;
2061 
2062 			ifp->if_capenable ^= mask;
2063 		}
2064 #endif
2065 		if (mask & IFCAP_VLAN_HWTAGGING) {
2066 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2067 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2068 				rc = update_mac_settings(ifp, XGMAC_VLANEX);
2069 		}
2070 		if (mask & IFCAP_VLAN_MTU) {
2071 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2072 
2073 			/* Need to find out how to disable auto-mtu-inflation */
2074 		}
2075 		if (mask & IFCAP_VLAN_HWTSO)
2076 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2077 		if (mask & IFCAP_VLAN_HWCSUM)
2078 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2079 #ifdef RATELIMIT
2080 		if (mask & IFCAP_TXRTLMT)
2081 			ifp->if_capenable ^= IFCAP_TXRTLMT;
2082 #endif
2083 		if (mask & IFCAP_HWRXTSTMP) {
2084 			int i;
2085 			struct sge_rxq *rxq;
2086 
2087 			ifp->if_capenable ^= IFCAP_HWRXTSTMP;
2088 			for_each_rxq(vi, i, rxq) {
2089 				if (ifp->if_capenable & IFCAP_HWRXTSTMP)
2090 					rxq->iq.flags |= IQ_RX_TIMESTAMP;
2091 				else
2092 					rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
2093 			}
2094 		}
2095 		if (mask & IFCAP_NOMAP)
2096 			ifp->if_capenable ^= IFCAP_NOMAP;
2097 
2098 #ifdef KERN_TLS
2099 		if (mask & IFCAP_TXTLS)
2100 			ifp->if_capenable ^= (mask & IFCAP_TXTLS);
2101 #endif
2102 
2103 #ifdef VLAN_CAPABILITIES
2104 		VLAN_CAPABILITIES(ifp);
2105 #endif
2106 fail:
2107 		end_synchronized_op(sc, 0);
2108 		break;
2109 
2110 	case SIOCSIFMEDIA:
2111 	case SIOCGIFMEDIA:
2112 	case SIOCGIFXMEDIA:
2113 		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
2114 		break;
2115 
2116 	case SIOCGI2C: {
2117 		struct ifi2creq i2c;
2118 
2119 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2120 		if (rc != 0)
2121 			break;
2122 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
2123 			rc = EPERM;
2124 			break;
2125 		}
2126 		if (i2c.len > sizeof(i2c.data)) {
2127 			rc = EINVAL;
2128 			break;
2129 		}
2130 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
2131 		if (rc)
2132 			return (rc);
2133 		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
2134 		    i2c.offset, i2c.len, &i2c.data[0]);
2135 		end_synchronized_op(sc, 0);
2136 		if (rc == 0)
2137 			rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2138 		break;
2139 	}
2140 
2141 	default:
2142 		rc = ether_ioctl(ifp, cmd, data);
2143 	}
2144 
2145 	return (rc);
2146 }
2147 
2148 static int
2149 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
2150 {
2151 	struct vi_info *vi = ifp->if_softc;
2152 	struct port_info *pi = vi->pi;
2153 	struct adapter *sc = pi->adapter;
2154 	struct sge_txq *txq;
2155 #ifdef RATELIMIT
2156 	struct cxgbe_snd_tag *cst;
2157 #endif
2158 	void *items[1];
2159 	int rc;
2160 
2161 	M_ASSERTPKTHDR(m);
2162 	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
2163 #if defined(KERN_TLS) || defined(RATELIMIT)
2164 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
2165 		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
2166 #endif
2167 
2168 	if (__predict_false(pi->link_cfg.link_ok == false)) {
2169 		m_freem(m);
2170 		return (ENETDOWN);
2171 	}
2172 
2173 	rc = parse_pkt(sc, &m);
2174 	if (__predict_false(rc != 0)) {
2175 		MPASS(m == NULL);			/* was freed already */
2176 		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
2177 		return (rc);
2178 	}
2179 #ifdef RATELIMIT
2180 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
2181 		cst = mst_to_cst(m->m_pkthdr.snd_tag);
2182 		if (cst->type == IF_SND_TAG_TYPE_RATE_LIMIT)
2183 			return (ethofld_transmit(ifp, m));
2184 	}
2185 #endif
2186 
2187 	/* Select a txq. */
2188 	txq = &sc->sge.txq[vi->first_txq];
2189 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2190 		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
2191 		    vi->rsrv_noflowq);
2192 
2193 	items[0] = m;
2194 	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
2195 	if (__predict_false(rc != 0))
2196 		m_freem(m);
2197 
2198 	return (rc);
2199 }
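
/*
 * Illustrative note (not from the source): with the selection above and,
 * say, ntxq = 8 and rsrv_noflowq = 1, packets without a flowid always use
 * txq[first_txq], while flowid traffic spreads over the remaining queues:
 *
 *	txq = &sc->sge.txq[vi->first_txq + (flowid % 7) + 1];
 */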
2200 
2201 static void
2202 cxgbe_qflush(struct ifnet *ifp)
2203 {
2204 	struct vi_info *vi = ifp->if_softc;
2205 	struct sge_txq *txq;
2206 	int i;
2207 
2208 	/* queues do not exist if !VI_INIT_DONE. */
2209 	if (vi->flags & VI_INIT_DONE) {
2210 		for_each_txq(vi, i, txq) {
2211 			TXQ_LOCK(txq);
2212 			txq->eq.flags |= EQ_QFLUSH;
2213 			TXQ_UNLOCK(txq);
2214 			while (!mp_ring_is_idle(txq->r)) {
2215 				mp_ring_check_drainage(txq->r, 0);
2216 				pause("qflush", 1);
2217 			}
2218 			TXQ_LOCK(txq);
2219 			txq->eq.flags &= ~EQ_QFLUSH;
2220 			TXQ_UNLOCK(txq);
2221 		}
2222 	}
2223 	if_qflush(ifp);
2224 }
2225 
2226 static uint64_t
2227 vi_get_counter(struct ifnet *ifp, ift_counter c)
2228 {
2229 	struct vi_info *vi = ifp->if_softc;
2230 	struct fw_vi_stats_vf *s = &vi->stats;
2231 
2232 	vi_refresh_stats(vi->adapter, vi);
2233 
2234 	switch (c) {
2235 	case IFCOUNTER_IPACKETS:
2236 		return (s->rx_bcast_frames + s->rx_mcast_frames +
2237 		    s->rx_ucast_frames);
2238 	case IFCOUNTER_IERRORS:
2239 		return (s->rx_err_frames);
2240 	case IFCOUNTER_OPACKETS:
2241 		return (s->tx_bcast_frames + s->tx_mcast_frames +
2242 		    s->tx_ucast_frames + s->tx_offload_frames);
2243 	case IFCOUNTER_OERRORS:
2244 		return (s->tx_drop_frames);
2245 	case IFCOUNTER_IBYTES:
2246 		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
2247 		    s->rx_ucast_bytes);
2248 	case IFCOUNTER_OBYTES:
2249 		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
2250 		    s->tx_ucast_bytes + s->tx_offload_bytes);
2251 	case IFCOUNTER_IMCASTS:
2252 		return (s->rx_mcast_frames);
2253 	case IFCOUNTER_OMCASTS:
2254 		return (s->tx_mcast_frames);
2255 	case IFCOUNTER_OQDROPS: {
2256 		uint64_t drops;
2257 
2258 		drops = 0;
2259 		if (vi->flags & VI_INIT_DONE) {
2260 			int i;
2261 			struct sge_txq *txq;
2262 
2263 			for_each_txq(vi, i, txq)
2264 				drops += counter_u64_fetch(txq->r->drops);
2265 		}
2266 
2267 		return (drops);
2268 
2269 	}
2270 
2271 	default:
2272 		return (if_get_counter_default(ifp, c));
2273 	}
2274 }
2275 
2276 uint64_t
2277 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
2278 {
2279 	struct vi_info *vi = ifp->if_softc;
2280 	struct port_info *pi = vi->pi;
2281 	struct adapter *sc = pi->adapter;
2282 	struct port_stats *s = &pi->stats;
2283 
2284 	if (pi->nvi > 1 || sc->flags & IS_VF)
2285 		return (vi_get_counter(ifp, c));
2286 
2287 	cxgbe_refresh_stats(sc, pi);
2288 
2289 	switch (c) {
2290 	case IFCOUNTER_IPACKETS:
2291 		return (s->rx_frames);
2292 
2293 	case IFCOUNTER_IERRORS:
2294 		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
2295 		    s->rx_fcs_err + s->rx_len_err);
2296 
2297 	case IFCOUNTER_OPACKETS:
2298 		return (s->tx_frames);
2299 
2300 	case IFCOUNTER_OERRORS:
2301 		return (s->tx_error_frames);
2302 
2303 	case IFCOUNTER_IBYTES:
2304 		return (s->rx_octets);
2305 
2306 	case IFCOUNTER_OBYTES:
2307 		return (s->tx_octets);
2308 
2309 	case IFCOUNTER_IMCASTS:
2310 		return (s->rx_mcast_frames);
2311 
2312 	case IFCOUNTER_OMCASTS:
2313 		return (s->tx_mcast_frames);
2314 
2315 	case IFCOUNTER_IQDROPS:
2316 		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2317 		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
2318 		    s->rx_trunc3 + pi->tnl_cong_drops);
2319 
2320 	case IFCOUNTER_OQDROPS: {
2321 		uint64_t drops;
2322 
2323 		drops = s->tx_drop;
2324 		if (vi->flags & VI_INIT_DONE) {
2325 			int i;
2326 			struct sge_txq *txq;
2327 
2328 			for_each_txq(vi, i, txq)
2329 				drops += counter_u64_fetch(txq->r->drops);
2330 		}
2331 
2332 		return (drops);
2333 
2334 	}
2335 
2336 	default:
2337 		return (if_get_counter_default(ifp, c));
2338 	}
2339 }
2340 
2341 #if defined(KERN_TLS) || defined(RATELIMIT)
2342 void
2343 cxgbe_snd_tag_init(struct cxgbe_snd_tag *cst, struct ifnet *ifp, int type)
2344 {
2345 
2346 	m_snd_tag_init(&cst->com, ifp);
2347 	cst->type = type;
2348 }
2349 
2350 static int
2351 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
2352     struct m_snd_tag **pt)
2353 {
2354 	int error;
2355 
2356 	switch (params->hdr.type) {
2357 #ifdef RATELIMIT
2358 	case IF_SND_TAG_TYPE_RATE_LIMIT:
2359 		error = cxgbe_rate_tag_alloc(ifp, params, pt);
2360 		break;
2361 #endif
2362 #ifdef KERN_TLS
2363 	case IF_SND_TAG_TYPE_TLS:
2364 		error = cxgbe_tls_tag_alloc(ifp, params, pt);
2365 		break;
2366 #endif
2367 	default:
2368 		error = EOPNOTSUPP;
2369 	}
2370 	if (error == 0)
2371 		MPASS(mst_to_cst(*pt)->type == params->hdr.type);
2372 	return (error);
2373 }
2374 
2375 static int
2376 cxgbe_snd_tag_modify(struct m_snd_tag *mst,
2377     union if_snd_tag_modify_params *params)
2378 {
2379 	struct cxgbe_snd_tag *cst;
2380 
2381 	cst = mst_to_cst(mst);
2382 	switch (cst->type) {
2383 #ifdef RATELIMIT
2384 	case IF_SND_TAG_TYPE_RATE_LIMIT:
2385 		return (cxgbe_rate_tag_modify(mst, params));
2386 #endif
2387 	default:
2388 		return (EOPNOTSUPP);
2389 	}
2390 }
2391 
2392 static int
2393 cxgbe_snd_tag_query(struct m_snd_tag *mst,
2394     union if_snd_tag_query_params *params)
2395 {
2396 	struct cxgbe_snd_tag *cst;
2397 
2398 	cst = mst_to_cst(mst);
2399 	switch (cst->type) {
2400 #ifdef RATELIMIT
2401 	case IF_SND_TAG_TYPE_RATE_LIMIT:
2402 		return (cxgbe_rate_tag_query(mst, params));
2403 #endif
2404 	default:
2405 		return (EOPNOTSUPP);
2406 	}
2407 }
2408 
2409 static void
2410 cxgbe_snd_tag_free(struct m_snd_tag *mst)
2411 {
2412 	struct cxgbe_snd_tag *cst;
2413 
2414 	cst = mst_to_cst(mst);
2415 	switch (cst->type) {
2416 #ifdef RATELIMIT
2417 	case IF_SND_TAG_TYPE_RATE_LIMIT:
2418 		cxgbe_rate_tag_free(mst);
2419 		return;
2420 #endif
2421 #ifdef KERN_TLS
2422 	case IF_SND_TAG_TYPE_TLS:
2423 		cxgbe_tls_tag_free(mst);
2424 		return;
2425 #endif
2426 	default:
2427 		panic("shouldn't get here");
2428 	}
2429 }
2430 #endif
2431 
2432 /*
2433  * The kernel picks a media from the list we provided, but we still
2434  * validate the request.
2435  */
2436 int
2437 cxgbe_media_change(struct ifnet *ifp)
2438 {
2439 	struct vi_info *vi = ifp->if_softc;
2440 	struct port_info *pi = vi->pi;
2441 	struct ifmedia *ifm = &pi->media;
2442 	struct link_config *lc = &pi->link_cfg;
2443 	struct adapter *sc = pi->adapter;
2444 	int rc;
2445 
2446 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
2447 	if (rc != 0)
2448 		return (rc);
2449 	PORT_LOCK(pi);
2450 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2451 		/* ifconfig .. media autoselect */
2452 		if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
2453 			rc = ENOTSUP; /* AN not supported by transceiver */
2454 			goto done;
2455 		}
2456 		lc->requested_aneg = AUTONEG_ENABLE;
2457 		lc->requested_speed = 0;
2458 		lc->requested_fc |= PAUSE_AUTONEG;
2459 	} else {
2460 		lc->requested_aneg = AUTONEG_DISABLE;
2461 		lc->requested_speed =
2462 		    ifmedia_baudrate(ifm->ifm_media) / 1000000;
2463 		lc->requested_fc = 0;
2464 		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
2465 			lc->requested_fc |= PAUSE_RX;
2466 		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
2467 			lc->requested_fc |= PAUSE_TX;
2468 	}
2469 	if (pi->up_vis > 0) {
2470 		fixup_link_config(pi);
2471 		rc = apply_link_config(pi);
2472 	}
2473 done:
2474 	PORT_UNLOCK(pi);
2475 	end_synchronized_op(sc, 0);
2476 	return (rc);
2477 }
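
/*
 * Example (illustrative, not part of the driver): the requests validated
 * above normally originate from ifconfig, e.g.
 *
 *	# ifconfig cxgbe0 media autoselect
 *	# ifconfig cxgbe0 media 10Gbase-SR mediaopt rxpause,txpause
 *
 * The interface name and the set of valid media words depend on the adapter
 * and on the transceiver that is plugged in.
 */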
2478 
2479 /*
2480  * Base media word (without ETHER, pause, link active, etc.) for the port at the
2481  * given speed.
2482  */
2483 static int
2484 port_mword(struct port_info *pi, uint32_t speed)
2485 {
2486 
2487 	MPASS(speed & M_FW_PORT_CAP32_SPEED);
2488 	MPASS(powerof2(speed));
2489 
2490 	switch (pi->port_type) {
2491 	case FW_PORT_TYPE_BT_SGMII:
2492 	case FW_PORT_TYPE_BT_XFI:
2493 	case FW_PORT_TYPE_BT_XAUI:
2494 		/* BaseT */
2495 		switch (speed) {
2496 		case FW_PORT_CAP32_SPEED_100M:
2497 			return (IFM_100_T);
2498 		case FW_PORT_CAP32_SPEED_1G:
2499 			return (IFM_1000_T);
2500 		case FW_PORT_CAP32_SPEED_10G:
2501 			return (IFM_10G_T);
2502 		}
2503 		break;
2504 	case FW_PORT_TYPE_KX4:
2505 		if (speed == FW_PORT_CAP32_SPEED_10G)
2506 			return (IFM_10G_KX4);
2507 		break;
2508 	case FW_PORT_TYPE_CX4:
2509 		if (speed == FW_PORT_CAP32_SPEED_10G)
2510 			return (IFM_10G_CX4);
2511 		break;
2512 	case FW_PORT_TYPE_KX:
2513 		if (speed == FW_PORT_CAP32_SPEED_1G)
2514 			return (IFM_1000_KX);
2515 		break;
2516 	case FW_PORT_TYPE_KR:
2517 	case FW_PORT_TYPE_BP_AP:
2518 	case FW_PORT_TYPE_BP4_AP:
2519 	case FW_PORT_TYPE_BP40_BA:
2520 	case FW_PORT_TYPE_KR4_100G:
2521 	case FW_PORT_TYPE_KR_SFP28:
2522 	case FW_PORT_TYPE_KR_XLAUI:
2523 		switch (speed) {
2524 		case FW_PORT_CAP32_SPEED_1G:
2525 			return (IFM_1000_KX);
2526 		case FW_PORT_CAP32_SPEED_10G:
2527 			return (IFM_10G_KR);
2528 		case FW_PORT_CAP32_SPEED_25G:
2529 			return (IFM_25G_KR);
2530 		case FW_PORT_CAP32_SPEED_40G:
2531 			return (IFM_40G_KR4);
2532 		case FW_PORT_CAP32_SPEED_50G:
2533 			return (IFM_50G_KR2);
2534 		case FW_PORT_CAP32_SPEED_100G:
2535 			return (IFM_100G_KR4);
2536 		}
2537 		break;
2538 	case FW_PORT_TYPE_FIBER_XFI:
2539 	case FW_PORT_TYPE_FIBER_XAUI:
2540 	case FW_PORT_TYPE_SFP:
2541 	case FW_PORT_TYPE_QSFP_10G:
2542 	case FW_PORT_TYPE_QSA:
2543 	case FW_PORT_TYPE_QSFP:
2544 	case FW_PORT_TYPE_CR4_QSFP:
2545 	case FW_PORT_TYPE_CR_QSFP:
2546 	case FW_PORT_TYPE_CR2_QSFP:
2547 	case FW_PORT_TYPE_SFP28:
2548 		/* Pluggable transceiver */
2549 		switch (pi->mod_type) {
2550 		case FW_PORT_MOD_TYPE_LR:
2551 			switch (speed) {
2552 			case FW_PORT_CAP32_SPEED_1G:
2553 				return (IFM_1000_LX);
2554 			case FW_PORT_CAP32_SPEED_10G:
2555 				return (IFM_10G_LR);
2556 			case FW_PORT_CAP32_SPEED_25G:
2557 				return (IFM_25G_LR);
2558 			case FW_PORT_CAP32_SPEED_40G:
2559 				return (IFM_40G_LR4);
2560 			case FW_PORT_CAP32_SPEED_50G:
2561 				return (IFM_50G_LR2);
2562 			case FW_PORT_CAP32_SPEED_100G:
2563 				return (IFM_100G_LR4);
2564 			}
2565 			break;
2566 		case FW_PORT_MOD_TYPE_SR:
2567 			switch (speed) {
2568 			case FW_PORT_CAP32_SPEED_1G:
2569 				return (IFM_1000_SX);
2570 			case FW_PORT_CAP32_SPEED_10G:
2571 				return (IFM_10G_SR);
2572 			case FW_PORT_CAP32_SPEED_25G:
2573 				return (IFM_25G_SR);
2574 			case FW_PORT_CAP32_SPEED_40G:
2575 				return (IFM_40G_SR4);
2576 			case FW_PORT_CAP32_SPEED_50G:
2577 				return (IFM_50G_SR2);
2578 			case FW_PORT_CAP32_SPEED_100G:
2579 				return (IFM_100G_SR4);
2580 			}
2581 			break;
2582 		case FW_PORT_MOD_TYPE_ER:
2583 			if (speed == FW_PORT_CAP32_SPEED_10G)
2584 				return (IFM_10G_ER);
2585 			break;
2586 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2587 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2588 			switch (speed) {
2589 			case FW_PORT_CAP32_SPEED_1G:
2590 				return (IFM_1000_CX);
2591 			case FW_PORT_CAP32_SPEED_10G:
2592 				return (IFM_10G_TWINAX);
2593 			case FW_PORT_CAP32_SPEED_25G:
2594 				return (IFM_25G_CR);
2595 			case FW_PORT_CAP32_SPEED_40G:
2596 				return (IFM_40G_CR4);
2597 			case FW_PORT_CAP32_SPEED_50G:
2598 				return (IFM_50G_CR2);
2599 			case FW_PORT_CAP32_SPEED_100G:
2600 				return (IFM_100G_CR4);
2601 			}
2602 			break;
2603 		case FW_PORT_MOD_TYPE_LRM:
2604 			if (speed == FW_PORT_CAP32_SPEED_10G)
2605 				return (IFM_10G_LRM);
2606 			break;
2607 		case FW_PORT_MOD_TYPE_NA:
2608 			MPASS(0);	/* Not pluggable? */
2609 			/* fall through */
2610 		case FW_PORT_MOD_TYPE_ERROR:
2611 		case FW_PORT_MOD_TYPE_UNKNOWN:
2612 		case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2613 			break;
2614 		case FW_PORT_MOD_TYPE_NONE:
2615 			return (IFM_NONE);
2616 		}
2617 		break;
2618 	case FW_PORT_TYPE_NONE:
2619 		return (IFM_NONE);
2620 	}
2621 
2622 	return (IFM_UNKNOWN);
2623 }
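
/*
 * Example (illustrative): for an SFP28 port with an SR module plugged in,
 * port_mword(pi, FW_PORT_CAP32_SPEED_25G) returns IFM_25G_SR.  The caller
 * (cxgbe_media_status below) ORs in IFM_ETHER, the duplex, and the pause
 * bits to build the complete ifmedia word.
 */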
2624 
2625 void
2626 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2627 {
2628 	struct vi_info *vi = ifp->if_softc;
2629 	struct port_info *pi = vi->pi;
2630 	struct adapter *sc = pi->adapter;
2631 	struct link_config *lc = &pi->link_cfg;
2632 
2633 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2634 		return;
2635 	PORT_LOCK(pi);
2636 
2637 	if (pi->up_vis == 0) {
2638 		/*
2639 		 * If all the interfaces are administratively down, the firmware
2640 		 * does not report transceiver changes.  Refresh port info here
2641 		 * so that ifconfig displays accurate ifmedia at all times.
2642 		 * This is the only reason we have a synchronized op in this
2643 		 * function.  Just PORT_LOCK would have been enough otherwise.
2644 		 */
2645 		t4_update_port_info(pi);
2646 		build_medialist(pi);
2647 	}
2648 
2649 	/* ifm_status */
2650 	ifmr->ifm_status = IFM_AVALID;
2651 	if (lc->link_ok == false)
2652 		goto done;
2653 	ifmr->ifm_status |= IFM_ACTIVE;
2654 
2655 	/* ifm_active */
2656 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2657 	ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2658 	if (lc->fc & PAUSE_RX)
2659 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2660 	if (lc->fc & PAUSE_TX)
2661 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2662 	ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
2663 done:
2664 	PORT_UNLOCK(pi);
2665 	end_synchronized_op(sc, 0);
2666 }
2667 
2668 static int
2669 vcxgbe_probe(device_t dev)
2670 {
2671 	char buf[128];
2672 	struct vi_info *vi = device_get_softc(dev);
2673 
2674 	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2675 	    vi - vi->pi->vi);
2676 	device_set_desc_copy(dev, buf);
2677 
2678 	return (BUS_PROBE_DEFAULT);
2679 }
2680 
2681 static int
2682 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2683 {
2684 	int func, index, rc;
2685 	uint32_t param, val;
2686 
2687 	ASSERT_SYNCHRONIZED_OP(sc);
2688 
2689 	index = vi - pi->vi;
2690 	MPASS(index > 0);	/* This function deals with _extra_ VIs only */
2691 	KASSERT(index < nitems(vi_mac_funcs),
2692 	    ("%s: VI %s doesn't have a MAC func", __func__,
2693 	    device_get_nameunit(vi->dev)));
2694 	func = vi_mac_funcs[index];
2695 	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2696 	    vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
2697 	if (rc < 0) {
2698 		device_printf(vi->dev, "failed to allocate virtual interface %d "
2699 		    "for port %d: %d\n", index, pi->port_id, -rc);
2700 		return (-rc);
2701 	}
2702 	vi->viid = rc;
2703 
2704 	if (vi->rss_size == 1) {
2705 		/*
2706 		 * This VI didn't get a slice of the RSS table.  Reduce the
2707 		 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2708 		 * configuration file (nvi, rssnvi for this PF) if this is a
2709 		 * problem.
2710 		 */
2711 		device_printf(vi->dev, "RSS table not available.\n");
2712 		vi->rss_base = 0xffff;
2713 
2714 		return (0);
2715 	}
2716 
2717 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2718 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2719 	    V_FW_PARAMS_PARAM_YZ(vi->viid);
2720 	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2721 	if (rc)
2722 		vi->rss_base = 0xffff;
2723 	else {
2724 		MPASS((val >> 16) == vi->rss_size);
2725 		vi->rss_base = val & 0xffff;
2726 	}
2727 
2728 	return (0);
2729 }
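
/*
 * Example (illustrative): extra VIs are requested at boot time with the
 * hw.cxgbe.num_vis loader tunable, e.g. in /boot/loader.conf:
 *
 *	hw.cxgbe.num_vis="2"
 *
 * Every VI past the first goes through alloc_extra_vi() and needs its own
 * slice of the RSS table, as checked above.
 */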
2730 
2731 static int
2732 vcxgbe_attach(device_t dev)
2733 {
2734 	struct vi_info *vi;
2735 	struct port_info *pi;
2736 	struct adapter *sc;
2737 	int rc;
2738 
2739 	vi = device_get_softc(dev);
2740 	pi = vi->pi;
2741 	sc = pi->adapter;
2742 
2743 	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2744 	if (rc)
2745 		return (rc);
2746 	rc = alloc_extra_vi(sc, pi, vi);
2747 	end_synchronized_op(sc, 0);
2748 	if (rc)
2749 		return (rc);
2750 
2751 	rc = cxgbe_vi_attach(dev, vi);
2752 	if (rc) {
2753 		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2754 		return (rc);
2755 	}
2756 	return (0);
2757 }
2758 
2759 static int
2760 vcxgbe_detach(device_t dev)
2761 {
2762 	struct vi_info *vi;
2763 	struct adapter *sc;
2764 
2765 	vi = device_get_softc(dev);
2766 	sc = vi->adapter;
2767 
2768 	doom_vi(sc, vi);
2769 
2770 	cxgbe_vi_detach(vi);
2771 	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2772 
2773 	end_synchronized_op(sc, 0);
2774 
2775 	return (0);
2776 }
2777 
2778 static struct callout fatal_callout;
2779 
2780 static void
2781 delayed_panic(void *arg)
2782 {
2783 	struct adapter *sc = arg;
2784 
2785 	panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
2786 }
2787 
2788 void
2789 t4_fatal_err(struct adapter *sc, bool fw_error)
2790 {
2791 
2792 	t4_shutdown_adapter(sc);
2793 	log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n",
2794 	    device_get_nameunit(sc->dev));
2795 	if (fw_error) {
2796 		ASSERT_SYNCHRONIZED_OP(sc);
2797 		sc->flags |= ADAP_ERR;
2798 	} else {
2799 		ADAPTER_LOCK(sc);
2800 		sc->flags |= ADAP_ERR;
2801 		ADAPTER_UNLOCK(sc);
2802 	}
2803 #ifdef TCP_OFFLOAD
2804 	taskqueue_enqueue(taskqueue_thread, &sc->async_event_task);
2805 #endif
2806 
2807 	if (t4_panic_on_fatal_err) {
2808 		log(LOG_ALERT, "%s: panic on fatal error after 30s\n",
2809 		    device_get_nameunit(sc->dev));
2810 		callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
2811 	}
2812 }
2813 
2814 void
2815 t4_add_adapter(struct adapter *sc)
2816 {
2817 	sx_xlock(&t4_list_lock);
2818 	SLIST_INSERT_HEAD(&t4_list, sc, link);
2819 	sx_xunlock(&t4_list_lock);
2820 }
2821 
2822 int
2823 t4_map_bars_0_and_4(struct adapter *sc)
2824 {
2825 	sc->regs_rid = PCIR_BAR(0);
2826 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2827 	    &sc->regs_rid, RF_ACTIVE);
2828 	if (sc->regs_res == NULL) {
2829 		device_printf(sc->dev, "cannot map registers.\n");
2830 		return (ENXIO);
2831 	}
2832 	sc->bt = rman_get_bustag(sc->regs_res);
2833 	sc->bh = rman_get_bushandle(sc->regs_res);
2834 	sc->mmio_len = rman_get_size(sc->regs_res);
2835 	setbit(&sc->doorbells, DOORBELL_KDB);
2836 
2837 	sc->msix_rid = PCIR_BAR(4);
2838 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2839 	    &sc->msix_rid, RF_ACTIVE);
2840 	if (sc->msix_res == NULL) {
2841 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2842 		return (ENXIO);
2843 	}
2844 
2845 	return (0);
2846 }
2847 
2848 int
2849 t4_map_bar_2(struct adapter *sc)
2850 {
2851 
2852 	/*
2853 	 * T4: only the iWARP driver uses the userspace doorbells.  There is no
2854 	 * need to map the BAR if RDMA is disabled.
2855 	 */
2856 	if (is_t4(sc) && sc->rdmacaps == 0)
2857 		return (0);
2858 
2859 	sc->udbs_rid = PCIR_BAR(2);
2860 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2861 	    &sc->udbs_rid, RF_ACTIVE);
2862 	if (sc->udbs_res == NULL) {
2863 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
2864 		return (ENXIO);
2865 	}
2866 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
2867 
2868 	if (chip_id(sc) >= CHELSIO_T5) {
2869 		setbit(&sc->doorbells, DOORBELL_UDB);
2870 #if defined(__i386__) || defined(__amd64__)
2871 		if (t5_write_combine) {
2872 			int rc, mode;
2873 
2874 			/*
2875 			 * Enable write combining on BAR2.  This is the
2876 			 * userspace doorbell BAR and is split into 128B
2877 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
2878 			 * with an egress queue.  The first 64B has the doorbell
2879 			 * and the second 64B can be used to submit a tx work
2880 			 * request with an implicit doorbell.
2881 			 */
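
			/*
			 * Illustrative layout of one such doorbell region
			 * (assuming UDBS_SEG_SIZE == 128):
			 *
			 *	bytes   0..63 : regular doorbell; PIDX
			 *			updates are written here
			 *	bytes  64..127: write a complete 64B tx work
			 *			request here for a write-
			 *			combined doorbell (WCWR)
			 */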
2882 
2883 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2884 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2885 			if (rc == 0) {
2886 				clrbit(&sc->doorbells, DOORBELL_UDB);
2887 				setbit(&sc->doorbells, DOORBELL_WCWR);
2888 				setbit(&sc->doorbells, DOORBELL_UDBWC);
2889 			} else {
2890 				device_printf(sc->dev,
2891 				    "couldn't enable write combining: %d\n",
2892 				    rc);
2893 			}
2894 
2895 			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2896 			t4_write_reg(sc, A_SGE_STAT_CFG,
2897 			    V_STATSOURCE_T5(7) | mode);
2898 		}
2899 #endif
2900 	}
2901 	sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
2902 
2903 	return (0);
2904 }
2905 
2906 struct memwin_init {
2907 	uint32_t base;
2908 	uint32_t aperture;
2909 };
2910 
2911 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2912 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2913 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2914 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2915 };
2916 
2917 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2918 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
2919 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
2920 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2921 };
2922 
2923 static void
2924 setup_memwin(struct adapter *sc)
2925 {
2926 	const struct memwin_init *mw_init;
2927 	struct memwin *mw;
2928 	int i;
2929 	uint32_t bar0;
2930 
2931 	if (is_t4(sc)) {
2932 		/*
2933 		 * Read low 32b of bar0 indirectly via the hardware backdoor
2934 		 * mechanism.  Works from within PCI passthrough environments
2935 		 * too, where rman_get_start() can return a different value.  We
2936 		 * need to program the T4 memory window decoders with the actual
2937 		 * addresses that will be coming across the PCIe link.
2938 		 */
2939 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2940 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2941 
2942 		mw_init = &t4_memwin[0];
2943 	} else {
2944 		/* T5+ use the relative offset inside the PCIe BAR */
2945 		bar0 = 0;
2946 
2947 		mw_init = &t5_memwin[0];
2948 	}
2949 
2950 	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2951 		rw_init(&mw->mw_lock, "memory window access");
2952 		mw->mw_base = mw_init->base;
2953 		mw->mw_aperture = mw_init->aperture;
2954 		mw->mw_curpos = 0;
2955 		t4_write_reg(sc,
2956 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2957 		    (mw->mw_base + bar0) | V_BIR(0) |
2958 		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
2959 		rw_wlock(&mw->mw_lock);
2960 		position_memwin(sc, i, 0);
2961 		rw_wunlock(&mw->mw_lock);
2962 	}
2963 
2964 	/* flush */
2965 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2966 }
2967 
2968 /*
2969  * Positions the memory window at the given address in the card's address space.
2970  * There are some alignment requirements and the actual position may be at an
2971  * address prior to the requested address.  mw->mw_curpos always has the actual
2972  * position of the window.
2973  */
2974 static void
2975 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2976 {
2977 	struct memwin *mw;
2978 	uint32_t pf;
2979 	uint32_t reg;
2980 
2981 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
2982 	mw = &sc->memwin[idx];
2983 	rw_assert(&mw->mw_lock, RA_WLOCKED);
2984 
2985 	if (is_t4(sc)) {
2986 		pf = 0;
2987 		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
2988 	} else {
2989 		pf = V_PFNUM(sc->pf);
2990 		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
2991 	}
2992 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2993 	t4_write_reg(sc, reg, mw->mw_curpos | pf);
2994 	t4_read_reg(sc, reg);	/* flush */
2995 }
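
/*
 * Worked example (illustrative): position_memwin(sc, 0, 0x12345) sets
 * mw_curpos to 0x12340 on a T4 (16B alignment) but to 0x12300 on a T5+
 * (128B alignment).  Callers must use addr - mw->mw_curpos as the offset
 * into the window, as rw_via_memwin() below does.
 */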
2996 
2997 int
2998 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2999     int len, int rw)
3000 {
3001 	struct memwin *mw;
3002 	uint32_t mw_end, v;
3003 
3004 	MPASS(idx >= 0 && idx < NUM_MEMWIN);
3005 
3006 	/* Memory can only be accessed in naturally aligned 4 byte units */
3007 	if (addr & 3 || len & 3 || len <= 0)
3008 		return (EINVAL);
3009 
3010 	mw = &sc->memwin[idx];
3011 	while (len > 0) {
3012 		rw_rlock(&mw->mw_lock);
3013 		mw_end = mw->mw_curpos + mw->mw_aperture;
3014 		if (addr >= mw_end || addr < mw->mw_curpos) {
3015 			/* Will need to reposition the window */
3016 			if (!rw_try_upgrade(&mw->mw_lock)) {
3017 				rw_runlock(&mw->mw_lock);
3018 				rw_wlock(&mw->mw_lock);
3019 			}
3020 			rw_assert(&mw->mw_lock, RA_WLOCKED);
3021 			position_memwin(sc, idx, addr);
3022 			rw_downgrade(&mw->mw_lock);
3023 			mw_end = mw->mw_curpos + mw->mw_aperture;
3024 		}
3025 		rw_assert(&mw->mw_lock, RA_RLOCKED);
3026 		while (addr < mw_end && len > 0) {
3027 			if (rw == 0) {
3028 				v = t4_read_reg(sc, mw->mw_base + addr -
3029 				    mw->mw_curpos);
3030 				*val++ = le32toh(v);
3031 			} else {
3032 				v = *val++;
3033 				t4_write_reg(sc, mw->mw_base + addr -
3034 				    mw->mw_curpos, htole32(v));
3035 			}
3036 			addr += 4;
3037 			len -= 4;
3038 		}
3039 		rw_runlock(&mw->mw_lock);
3040 	}
3041 
3042 	return (0);
3043 }
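
/*
 * Illustrative usage (hypothetical caller): read 16 bytes of adapter memory
 * at a 4-byte aligned address through memory window 0.
 *
 *	uint32_t buf[4];
 *	int rc;
 *
 *	rc = rw_via_memwin(sc, 0, addr, buf, sizeof(buf), 0);
 *
 * rw == 0 selects a read; the values land in buf in host byte order (note
 * the le32toh() above).
 */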
3044 
3045 static void
3046 t4_init_atid_table(struct adapter *sc)
3047 {
3048 	struct tid_info *t;
3049 	int i;
3050 
3051 	t = &sc->tids;
3052 	if (t->natids == 0)
3053 		return;
3054 
3055 	MPASS(t->atid_tab == NULL);
3056 
3057 	t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
3058 	    M_ZERO | M_WAITOK);
3059 	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
3060 	t->afree = t->atid_tab;
3061 	t->atids_in_use = 0;
3062 	for (i = 1; i < t->natids; i++)
3063 		t->atid_tab[i - 1].next = &t->atid_tab[i];
3064 	t->atid_tab[t->natids - 1].next = NULL;
3065 }
3066 
3067 static void
3068 t4_free_atid_table(struct adapter *sc)
3069 {
3070 	struct tid_info *t;
3071 
3072 	t = &sc->tids;
3073 
3074 	KASSERT(t->atids_in_use == 0,
3075 	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
3076 
3077 	if (mtx_initialized(&t->atid_lock))
3078 		mtx_destroy(&t->atid_lock);
3079 	free(t->atid_tab, M_CXGBE);
3080 	t->atid_tab = NULL;
3081 }
3082 
3083 int
3084 alloc_atid(struct adapter *sc, void *ctx)
3085 {
3086 	struct tid_info *t = &sc->tids;
3087 	int atid = -1;
3088 
3089 	mtx_lock(&t->atid_lock);
3090 	if (t->afree) {
3091 		union aopen_entry *p = t->afree;
3092 
3093 		atid = p - t->atid_tab;
3094 		MPASS(atid <= M_TID_TID);
3095 		t->afree = p->next;
3096 		p->data = ctx;
3097 		t->atids_in_use++;
3098 	}
3099 	mtx_unlock(&t->atid_lock);
3100 	return (atid);
3101 }
3102 
3103 void *
3104 lookup_atid(struct adapter *sc, int atid)
3105 {
3106 	struct tid_info *t = &sc->tids;
3107 
3108 	return (t->atid_tab[atid].data);
3109 }
3110 
3111 void
3112 free_atid(struct adapter *sc, int atid)
3113 {
3114 	struct tid_info *t = &sc->tids;
3115 	union aopen_entry *p = &t->atid_tab[atid];
3116 
3117 	mtx_lock(&t->atid_lock);
3118 	p->next = t->afree;
3119 	t->afree = p;
3120 	t->atids_in_use--;
3121 	mtx_unlock(&t->atid_lock);
3122 }
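
/*
 * Typical (illustrative) lifecycle of an atid.  The allocator above is a
 * simple free list threaded through the atid_tab entries themselves;
 * my_ctx below is a hypothetical caller context:
 *
 *	atid = alloc_atid(sc, my_ctx);		// -1 if the table is full
 *	if (atid >= 0) {
 *		...				// atid used in an active open
 *		ctx = lookup_atid(sc, atid);	// returns my_ctx
 *		free_atid(sc, atid);		// back on the free list
 *	}
 */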
3123 
3124 static void
3125 queue_tid_release(struct adapter *sc, int tid)
3126 {
3127 
3128 	CXGBE_UNIMPLEMENTED("deferred tid release");
3129 }
3130 
3131 void
3132 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
3133 {
3134 	struct wrqe *wr;
3135 	struct cpl_tid_release *req;
3136 
3137 	wr = alloc_wrqe(sizeof(*req), ctrlq);
3138 	if (wr == NULL) {
3139 		queue_tid_release(sc, tid);	/* defer */
3140 		return;
3141 	}
3142 	req = wrtod(wr);
3143 
3144 	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
3145 
3146 	t4_wrq_tx(sc, wr);
3147 }
3148 
3149 static int
3150 t4_range_cmp(const void *a, const void *b)
3151 {
3152 	return ((const struct t4_range *)a)->start -
3153 	       ((const struct t4_range *)b)->start;
3154 }
3155 
3156 /*
3157  * Verify that the memory range specified by the addr/len pair is valid within
3158  * the card's address space.
3159  */
3160 static int
3161 validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
3162 {
3163 	struct t4_range mem_ranges[4], *r, *next;
3164 	uint32_t em, addr_len;
3165 	int i, n, remaining;
3166 
3167 	/* Memory can only be accessed in naturally aligned 4 byte units */
3168 	if (addr & 3 || len & 3 || len == 0)
3169 		return (EINVAL);
3170 
3171 	/* Enabled memories */
3172 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3173 
3174 	r = &mem_ranges[0];
3175 	n = 0;
3176 	bzero(r, sizeof(mem_ranges));
3177 	if (em & F_EDRAM0_ENABLE) {
3178 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3179 		r->size = G_EDRAM0_SIZE(addr_len) << 20;
3180 		if (r->size > 0) {
3181 			r->start = G_EDRAM0_BASE(addr_len) << 20;
3182 			if (addr >= r->start &&
3183 			    addr + len <= r->start + r->size)
3184 				return (0);
3185 			r++;
3186 			n++;
3187 		}
3188 	}
3189 	if (em & F_EDRAM1_ENABLE) {
3190 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3191 		r->size = G_EDRAM1_SIZE(addr_len) << 20;
3192 		if (r->size > 0) {
3193 			r->start = G_EDRAM1_BASE(addr_len) << 20;
3194 			if (addr >= r->start &&
3195 			    addr + len <= r->start + r->size)
3196 				return (0);
3197 			r++;
3198 			n++;
3199 		}
3200 	}
3201 	if (em & F_EXT_MEM_ENABLE) {
3202 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3203 		r->size = G_EXT_MEM_SIZE(addr_len) << 20;
3204 		if (r->size > 0) {
3205 			r->start = G_EXT_MEM_BASE(addr_len) << 20;
3206 			if (addr >= r->start &&
3207 			    addr + len <= r->start + r->size)
3208 				return (0);
3209 			r++;
3210 			n++;
3211 		}
3212 	}
3213 	if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
3214 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3215 		r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
3216 		if (r->size > 0) {
3217 			r->start = G_EXT_MEM1_BASE(addr_len) << 20;
3218 			if (addr >= r->start &&
3219 			    addr + len <= r->start + r->size)
3220 				return (0);
3221 			r++;
3222 			n++;
3223 		}
3224 	}
3225 	MPASS(n <= nitems(mem_ranges));
3226 
3227 	if (n > 1) {
3228 		/* Sort and merge the ranges. */
3229 		qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
3230 
3231 		/* Start from index 0 and examine the next n - 1 entries. */
3232 		r = &mem_ranges[0];
3233 		for (remaining = n - 1; remaining > 0; remaining--, r++) {
3234 
3235 			MPASS(r->size > 0);	/* r is a valid entry. */
3236 			next = r + 1;
3237 			MPASS(next->size > 0);	/* and so is the next one. */
3238 
3239 			while (r->start + r->size >= next->start) {
3240 				/* Merge the next one into the current entry. */
3241 				r->size = max(r->start + r->size,
3242 				    next->start + next->size) - r->start;
3243 				n--;	/* One fewer entry in total. */
3244 				if (--remaining == 0)
3245 					goto done;	/* short circuit */
3246 				next++;
3247 			}
3248 			if (next != r + 1) {
3249 				/*
3250 				 * Some entries were merged into r and next
3251 				 * points to the first valid entry that couldn't
3252 				 * be merged.
3253 				 */
3254 				MPASS(next->size > 0);	/* must be valid */
3255 				memcpy(r + 1, next, remaining * sizeof(*r));
3256 #ifdef INVARIANTS
3257 				/*
3258 				 * This is so that the size assertions in the
3259 				 * next iteration of the loop do the right
3260 				 * thing for entries that were pulled up and are
3261 				 * no longer valid.
3262 				 */
3263 				MPASS(n < nitems(mem_ranges));
3264 				bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
3265 				    sizeof(struct t4_range));
3266 #endif
3267 			}
3268 		}
3269 done:
3270 		/* Done merging the ranges. */
3271 		MPASS(n > 0);
3272 		r = &mem_ranges[0];
3273 		for (i = 0; i < n; i++, r++) {
3274 			if (addr >= r->start &&
3275 			    addr + len <= r->start + r->size)
3276 				return (0);
3277 		}
3278 	}
3279 
3280 	return (EFAULT);
3281 }
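
/*
 * Worked example (illustrative): with EDC0 at [0x00000000, 0x00400000) and
 * EDC1 at [0x00400000, 0x00800000), the sort/merge above combines the two
 * into a single [0x00000000, 0x00800000) range, so a request such as
 * addr = 0x003ffff0, len = 0x20 that straddles the boundary validates even
 * though it fits entirely in neither memory alone.
 */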
3282 
3283 static int
3284 fwmtype_to_hwmtype(int mtype)
3285 {
3286 
3287 	switch (mtype) {
3288 	case FW_MEMTYPE_EDC0:
3289 		return (MEM_EDC0);
3290 	case FW_MEMTYPE_EDC1:
3291 		return (MEM_EDC1);
3292 	case FW_MEMTYPE_EXTMEM:
3293 		return (MEM_MC0);
3294 	case FW_MEMTYPE_EXTMEM1:
3295 		return (MEM_MC1);
3296 	default:
3297 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
3298 	}
3299 }
3300 
3301 /*
3302  * Verify that the memory range specified by the memtype/offset/len pair is
3303  * valid and lies entirely within the memtype specified.  The global address of
3304  * the start of the range is returned in addr.
3305  */
3306 static int
3307 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
3308     uint32_t *addr)
3309 {
3310 	uint32_t em, addr_len, maddr;
3311 
3312 	/* Memory can only be accessed in naturally aligned 4 byte units */
3313 	if (off & 3 || len & 3 || len == 0)
3314 		return (EINVAL);
3315 
3316 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3317 	switch (fwmtype_to_hwmtype(mtype)) {
3318 	case MEM_EDC0:
3319 		if (!(em & F_EDRAM0_ENABLE))
3320 			return (EINVAL);
3321 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3322 		maddr = G_EDRAM0_BASE(addr_len) << 20;
3323 		break;
3324 	case MEM_EDC1:
3325 		if (!(em & F_EDRAM1_ENABLE))
3326 			return (EINVAL);
3327 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3328 		maddr = G_EDRAM1_BASE(addr_len) << 20;
3329 		break;
3330 	case MEM_MC:
3331 		if (!(em & F_EXT_MEM_ENABLE))
3332 			return (EINVAL);
3333 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3334 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
3335 		break;
3336 	case MEM_MC1:
3337 		if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
3338 			return (EINVAL);
3339 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3340 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
3341 		break;
3342 	default:
3343 		return (EINVAL);
3344 	}
3345 
3346 	*addr = maddr + off;	/* global address */
3347 	return (validate_mem_range(sc, *addr, len));
3348 }
3349 
3350 static int
3351 fixup_devlog_params(struct adapter *sc)
3352 {
3353 	struct devlog_params *dparams = &sc->params.devlog;
3354 	int rc;
3355 
3356 	rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
3357 	    dparams->size, &dparams->addr);
3358 
3359 	return (rc);
3360 }
3361 
3362 static void
3363 update_nirq(struct intrs_and_queues *iaq, int nports)
3364 {
3365 
3366 	iaq->nirq = T4_EXTRA_INTR;
3367 	iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
3368 	iaq->nirq += nports * iaq->nofldrxq;
3369 	iaq->nirq += nports * (iaq->num_vis - 1) *
3370 	    max(iaq->nrxq_vi, iaq->nnmrxq_vi);
3371 	iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
3372 }
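
/*
 * Worked example (illustrative, and assuming T4_EXTRA_INTR == 2): a 2-port
 * adapter with nrxq = 4, nnmrxq = 0, nofldrxq = 2, num_vis = 2, nrxq_vi = 1,
 * nnmrxq_vi = 0, and nofldrxq_vi = 0 needs 2 + 2*4 + 2*2 + 2*1*1 + 0 = 16
 * vectors.
 */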
3373 
3374 /*
3375  * Adjust requirements to fit the number of interrupts available.
3376  */
3377 static void
3378 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
3379     int navail)
3380 {
3381 	int old_nirq;
3382 	const int nports = sc->params.nports;
3383 
3384 	MPASS(nports > 0);
3385 	MPASS(navail > 0);
3386 
3387 	bzero(iaq, sizeof(*iaq));
3388 	iaq->intr_type = itype;
3389 	iaq->num_vis = t4_num_vis;
3390 	iaq->ntxq = t4_ntxq;
3391 	iaq->ntxq_vi = t4_ntxq_vi;
3392 	iaq->nrxq = t4_nrxq;
3393 	iaq->nrxq_vi = t4_nrxq_vi;
3394 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3395 	if (is_offload(sc) || is_ethoffload(sc)) {
3396 		iaq->nofldtxq = t4_nofldtxq;
3397 		iaq->nofldtxq_vi = t4_nofldtxq_vi;
3398 	}
3399 #endif
3400 #ifdef TCP_OFFLOAD
3401 	if (is_offload(sc)) {
3402 		iaq->nofldrxq = t4_nofldrxq;
3403 		iaq->nofldrxq_vi = t4_nofldrxq_vi;
3404 	}
3405 #endif
3406 #ifdef DEV_NETMAP
3407 	if (t4_native_netmap & NN_MAIN_VI) {
3408 		iaq->nnmtxq = t4_nnmtxq;
3409 		iaq->nnmrxq = t4_nnmrxq;
3410 	}
3411 	if (t4_native_netmap & NN_EXTRA_VI) {
3412 		iaq->nnmtxq_vi = t4_nnmtxq_vi;
3413 		iaq->nnmrxq_vi = t4_nnmrxq_vi;
3414 	}
3415 #endif
3416 
3417 	update_nirq(iaq, nports);
3418 	if (iaq->nirq <= navail &&
3419 	    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3420 		/*
3421 		 * This is the normal case -- there are enough interrupts for
3422 		 * everything.
3423 		 */
3424 		goto done;
3425 	}
3426 
3427 	/*
3428 	 * If extra VIs have been configured, try reducing their count and see if
3429 	 * that works.
3430 	 */
3431 	while (iaq->num_vis > 1) {
3432 		iaq->num_vis--;
3433 		update_nirq(iaq, nports);
3434 		if (iaq->nirq <= navail &&
3435 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3436 			device_printf(sc->dev, "virtual interfaces per port "
3437 			    "reduced to %d from %d.  nrxq=%u, nofldrxq=%u, "
3438 			    "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u.  "
3439 			    "itype %d, navail %u, nirq %d.\n",
3440 			    iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3441 			    iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3442 			    itype, navail, iaq->nirq);
3443 			goto done;
3444 		}
3445 	}
3446 
3447 	/*
3448 	 * Extra VIs will not be created.  Log a message if they were requested.
3449 	 */
3450 	MPASS(iaq->num_vis == 1);
3451 	iaq->ntxq_vi = iaq->nrxq_vi = 0;
3452 	iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3453 	iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3454 	if (iaq->num_vis != t4_num_vis) {
3455 		device_printf(sc->dev, "extra virtual interfaces disabled.  "
3456 		    "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3457 		    "nnmrxq_vi=%u.  itype %d, navail %u, nirq %d.\n",
3458 		    iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3459 		    iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3460 	}
3461 
3462 	/*
3463 	 * Keep reducing the number of NIC rx queues to the next lower power of
3464 	 * 2 (for even RSS distribution) while halving the TOE rx queues, and
3465 	 * see if that works.
3466 	 */
3467 	do {
3468 		if (iaq->nrxq > 1) {
3469 			do {
3470 				iaq->nrxq--;
3471 			} while (!powerof2(iaq->nrxq));
3472 			if (iaq->nnmrxq > iaq->nrxq)
3473 				iaq->nnmrxq = iaq->nrxq;
3474 		}
3475 		if (iaq->nofldrxq > 1)
3476 			iaq->nofldrxq >>= 1;
3477 
3478 		old_nirq = iaq->nirq;
3479 		update_nirq(iaq, nports);
3480 		if (iaq->nirq <= navail &&
3481 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
3482 			device_printf(sc->dev, "running with reduced number of "
3483 			    "rx queues because of shortage of interrupts.  "
3484 			    "nrxq=%u, nofldrxq=%u.  "
3485 			    "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3486 			    iaq->nofldrxq, itype, navail, iaq->nirq);
3487 			goto done;
3488 		}
3489 	} while (old_nirq != iaq->nirq);
3490 
3491 	/* One interrupt for everything.  Ugh. */
3492 	device_printf(sc->dev, "running with minimal number of queues.  "
3493 	    "itype %d, navail %u.\n", itype, navail);
3494 	iaq->nirq = 1;
3495 	iaq->nrxq = 1;
3496 	iaq->ntxq = 1;
3497 	if (iaq->nofldrxq > 0) {
3498 		iaq->nofldrxq = 1;
3499 		iaq->nofldtxq = 1;
3500 	}
3501 	iaq->nnmtxq = 0;
3502 	iaq->nnmrxq = 0;
3503 done:
3504 	MPASS(iaq->num_vis > 0);
3505 	if (iaq->num_vis > 1) {
3506 		MPASS(iaq->nrxq_vi > 0);
3507 		MPASS(iaq->ntxq_vi > 0);
3508 	}
3509 	MPASS(iaq->nirq > 0);
3510 	MPASS(iaq->nrxq > 0);
3511 	MPASS(iaq->ntxq > 0);
3512 	if (itype == INTR_MSI) {
3513 		MPASS(powerof2(iaq->nirq));
3514 	}
3515 }
3516 
3517 static int
3518 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3519 {
3520 	int rc, itype, navail, nalloc;
3521 
3522 	for (itype = INTR_MSIX; itype; itype >>= 1) {
3523 
3524 		if ((itype & t4_intr_types) == 0)
3525 			continue;	/* not allowed */
3526 
3527 		if (itype == INTR_MSIX)
3528 			navail = pci_msix_count(sc->dev);
3529 		else if (itype == INTR_MSI)
3530 			navail = pci_msi_count(sc->dev);
3531 		else
3532 			navail = 1;
3533 restart:
3534 		if (navail == 0)
3535 			continue;
3536 
3537 		calculate_iaq(sc, iaq, itype, navail);
3538 		nalloc = iaq->nirq;
3539 		rc = 0;
3540 		if (itype == INTR_MSIX)
3541 			rc = pci_alloc_msix(sc->dev, &nalloc);
3542 		else if (itype == INTR_MSI)
3543 			rc = pci_alloc_msi(sc->dev, &nalloc);
3544 
3545 		if (rc == 0 && nalloc > 0) {
3546 			if (nalloc == iaq->nirq)
3547 				return (0);
3548 
3549 			/*
3550 			 * Didn't get the number requested.  Use whatever number
3551 			 * the kernel is willing to allocate.
3552 			 */
3553 			device_printf(sc->dev, "fewer vectors than requested, "
3554 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3555 			    itype, iaq->nirq, nalloc);
3556 			pci_release_msi(sc->dev);
3557 			navail = nalloc;
3558 			goto restart;
3559 		}
3560 
3561 		device_printf(sc->dev,
3562 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
3563 		    itype, rc, iaq->nirq, nalloc);
3564 	}
3565 
3566 	device_printf(sc->dev,
3567 	    "failed to find a usable interrupt type.  "
3568 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
3569 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3570 
3571 	return (ENXIO);
3572 }
3573 
3574 #define FW_VERSION(chip) ( \
3575     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3576     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3577     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3578     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3579 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
3580 
3581 /* Just enough of fw_hdr to cover all version info. */
3582 struct fw_h {
3583 	__u8	ver;
3584 	__u8	chip;
3585 	__be16	len512;
3586 	__be32	fw_ver;
3587 	__be32	tp_microcode_ver;
3588 	__u8	intfver_nic;
3589 	__u8	intfver_vnic;
3590 	__u8	intfver_ofld;
3591 	__u8	intfver_ri;
3592 	__u8	intfver_iscsipdu;
3593 	__u8	intfver_iscsi;
3594 	__u8	intfver_fcoepdu;
3595 	__u8	intfver_fcoe;
3596 };
3597 /* Spot check a couple of fields. */
3598 CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
3599 CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
3600 CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
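/*
 * These compile-time checks catch any drift between the layout of struct
 * fw_h and that of the full struct fw_hdr it shadows.
 */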
3601 
3602 struct fw_info {
3603 	uint8_t chip;
3604 	char *kld_name;
3605 	char *fw_mod_name;
3606 	struct fw_h fw_h;
3607 } fw_info[] = {
3608 	{
3609 		.chip = CHELSIO_T4,
3610 		.kld_name = "t4fw_cfg",
3611 		.fw_mod_name = "t4fw",
3612 		.fw_h = {
3613 			.chip = FW_HDR_CHIP_T4,
3614 			.fw_ver = htobe32(FW_VERSION(T4)),
3615 			.intfver_nic = FW_INTFVER(T4, NIC),
3616 			.intfver_vnic = FW_INTFVER(T4, VNIC),
3617 			.intfver_ofld = FW_INTFVER(T4, OFLD),
3618 			.intfver_ri = FW_INTFVER(T4, RI),
3619 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3620 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
3621 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3622 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
3623 		},
3624 	}, {
3625 		.chip = CHELSIO_T5,
3626 		.kld_name = "t5fw_cfg",
3627 		.fw_mod_name = "t5fw",
3628 		.fw_h = {
3629 			.chip = FW_HDR_CHIP_T5,
3630 			.fw_ver = htobe32(FW_VERSION(T5)),
3631 			.intfver_nic = FW_INTFVER(T5, NIC),
3632 			.intfver_vnic = FW_INTFVER(T5, VNIC),
3633 			.intfver_ofld = FW_INTFVER(T5, OFLD),
3634 			.intfver_ri = FW_INTFVER(T5, RI),
3635 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3636 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
3637 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3638 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
3639 		},
3640 	}, {
3641 		.chip = CHELSIO_T6,
3642 		.kld_name = "t6fw_cfg",
3643 		.fw_mod_name = "t6fw",
3644 		.fw_h = {
3645 			.chip = FW_HDR_CHIP_T6,
3646 			.fw_ver = htobe32(FW_VERSION(T6)),
3647 			.intfver_nic = FW_INTFVER(T6, NIC),
3648 			.intfver_vnic = FW_INTFVER(T6, VNIC),
3649 			.intfver_ofld = FW_INTFVER(T6, OFLD),
3650 			.intfver_ri = FW_INTFVER(T6, RI),
3651 			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3652 			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
3653 			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3654 			.intfver_fcoe = FW_INTFVER(T6, FCOE),
3655 		},
3656 	}
3657 };
3658 
3659 static struct fw_info *
3660 find_fw_info(int chip)
3661 {
3662 	int i;
3663 
3664 	for (i = 0; i < nitems(fw_info); i++) {
3665 		if (fw_info[i].chip == chip)
3666 			return (&fw_info[i]);
3667 	}
3668 	return (NULL);
3669 }
3670 
3671 /*
3672  * Is the given firmware API compatible with the one the driver was compiled
3673  * with?
3674  */
3675 static int
3676 fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
3677 {
3678 
3679 	/* short circuit if it's the exact same firmware version */
3680 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3681 		return (1);
3682 
3683 	/*
3684 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
3685 	 * features that are supported in the driver.
3686 	 */
3687 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3688 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3689 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3690 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3691 		return (1);
3692 #undef SAME_INTF
3693 
3694 	return (0);
3695 }
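
/*
 * Note that only the chip and the per-API interface versions are compared
 * above: two firmwares whose overall fw_ver differs (e.g. by build number)
 * are still compatible as long as every intfver_* matches.
 */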
3696 
3697 static int
3698 load_fw_module(struct adapter *sc, const struct firmware **dcfg,
3699     const struct firmware **fw)
3700 {
3701 	struct fw_info *fw_info;
3702 
3703 	*dcfg = NULL;
3704 	if (fw != NULL)
3705 		*fw = NULL;
3706 
3707 	fw_info = find_fw_info(chip_id(sc));
3708 	if (fw_info == NULL) {
3709 		device_printf(sc->dev,
3710 		    "unable to look up firmware information for chip %d.\n",
3711 		    chip_id(sc));
3712 		return (EINVAL);
3713 	}
3714 
3715 	*dcfg = firmware_get(fw_info->kld_name);
3716 	if (*dcfg != NULL) {
3717 		if (fw != NULL)
3718 			*fw = firmware_get(fw_info->fw_mod_name);
3719 		return (0);
3720 	}
3721 
3722 	return (ENOENT);
3723 }
3724 
3725 static void
3726 unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
3727     const struct firmware *fw)
3728 {
3729 
3730 	if (fw != NULL)
3731 		firmware_put(fw, FIRMWARE_UNLOAD);
3732 	if (dcfg != NULL)
3733 		firmware_put(dcfg, FIRMWARE_UNLOAD);
3734 }
3735 
3736 /*
3737  * Return values:
3738  * 0 means no firmware install attempted.
3739  * ERESTART means a firmware install was attempted and was successful.
3740  * +ve errno means a firmware install was attempted but failed.
3741  */
3742 static int
3743 install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
3744     const struct fw_h *drv_fw, const char *reason, int *already)
3745 {
3746 	const struct firmware *cfg, *fw;
3747 	const uint32_t c = be32toh(card_fw->fw_ver);
3748 	uint32_t d, k;
3749 	int rc, fw_install;
3750 	struct fw_h bundled_fw;
3751 	bool load_attempted;
3752 
3753 	cfg = fw = NULL;
3754 	load_attempted = false;
3755 	fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
3756 
3757 	memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
3758 	if (t4_fw_install < 0) {
3759 		rc = load_fw_module(sc, &cfg, &fw);
3760 		if (rc != 0 || fw == NULL) {
3761 			device_printf(sc->dev,
3762 			    "failed to load firmware module: %d. cfg %p, fw %p;"
3763 			    " will use compiled-in firmware version for "
3764 			    "hw.cxgbe.fw_install checks.\n",
3765 			    rc, cfg, fw);
3766 		} else {
3767 			memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
3768 		}
3769 		load_attempted = true;
3770 	}
3771 	d = be32toh(bundled_fw.fw_ver);
3772 
3773 	if (reason != NULL)
3774 		goto install;
3775 
3776 	if ((sc->flags & FW_OK) == 0) {
3777 
3778 		if (c == 0xffffffff) {
3779 			reason = "missing";
3780 			goto install;
3781 		}
3782 
3783 		rc = 0;
3784 		goto done;
3785 	}
3786 
3787 	if (!fw_compatible(card_fw, &bundled_fw)) {
3788 		reason = "incompatible or unusable";
3789 		goto install;
3790 	}
3791 
3792 	if (d > c) {
3793 		reason = "older than the version bundled with this driver";
3794 		goto install;
3795 	}
3796 
3797 	if (fw_install == 2 && d != c) {
3798 		reason = "different than the version bundled with this driver";
3799 		goto install;
3800 	}
3801 
3802 	/* No reason to do anything to the firmware already on the card. */
3803 	rc = 0;
3804 	goto done;
3805 
3806 install:
3807 	rc = 0;
3808 	if ((*already)++)
3809 		goto done;
3810 
3811 	if (fw_install == 0) {
3812 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3813 		    "but the driver is prohibited from installing a firmware "
3814 		    "on the card.\n",
3815 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3816 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3817 
3818 		goto done;
3819 	}
3820 
3821 	/*
3822 	 * We'll attempt to install a firmware.  Load the module first (if it
3823 	 * hasn't been loaded already).
3824 	 */
3825 	if (!load_attempted) {
3826 		rc = load_fw_module(sc, &cfg, &fw);
3827 		if (rc != 0 || fw == NULL) {
3828 			device_printf(sc->dev,
3829 			    "failed to load firmware module: %d. cfg %p, fw %p\n",
3830 			    rc, cfg, fw);
3831 			/* carry on */
3832 		}
3833 	}
3834 	if (fw == NULL) {
3835 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3836 		    "but the driver cannot take corrective action because it "
3837 		    "is unable to load the firmware module.\n",
3838 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3839 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3840 		rc = sc->flags & FW_OK ? 0 : ENOENT;
3841 		goto done;
3842 	}
3843 	k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
3844 	if (k != d) {
3845 		MPASS(t4_fw_install > 0);
3846 		device_printf(sc->dev,
3847 		    "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
3848 		    "expecting (%u.%u.%u.%u) and will not be used.\n",
3849 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3850 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
3851 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3852 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3853 		rc = sc->flags & FW_OK ? 0 : EINVAL;
3854 		goto done;
3855 	}
3856 
3857 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3858 	    "installing firmware %u.%u.%u.%u on card.\n",
3859 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3860 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3861 	    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3862 	    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3863 
3864 	rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3865 	if (rc != 0) {
3866 		device_printf(sc->dev, "failed to install firmware: %d\n", rc);
3867 	} else {
3868 		/* Installed successfully, update the cached header too. */
3869 		rc = ERESTART;
3870 		memcpy(card_fw, fw->data, sizeof(*card_fw));
3871 	}
3872 done:
3873 	unload_fw_module(sc, cfg, fw);
3874 
3875 	return (rc);
3876 }
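
/*
 * Sketch of the caller protocol (see contact_firmware below): on ERESTART
 * the card's firmware header must be re-read before calling again.
 *
 *	rc = install_kld_firmware(sc, card_fw, drv_fw, NULL, &already);
 *	if (rc == ERESTART)
 *		goto restart;
 */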
3877 
3878 /*
3879  * Establish contact with the firmware and attempt to become the master driver.
3880  *
3881  * A firmware will be installed to the card if needed (if the driver is allowed
3882  * to do so).
3883  */
3884 static int
3885 contact_firmware(struct adapter *sc)
3886 {
3887 	int rc, already = 0;
3888 	enum dev_state state;
3889 	struct fw_info *fw_info;
3890 	struct fw_hdr *card_fw;		/* fw on the card */
3891 	const struct fw_h *drv_fw;
3892 
3893 	fw_info = find_fw_info(chip_id(sc));
3894 	if (fw_info == NULL) {
3895 		device_printf(sc->dev,
3896 		    "unable to look up firmware information for chip %d.\n",
3897 		    chip_id(sc));
3898 		return (EINVAL);
3899 	}
3900 	drv_fw = &fw_info->fw_h;
3901 
3902 	/* Read the header of the firmware on the card */
3903 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3904 restart:
3905 	rc = -t4_get_fw_hdr(sc, card_fw);
3906 	if (rc != 0) {
3907 		device_printf(sc->dev,
3908 		    "unable to read firmware header from card's flash: %d\n",
3909 		    rc);
3910 		goto done;
3911 	}
3912 
3913 	rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
3914 	    &already);
3915 	if (rc == ERESTART)
3916 		goto restart;
3917 	if (rc != 0)
3918 		goto done;
3919 
3920 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3921 	if (rc < 0 || state == DEV_STATE_ERR) {
3922 		rc = -rc;
3923 		device_printf(sc->dev,
3924 		    "failed to connect to the firmware: %d, %d.  "
3925 		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3926 #if 0
3927 		if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
3928 		    "not responding properly to HELLO", &already) == ERESTART)
3929 			goto restart;
3930 #endif
3931 		goto done;
3932 	}
3933 	MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
3934 	sc->flags |= FW_OK;	/* The firmware responded to the FW_HELLO. */
3935 
3936 	if (rc == sc->pf) {
3937 		sc->flags |= MASTER_PF;
3938 		rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
3939 		    NULL, &already);
3940 		if (rc == ERESTART)
3941 			rc = 0;
3942 		else if (rc != 0)
3943 			goto done;
3944 	} else if (state == DEV_STATE_UNINIT) {
3945 		/*
3946 		 * We didn't get to be the master so we definitely won't be
3947 		 * configuring the chip.  It's a bug if someone else hasn't
3948 		 * configured it already.
3949 		 */
3950 		device_printf(sc->dev, "couldn't be master (%d), "
3951 		    "device not already initialized either (%d).  "
3952 		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3953 		rc = EPROTO;
3954 		goto done;
3955 	} else {
3956 		/*
3957 		 * Some other PF is the master and has configured the chip.
3958 		 * This is allowed but untested.
3959 		 */
3960 		device_printf(sc->dev, "PF%d is master, device state %d.  "
3961 		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3962 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
3963 		sc->cfcsum = 0;
3964 		rc = 0;
3965 	}
3966 done:
3967 	if (rc != 0 && sc->flags & FW_OK) {
3968 		t4_fw_bye(sc, sc->mbox);
3969 		sc->flags &= ~FW_OK;
3970 	}
3971 	free(card_fw, M_CXGBE);
3972 	return (rc);
3973 }
3974 
3975 static int
3976 copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
3977     uint32_t mtype, uint32_t moff)
3978 {
3979 	struct fw_info *fw_info;
3980 	const struct firmware *dcfg, *rcfg = NULL;
3981 	const uint32_t *cfdata;
3982 	uint32_t cflen, addr;
3983 	int rc;
3984 
3985 	load_fw_module(sc, &dcfg, NULL);
3986 
3987 	/* Card specific interpretation of "default". */
3988 	if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3989 		if (pci_get_device(sc->dev) == 0x440a)
3990 			snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
3991 		if (is_fpga(sc))
3992 			snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
3993 	}
3994 
3995 	if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3996 		if (dcfg == NULL) {
3997 			device_printf(sc->dev,
3998 			    "KLD with default config is not available.\n");
3999 			rc = ENOENT;
4000 			goto done;
4001 		}
4002 		cfdata = dcfg->data;
4003 		cflen = dcfg->datasize & ~3;
4004 	} else {
4005 		char s[32];
4006 
4007 		fw_info = find_fw_info(chip_id(sc));
4008 		if (fw_info == NULL) {
4009 			device_printf(sc->dev,
4010 			    "unable to look up firmware information for chip %d.\n",
4011 			    chip_id(sc));
4012 			rc = EINVAL;
4013 			goto done;
4014 		}
4015 		snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
4016 
4017 		rcfg = firmware_get(s);
4018 		if (rcfg == NULL) {
4019 			device_printf(sc->dev,
4020 			    "unable to load module \"%s\" for configuration "
4021 			    "profile \"%s\".\n", s, cfg_file);
4022 			rc = ENOENT;
4023 			goto done;
4024 		}
4025 		cfdata = rcfg->data;
4026 		cflen = rcfg->datasize & ~3;
4027 	}
4028 
4029 	if (cflen > FLASH_CFG_MAX_SIZE) {
4030 		device_printf(sc->dev,
4031 		    "config file too long (%d, max allowed is %d).\n",
4032 		    cflen, FLASH_CFG_MAX_SIZE);
4033 		rc = EINVAL;
4034 		goto done;
4035 	}
4036 
4037 	rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
4038 	if (rc != 0) {
4039 		device_printf(sc->dev,
4040 		    "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
4041 		    __func__, mtype, moff, cflen, rc);
4042 		rc = EINVAL;
4043 		goto done;
4044 	}
4045 	write_via_memwin(sc, 2, addr, cfdata, cflen);
4046 done:
4047 	if (rcfg != NULL)
4048 		firmware_put(rcfg, FIRMWARE_UNLOAD);
4049 	unload_fw_module(sc, dcfg, NULL);
4050 	return (rc);
4051 }
4052 
4053 struct caps_allowed {
4054 	uint16_t nbmcaps;
4055 	uint16_t linkcaps;
4056 	uint16_t switchcaps;
4057 	uint16_t niccaps;
4058 	uint16_t toecaps;
4059 	uint16_t rdmacaps;
4060 	uint16_t cryptocaps;
4061 	uint16_t iscsicaps;
4062 	uint16_t fcoecaps;
4063 };
4064 
4065 #define FW_PARAM_DEV(param) \
4066 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4067 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4068 #define FW_PARAM_PFVF(param) \
4069 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4070 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
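
/*
 * Example: FW_PARAM_DEV(PORTVEC) builds the mnemonic/index word that
 * t4_query_params() passes to the firmware to read the device-wide port
 * vector, and FW_PARAM_PFVF(L2T_START) does the same for a per-PF/VF
 * parameter.
 */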
4071 
4072 /*
4073  * Provide a configuration profile to the firmware and have it initialize the
4074  * chip accordingly.  This may involve uploading a configuration file to the
4075  * card.
4076  */
4077 static int
4078 apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
4079     const struct caps_allowed *caps_allowed)
4080 {
4081 	int rc;
4082 	struct fw_caps_config_cmd caps;
4083 	uint32_t mtype, moff, finicsum, cfcsum, param, val;
4084 
4085 	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
4086 	if (rc != 0) {
4087 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
4088 		return (rc);
4089 	}
4090 
4091 	bzero(&caps, sizeof(caps));
4092 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4093 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
4094 	if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
4095 		mtype = 0;
4096 		moff = 0;
4097 		caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4098 	} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
4099 		mtype = FW_MEMTYPE_FLASH;
4100 		moff = t4_flash_cfg_addr(sc);
4101 		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4102 		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4103 		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4104 		    FW_LEN16(caps));
4105 	} else {
4106 		/*
4107 		 * Ask the firmware where it wants us to upload the config file.
4108 		 */
4109 		param = FW_PARAM_DEV(CF);
4110 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4111 		if (rc != 0) {
4112 			/* No support for config file?  Shouldn't happen. */
4113 			device_printf(sc->dev,
4114 			    "failed to query config file location: %d.\n", rc);
4115 			goto done;
4116 		}
4117 		mtype = G_FW_PARAMS_PARAM_Y(val);
4118 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
4119 		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4120 		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4121 		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4122 		    FW_LEN16(caps));
4123 
4124 		rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
4125 		if (rc != 0) {
4126 			device_printf(sc->dev,
4127 			    "failed to upload config file to card: %d.\n", rc);
4128 			goto done;
4129 		}
4130 	}
4131 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4132 	if (rc != 0) {
4133 		device_printf(sc->dev, "failed to pre-process config file: %d "
4134 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
4135 		goto done;
4136 	}
4137 
4138 	finicsum = be32toh(caps.finicsum);
4139 	cfcsum = be32toh(caps.cfcsum);	/* actual */
4140 	if (finicsum != cfcsum) {
4141 		device_printf(sc->dev,
4142 		    "WARNING: config file checksum mismatch: %08x %08x\n",
4143 		    finicsum, cfcsum);
4144 	}
4145 	sc->cfcsum = cfcsum;
4146 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
4147 
4148 	/*
4149 	 * Let the firmware know what features will (not) be used so it can tune
4150 	 * things accordingly.
4151 	 */
4152 #define LIMIT_CAPS(x) do { \
4153 	caps.x##caps &= htobe16(caps_allowed->x##caps); \
4154 } while (0)
4155 	LIMIT_CAPS(nbm);
4156 	LIMIT_CAPS(link);
4157 	LIMIT_CAPS(switch);
4158 	LIMIT_CAPS(nic);
4159 	LIMIT_CAPS(toe);
4160 	LIMIT_CAPS(rdma);
4161 	LIMIT_CAPS(crypto);
4162 	LIMIT_CAPS(iscsi);
4163 	LIMIT_CAPS(fcoe);
4164 #undef LIMIT_CAPS
4165 	if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4166 		/*
4167 		 * TOE and hashfilters are mutually exclusive.  It is a config
4168 		 * file or firmware bug if both are reported as available.  Try
4169 		 * to cope with the situation in non-debug builds by disabling
4170 		 * TOE.
4171 		 */
4172 		MPASS(caps.toecaps == 0);
4173 
4174 		caps.toecaps = 0;
4175 		caps.rdmacaps = 0;
4176 		caps.iscsicaps = 0;
4177 	}
4178 
4179 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4180 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4181 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4182 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
4183 	if (rc != 0) {
4184 		device_printf(sc->dev,
4185 		    "failed to process config file: %d.\n", rc);
4186 		goto done;
4187 	}
4188 
4189 	t4_tweak_chip_settings(sc);
4190 	set_params__pre_init(sc);
4191 
4192 	/* get basic stuff going */
4193 	rc = -t4_fw_initialize(sc, sc->mbox);
4194 	if (rc != 0) {
4195 		device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
4196 		goto done;
4197 	}
4198 done:
4199 	return (rc);
4200 }
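
/*
 * Three configuration sources are handled above: the firmware's built-in
 * config (BUILTIN_CF), a config already present in the card's flash
 * (FLASH_CF), and a profile that the driver uploads to the card memory
 * location reported by FW_PARAM_DEV(CF).
 */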
4201 
4202 /*
4203  * Partition chip resources for use between various PFs, VFs, etc.
4204  */
4205 static int
4206 partition_resources(struct adapter *sc)
4207 {
4208 	char cfg_file[sizeof(t4_cfg_file)];
4209 	struct caps_allowed caps_allowed;
4210 	int rc;
4211 	bool fallback;
4212 
4213 	/* Only the master driver gets to configure the chip resources. */
4214 	MPASS(sc->flags & MASTER_PF);
4215 
4216 #define COPY_CAPS(x) do { \
4217 	caps_allowed.x##caps = t4_##x##caps_allowed; \
4218 } while (0)
4219 	bzero(&caps_allowed, sizeof(caps_allowed));
4220 	COPY_CAPS(nbm);
4221 	COPY_CAPS(link);
4222 	COPY_CAPS(switch);
4223 	COPY_CAPS(nic);
4224 	COPY_CAPS(toe);
4225 	COPY_CAPS(rdma);
4226 	COPY_CAPS(crypto);
4227 	COPY_CAPS(iscsi);
4228 	COPY_CAPS(fcoe);
4229 	fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
4230 	snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
4231 retry:
4232 	rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
4233 	if (rc != 0 && fallback) {
4234 		device_printf(sc->dev,
4235 		    "failed (%d) to configure card with \"%s\" profile, "
4236 		    "will fall back to a basic configuration and retry.\n",
4237 		    rc, cfg_file);
4238 		snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
4239 		bzero(&caps_allowed, sizeof(caps_allowed));
4240 		COPY_CAPS(switch);
4241 		caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
4242 		fallback = false;
4243 		goto retry;
4244 	}
4245 #undef COPY_CAPS
4246 	return (rc);
4247 }
4248 
4249 /*
4250  * Retrieve parameters that are needed (or nice to have) very early.
4251  */
4252 static int
4253 get_params__pre_init(struct adapter *sc)
4254 {
4255 	int rc;
4256 	uint32_t param[2], val[2];
4257 
4258 	t4_get_version_info(sc);
4259 
4260 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
4261 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
4262 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
4263 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
4264 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
4265 
4266 	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
4267 	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
4268 	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
4269 	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
4270 	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
4271 
4272 	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
4273 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
4274 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
4275 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
4276 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
4277 
4278 	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
4279 	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
4280 	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
4281 	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
4282 	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
4283 
4284 	param[0] = FW_PARAM_DEV(PORTVEC);
4285 	param[1] = FW_PARAM_DEV(CCLK);
4286 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4287 	if (rc != 0) {
4288 		device_printf(sc->dev,
4289 		    "failed to query parameters (pre_init): %d.\n", rc);
4290 		return (rc);
4291 	}
4292 
4293 	sc->params.portvec = val[0];
4294 	sc->params.nports = bitcount32(val[0]);
4295 	sc->params.vpd.cclk = val[1];
4296 
4297 	/* Read device log parameters. */
4298 	rc = -t4_init_devlog_params(sc, 1);
4299 	if (rc == 0)
4300 		fixup_devlog_params(sc);
4301 	else {
4302 		device_printf(sc->dev,
4303 		    "failed to get devlog parameters: %d.\n", rc);
4304 		rc = 0;	/* devlog isn't critical for device operation */
4305 	}
4306 
4307 	return (rc);
4308 }
4309 
4310 /*
4311  * Any params that need to be set before FW_INITIALIZE.
4312  */
4313 static int
4314 set_params__pre_init(struct adapter *sc)
4315 {
4316 	int rc = 0;
4317 	uint32_t param, val;
4318 
4319 	if (chip_id(sc) >= CHELSIO_T6) {
4320 		param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
4321 		val = 1;
4322 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4323 		/* firmwares < 1.20.1.0 do not have this param. */
4324 		if (rc == FW_EINVAL &&
4325 		    sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
4326 			rc = 0;
4327 		}
4328 		if (rc != 0) {
4329 			device_printf(sc->dev,
4330 			    "failed to enable high priority filters: %d.\n",
4331 			    rc);
4332 		}
4333 	}
4334 
4335 	/* Enable opaque VIIDs with firmwares that support it. */
4336 	param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
4337 	val = 1;
4338 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4339 	if (rc == 0 && val == 1)
4340 		sc->params.viid_smt_extn_support = true;
4341 	else
4342 		sc->params.viid_smt_extn_support = false;
4343 
4344 	return (rc);
4345 }
4346 
4347 /*
4348  * Retrieve various parameters that are of interest to the driver.  The device
4349  * has been initialized by the firmware at this point.
4350  */
4351 static int
4352 get_params__post_init(struct adapter *sc)
4353 {
4354 	int rc;
4355 	uint32_t param[7], val[7];
4356 	struct fw_caps_config_cmd caps;
4357 
4358 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
4359 	param[1] = FW_PARAM_PFVF(EQ_START);
4360 	param[2] = FW_PARAM_PFVF(FILTER_START);
4361 	param[3] = FW_PARAM_PFVF(FILTER_END);
4362 	param[4] = FW_PARAM_PFVF(L2T_START);
4363 	param[5] = FW_PARAM_PFVF(L2T_END);
4364 	param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4365 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4366 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
4367 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
4368 	if (rc != 0) {
4369 		device_printf(sc->dev,
4370 		    "failed to query parameters (post_init): %d.\n", rc);
4371 		return (rc);
4372 	}
4373 
4374 	sc->sge.iq_start = val[0];
4375 	sc->sge.eq_start = val[1];
4376 	if ((int)val[3] > (int)val[2]) {
4377 		sc->tids.ftid_base = val[2];
4378 		sc->tids.ftid_end = val[3];
4379 		sc->tids.nftids = val[3] - val[2] + 1;
4380 	}
4381 	sc->vres.l2t.start = val[4];
4382 	sc->vres.l2t.size = val[5] - val[4] + 1;
4383 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
4384 	    ("%s: L2 table size (%u) larger than expected (%u)",
4385 	    __func__, sc->vres.l2t.size, L2T_SIZE));
4386 	sc->params.core_vdd = val[6];
4387 
4388 	if (chip_id(sc) >= CHELSIO_T6) {
4389 
4390 		sc->tids.tid_base = t4_read_reg(sc,
4391 		    A_LE_DB_ACTIVE_TABLE_START_INDEX);
4392 
4393 		param[0] = FW_PARAM_PFVF(HPFILTER_START);
4394 		param[1] = FW_PARAM_PFVF(HPFILTER_END);
4395 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4396 		if (rc != 0) {
4397 			device_printf(sc->dev,
4398 			   "failed to query hpfilter parameters: %d.\n", rc);
4399 			return (rc);
4400 		}
4401 		if ((int)val[1] > (int)val[0]) {
4402 			sc->tids.hpftid_base = val[0];
4403 			sc->tids.hpftid_end = val[1];
4404 			sc->tids.nhpftids = val[1] - val[0] + 1;
4405 
4406 			/*
4407 			 * These should go off if the layout changes and the
4408 			 * driver needs to catch up.
4409 			 */
4410 			MPASS(sc->tids.hpftid_base == 0);
4411 			MPASS(sc->tids.tid_base == sc->tids.nhpftids);
4412 		}
4413 	}
4414 
4415 	/*
4416 	 * MPSBGMAP is queried separately because only recent firmwares support
4417 	 * it as a parameter and we don't want the compound query above to fail
4418 	 * on older firmwares.
4419 	 */
4420 	param[0] = FW_PARAM_DEV(MPSBGMAP);
4421 	val[0] = 0;
4422 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4423 	if (rc == 0)
4424 		sc->params.mps_bg_map = val[0];
4425 	else
4426 		sc->params.mps_bg_map = 0;
4427 
4428 	/*
4429 	 * Determine whether the firmware supports the filter2 work request.
4430 	 * This is queried separately for the same reason as MPSBGMAP above.
4431 	 */
4432 	param[0] = FW_PARAM_DEV(FILTER2_WR);
4433 	val[0] = 0;
4434 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4435 	if (rc == 0)
4436 		sc->params.filter2_wr_support = val[0] != 0;
4437 	else
4438 		sc->params.filter2_wr_support = 0;
4439 
4440 	/*
4441 	 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
4442 	 * This is queried separately for the same reason as other params above.
4443 	 */
4444 	param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4445 	val[0] = 0;
4446 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4447 	if (rc == 0)
4448 		sc->params.ulptx_memwrite_dsgl = val[0] != 0;
4449 	else
4450 		sc->params.ulptx_memwrite_dsgl = false;
4451 
4452 	/* FW_RI_FR_NSMR_TPTE_WR support */
4453 	param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4454 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4455 	if (rc == 0)
4456 		sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
4457 	else
4458 		sc->params.fr_nsmr_tpte_wr_support = false;
4459 
4460 	/* get capabilities */
4461 	bzero(&caps, sizeof(caps));
4462 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4463 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
4464 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4465 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4466 	if (rc != 0) {
4467 		device_printf(sc->dev,
4468 		    "failed to get card capabilities: %d.\n", rc);
4469 		return (rc);
4470 	}
4471 
4472 #define READ_CAPS(x) do { \
4473 	sc->x = be16toh(caps.x); \
4474 } while (0)
4475 	READ_CAPS(nbmcaps);
4476 	READ_CAPS(linkcaps);
4477 	READ_CAPS(switchcaps);
4478 	READ_CAPS(niccaps);
4479 	READ_CAPS(toecaps);
4480 	READ_CAPS(rdmacaps);
4481 	READ_CAPS(cryptocaps);
4482 	READ_CAPS(iscsicaps);
4483 	READ_CAPS(fcoecaps);
4484 
4485 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
4486 		MPASS(chip_id(sc) > CHELSIO_T4);
4487 		MPASS(sc->toecaps == 0);
4488 		sc->toecaps = 0;
4489 
4490 		param[0] = FW_PARAM_DEV(NTID);
4491 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4492 		if (rc != 0) {
4493 			device_printf(sc->dev,
4494 			    "failed to query HASHFILTER parameters: %d.\n", rc);
4495 			return (rc);
4496 		}
4497 		sc->tids.ntids = val[0];
4498 		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
4499 			MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4500 			sc->tids.ntids -= sc->tids.nhpftids;
4501 		}
4502 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4503 		sc->params.hash_filter = 1;
4504 	}
4505 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
4506 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
4507 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
4508 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4509 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
4510 		if (rc != 0) {
4511 			device_printf(sc->dev,
4512 			    "failed to query NIC parameters: %d.\n", rc);
4513 			return (rc);
4514 		}
4515 		if ((int)val[1] > (int)val[0]) {
4516 			sc->tids.etid_base = val[0];
4517 			sc->tids.etid_end = val[1];
4518 			sc->tids.netids = val[1] - val[0] + 1;
4519 			sc->params.eo_wr_cred = val[2];
4520 			sc->params.ethoffload = 1;
4521 		}
4522 	}
4523 	if (sc->toecaps) {
4524 		/* query offload-related parameters */
4525 		param[0] = FW_PARAM_DEV(NTID);
4526 		param[1] = FW_PARAM_PFVF(SERVER_START);
4527 		param[2] = FW_PARAM_PFVF(SERVER_END);
4528 		param[3] = FW_PARAM_PFVF(TDDP_START);
4529 		param[4] = FW_PARAM_PFVF(TDDP_END);
4530 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4531 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4532 		if (rc != 0) {
4533 			device_printf(sc->dev,
4534 			    "failed to query TOE parameters: %d.\n", rc);
4535 			return (rc);
4536 		}
4537 		sc->tids.ntids = val[0];
4538 		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
4539 			MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4540 			sc->tids.ntids -= sc->tids.nhpftids;
4541 		}
4542 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4543 		if ((int)val[2] > (int)val[1]) {
4544 			sc->tids.stid_base = val[1];
4545 			sc->tids.nstids = val[2] - val[1] + 1;
4546 		}
4547 		sc->vres.ddp.start = val[3];
4548 		sc->vres.ddp.size = val[4] - val[3] + 1;
4549 		sc->params.ofldq_wr_cred = val[5];
4550 		sc->params.offload = 1;
4551 	} else {
4552 		/*
4553 		 * The firmware attempts memfree TOE configuration for -SO cards
4554 		 * and will report toecaps=0 if it runs out of resources (this
4555 		 * depends on the config file).  It may not report 0 for other
4556 		 * capabilities dependent on the TOE in this case.  Set them to
4557 		 * 0 here so that the driver doesn't bother tracking resources
4558 		 * that will never be used.
4559 		 */
4560 		sc->iscsicaps = 0;
4561 		sc->rdmacaps = 0;
4562 	}
4563 	if (sc->rdmacaps) {
4564 		param[0] = FW_PARAM_PFVF(STAG_START);
4565 		param[1] = FW_PARAM_PFVF(STAG_END);
4566 		param[2] = FW_PARAM_PFVF(RQ_START);
4567 		param[3] = FW_PARAM_PFVF(RQ_END);
4568 		param[4] = FW_PARAM_PFVF(PBL_START);
4569 		param[5] = FW_PARAM_PFVF(PBL_END);
4570 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4571 		if (rc != 0) {
4572 			device_printf(sc->dev,
4573 			    "failed to query RDMA parameters(1): %d.\n", rc);
4574 			return (rc);
4575 		}
4576 		sc->vres.stag.start = val[0];
4577 		sc->vres.stag.size = val[1] - val[0] + 1;
4578 		sc->vres.rq.start = val[2];
4579 		sc->vres.rq.size = val[3] - val[2] + 1;
4580 		sc->vres.pbl.start = val[4];
4581 		sc->vres.pbl.size = val[5] - val[4] + 1;
4582 
4583 		param[0] = FW_PARAM_PFVF(SQRQ_START);
4584 		param[1] = FW_PARAM_PFVF(SQRQ_END);
4585 		param[2] = FW_PARAM_PFVF(CQ_START);
4586 		param[3] = FW_PARAM_PFVF(CQ_END);
4587 		param[4] = FW_PARAM_PFVF(OCQ_START);
4588 		param[5] = FW_PARAM_PFVF(OCQ_END);
4589 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4590 		if (rc != 0) {
4591 			device_printf(sc->dev,
4592 			    "failed to query RDMA parameters(2): %d.\n", rc);
4593 			return (rc);
4594 		}
4595 		sc->vres.qp.start = val[0];
4596 		sc->vres.qp.size = val[1] - val[0] + 1;
4597 		sc->vres.cq.start = val[2];
4598 		sc->vres.cq.size = val[3] - val[2] + 1;
4599 		sc->vres.ocq.start = val[4];
4600 		sc->vres.ocq.size = val[5] - val[4] + 1;
4601 
4602 		param[0] = FW_PARAM_PFVF(SRQ_START);
4603 		param[1] = FW_PARAM_PFVF(SRQ_END);
4604 		param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4605 		param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4606 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4607 		if (rc != 0) {
4608 			device_printf(sc->dev,
4609 			    "failed to query RDMA parameters(3): %d.\n", rc);
4610 			return (rc);
4611 		}
4612 		sc->vres.srq.start = val[0];
4613 		sc->vres.srq.size = val[1] - val[0] + 1;
4614 		sc->params.max_ordird_qp = val[2];
4615 		sc->params.max_ird_adapter = val[3];
4616 	}
4617 	if (sc->iscsicaps) {
4618 		param[0] = FW_PARAM_PFVF(ISCSI_START);
4619 		param[1] = FW_PARAM_PFVF(ISCSI_END);
4620 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4621 		if (rc != 0) {
4622 			device_printf(sc->dev,
4623 			    "failed to query iSCSI parameters: %d.\n", rc);
4624 			return (rc);
4625 		}
4626 		sc->vres.iscsi.start = val[0];
4627 		sc->vres.iscsi.size = val[1] - val[0] + 1;
4628 	}
4629 	if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4630 		param[0] = FW_PARAM_PFVF(TLS_START);
4631 		param[1] = FW_PARAM_PFVF(TLS_END);
4632 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4633 		if (rc != 0) {
4634 			device_printf(sc->dev,
4635 			    "failed to query TLS parameters: %d.\n", rc);
4636 			return (rc);
4637 		}
4638 		sc->vres.key.start = val[0];
4639 		sc->vres.key.size = val[1] - val[0] + 1;
4640 	}
4641 
4642 	t4_init_sge_params(sc);
4643 
4644 	/*
4645 	 * We've got the params we wanted to query via the firmware.  Now grab
4646 	 * some others directly from the chip.
4647 	 */
4648 	rc = t4_read_chip_settings(sc);
4649 
4650 	return (rc);
4651 }
4652 
4653 #ifdef KERN_TLS
4654 static void
4655 ktls_tick(void *arg)
4656 {
4657 	struct adapter *sc;
4658 	uint32_t tstamp;
4659 
4660 	sc = arg;
4661 
4662 	tstamp = tcp_ts_getticks();
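	/*
	 * The 32-bit timestamp is split across two registers: bits 31..1
	 * go into TP_SYNC_TIME_HI and bit 0 into the top bit of
	 * TP_SYNC_TIME_LO.
	 */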
4663 	t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
4664 	t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
4665 
4666 	callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
4667 }
4668 
4669 static void
4670 t4_enable_kern_tls(struct adapter *sc)
4671 {
4672 	uint32_t m, v;
4673 
4674 	m = F_ENABLECBYP;
4675 	v = F_ENABLECBYP;
4676 	t4_set_reg_field(sc, A_TP_PARA_REG6, m, v);
4677 
4678 	m = F_CPL_FLAGS_UPDATE_EN | F_SEQ_UPDATE_EN;
4679 	v = F_CPL_FLAGS_UPDATE_EN | F_SEQ_UPDATE_EN;
4680 	t4_set_reg_field(sc, A_ULP_TX_CONFIG, m, v);
4681 
4682 	m = F_NICMODE;
4683 	v = F_NICMODE;
4684 	t4_set_reg_field(sc, A_TP_IN_CONFIG, m, v);
4685 
4686 	m = F_LOOKUPEVERYPKT;
4687 	v = 0;
4688 	t4_set_reg_field(sc, A_TP_INGRESS_CONFIG, m, v);
4689 
4690 	m = F_TXDEFERENABLE | F_DISABLEWINDOWPSH | F_DISABLESEPPSHFLAG;
4691 	v = F_DISABLEWINDOWPSH;
4692 	t4_set_reg_field(sc, A_TP_PC_CONFIG, m, v);
4693 
4694 	m = V_TIMESTAMPRESOLUTION(M_TIMESTAMPRESOLUTION);
4695 	v = V_TIMESTAMPRESOLUTION(0x1f);
4696 	t4_set_reg_field(sc, A_TP_TIMER_RESOLUTION, m, v);
4697 
4698 	sc->flags |= KERN_TLS_OK;
4699 
4700 	sc->tlst.inline_keys = t4_tls_inline_keys;
4701 	sc->tlst.combo_wrs = t4_tls_combo_wrs;
4702 }
4703 #endif
4704 
4705 static int
4706 set_params__post_init(struct adapter *sc)
4707 {
4708 	uint32_t param, val;
4709 #ifdef TCP_OFFLOAD
4710 	int i, v, shift;
4711 #endif
4712 
4713 	/* ask for encapsulated CPLs */
4714 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4715 	val = 1;
4716 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4717 
4718 	/* Enable 32b port caps if the firmware supports it. */
4719 	param = FW_PARAM_PFVF(PORT_CAPS32);
4720 	val = 1;
4721 	if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
4722 		sc->params.port_caps32 = 1;
4723 
4724 	/* Let filter + maskhash steer to a part of the VI's RSS region. */
4725 	val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
4726 	t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
4727 	    V_MASKFILTER(val - 1));
4728 
4729 #ifdef TCP_OFFLOAD
4730 	/*
4731 	 * Override the TOE timers with user-provided tunables.  This is not the
4732 	 * recommended way to change the timers (the firmware config file is) so
4733 	 * these tunables are not documented.
4734 	 *
4735 	 * All the timer tunables are in microseconds.
4736 	 */
4737 	if (t4_toe_keepalive_idle != 0) {
4738 		v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4739 		v &= M_KEEPALIVEIDLE;
4740 		t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4741 		    V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4742 	}
4743 	if (t4_toe_keepalive_interval != 0) {
4744 		v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4745 		v &= M_KEEPALIVEINTVL;
4746 		t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4747 		    V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4748 	}
4749 	if (t4_toe_keepalive_count != 0) {
4750 		v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4751 		t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4752 		    V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4753 		    V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4754 		    V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4755 	}
4756 	if (t4_toe_rexmt_min != 0) {
4757 		v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4758 		v &= M_RXTMIN;
4759 		t4_set_reg_field(sc, A_TP_RXT_MIN,
4760 		    V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4761 	}
4762 	if (t4_toe_rexmt_max != 0) {
4763 		v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4764 		v &= M_RXTMAX;
4765 		t4_set_reg_field(sc, A_TP_RXT_MAX,
4766 		    V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4767 	}
4768 	if (t4_toe_rexmt_count != 0) {
4769 		v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4770 		t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4771 		    V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4772 		    V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4773 		    V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4774 	}
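	/*
	 * Each TP_TCP_BACKOFF register packs four 8-bit backoff indices:
	 * (i & ~3) selects the register and (i & 3) << 3 the bit position
	 * of the byte within it.
	 */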
4775 	for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4776 		if (t4_toe_rexmt_backoff[i] != -1) {
4777 			v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4778 			shift = (i & 3) << 3;
4779 			t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4780 			    M_TIMERBACKOFFINDEX0 << shift, v << shift);
4781 		}
4782 	}
4783 #endif
4784 
4785 #ifdef KERN_TLS
4786 	if (t4_kern_tls != 0 && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
4787 	    sc->toecaps & FW_CAPS_CONFIG_TOE)
4788 		t4_enable_kern_tls(sc);
4789 #endif
4790 	return (0);
4791 }
4792 
4793 #undef FW_PARAM_PFVF
4794 #undef FW_PARAM_DEV
4795 
4796 static void
4797 t4_set_desc(struct adapter *sc)
4798 {
4799 	char buf[128];
4800 	struct adapter_params *p = &sc->params;
4801 
4802 	snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4803 
4804 	device_set_desc_copy(sc->dev, buf);
4805 }
4806 
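/*
 * Add a media word together with its three flow-control variants (TX
 * pause, RX pause, and both) so that every pause combination is
 * selectable.
 */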
4807 static inline void
4808 ifmedia_add4(struct ifmedia *ifm, int m)
4809 {
4810 
4811 	ifmedia_add(ifm, m, 0, NULL);
4812 	ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4813 	ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4814 	ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
4815 }
4816 
4817 /*
4818  * This is the selected media, which is not quite the same as the active media.
4819  * The media line in ifconfig is "media: Ethernet selected (active)" if selected
4820  * and active are not the same, and "media: Ethernet selected" otherwise.
4821  */
4822 static void
4823 set_current_media(struct port_info *pi)
4824 {
4825 	struct link_config *lc;
4826 	struct ifmedia *ifm;
4827 	int mword;
4828 	u_int speed;
4829 
4830 	PORT_LOCK_ASSERT_OWNED(pi);
4831 
4832 	/* Leave current media alone if it's already set to IFM_NONE. */
4833 	ifm = &pi->media;
4834 	if (ifm->ifm_cur != NULL &&
4835 	    IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4836 		return;
4837 
4838 	lc = &pi->link_cfg;
4839 	if (lc->requested_aneg != AUTONEG_DISABLE &&
4840 	    lc->pcaps & FW_PORT_CAP32_ANEG) {
4841 		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4842 		return;
4843 	}
4844 	mword = IFM_ETHER | IFM_FDX;
4845 	if (lc->requested_fc & PAUSE_TX)
4846 		mword |= IFM_ETH_TXPAUSE;
4847 	if (lc->requested_fc & PAUSE_RX)
4848 		mword |= IFM_ETH_RXPAUSE;
4849 	if (lc->requested_speed == 0)
4850 		speed = port_top_speed(pi) * 1000;	/* Gbps -> Mbps */
4851 	else
4852 		speed = lc->requested_speed;
4853 	mword |= port_mword(pi, speed_to_fwcap(speed));
4854 	ifmedia_set(ifm, mword);
4855 }
4856 
4857 /*
4858  * Returns true if the ifmedia list for the port cannot change.
4859  */
4860 static bool
4861 fixed_ifmedia(struct port_info *pi)
4862 {
4863 
4864 	return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
4865 	    pi->port_type == FW_PORT_TYPE_BT_XFI ||
4866 	    pi->port_type == FW_PORT_TYPE_BT_XAUI ||
4867 	    pi->port_type == FW_PORT_TYPE_KX4 ||
4868 	    pi->port_type == FW_PORT_TYPE_KX ||
4869 	    pi->port_type == FW_PORT_TYPE_KR ||
4870 	    pi->port_type == FW_PORT_TYPE_BP_AP ||
4871 	    pi->port_type == FW_PORT_TYPE_BP4_AP ||
4872 	    pi->port_type == FW_PORT_TYPE_BP40_BA ||
4873 	    pi->port_type == FW_PORT_TYPE_KR4_100G ||
4874 	    pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
4875 	    pi->port_type == FW_PORT_TYPE_KR_XLAUI);
4876 }
4877 
4878 static void
4879 build_medialist(struct port_info *pi)
4880 {
4881 	uint32_t ss, speed;
4882 	int unknown, mword, bit;
4883 	struct link_config *lc;
4884 	struct ifmedia *ifm;
4885 
4886 	PORT_LOCK_ASSERT_OWNED(pi);
4887 
4888 	if (pi->flags & FIXED_IFMEDIA)
4889 		return;
4890 
4891 	/*
4892 	 * Rebuild the ifmedia list.
4893 	 */
4894 	ifm = &pi->media;
4895 	ifmedia_removeall(ifm);
4896 	lc = &pi->link_cfg;
4897 	ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
4898 	if (__predict_false(ss == 0)) {	/* not supposed to happen. */
4899 		MPASS(ss != 0);
4900 no_media:
4901 		MPASS(LIST_EMPTY(&ifm->ifm_list));
4902 		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4903 		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4904 		return;
4905 	}
4906 
4907 	unknown = 0;
4908 	for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
4909 		speed = 1 << bit;
4910 		MPASS(speed & M_FW_PORT_CAP32_SPEED);
4911 		if (ss & speed) {
4912 			mword = port_mword(pi, speed);
4913 			if (mword == IFM_NONE) {
4914 				goto no_media;
4915 			} else if (mword == IFM_UNKNOWN)
4916 				unknown++;
4917 			else
4918 				ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4919 		}
4920 	}
4921 	if (unknown > 0) /* Add one unknown for all unknown media types. */
4922 		ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4923 	if (lc->pcaps & FW_PORT_CAP32_ANEG)
4924 		ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4925 
4926 	set_current_media(pi);
4927 }
4928 
4929 /*
4930  * Initialize the requested fields in the link config based on driver tunables.
4931  */
4932 static void
4933 init_link_config(struct port_info *pi)
4934 {
4935 	struct link_config *lc = &pi->link_cfg;
4936 
4937 	PORT_LOCK_ASSERT_OWNED(pi);
4938 
4939 	lc->requested_speed = 0;
4940 
4941 	if (t4_autoneg == 0)
4942 		lc->requested_aneg = AUTONEG_DISABLE;
4943 	else if (t4_autoneg == 1)
4944 		lc->requested_aneg = AUTONEG_ENABLE;
4945 	else
4946 		lc->requested_aneg = AUTONEG_AUTO;
4947 
4948 	lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
4949 	    PAUSE_AUTONEG);
4950 
4951 	if (t4_fec & FEC_AUTO)
4952 		lc->requested_fec = FEC_AUTO;
4953 	else if (t4_fec == 0)
4954 		lc->requested_fec = FEC_NONE;
4955 	else {
4956 		/* -1 is handled by the FEC_AUTO block above and not here. */
4957 		lc->requested_fec = t4_fec &
4958 		    (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
4959 		if (lc->requested_fec == 0)
4960 			lc->requested_fec = FEC_AUTO;
4961 	}
4962 }
4963 
4964 /*
4965  * Makes sure that all requested settings comply with what's supported by the
4966  * port.  Returns the number of settings that were invalid and had to be fixed.
4967  */
4968 static int
4969 fixup_link_config(struct port_info *pi)
4970 {
4971 	int n = 0;
4972 	struct link_config *lc = &pi->link_cfg;
4973 	uint32_t fwspeed;
4974 
4975 	PORT_LOCK_ASSERT_OWNED(pi);
4976 
4977 	/* Speed (when not autonegotiating) */
4978 	if (lc->requested_speed != 0) {
4979 		fwspeed = speed_to_fwcap(lc->requested_speed);
4980 		if ((fwspeed & lc->pcaps) == 0) {
4981 			n++;
4982 			lc->requested_speed = 0;
4983 		}
4984 	}
4985 
4986 	/* Link autonegotiation */
4987 	MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
4988 	    lc->requested_aneg == AUTONEG_DISABLE ||
4989 	    lc->requested_aneg == AUTONEG_AUTO);
4990 	if (lc->requested_aneg == AUTONEG_ENABLE &&
4991 	    !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4992 		n++;
4993 		lc->requested_aneg = AUTONEG_AUTO;
4994 	}
4995 
4996 	/* Flow control */
4997 	MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
4998 	if (lc->requested_fc & PAUSE_TX &&
4999 	    !(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
5000 		n++;
5001 		lc->requested_fc &= ~PAUSE_TX;
5002 	}
5003 	if (lc->requested_fc & PAUSE_RX &&
5004 	    !(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
5005 		n++;
5006 		lc->requested_fc &= ~PAUSE_RX;
5007 	}
5008 	if (!(lc->requested_fc & PAUSE_AUTONEG) &&
5009 	    !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
5010 		n++;
5011 		lc->requested_fc |= PAUSE_AUTONEG;
5012 	}
5013 
5014 	/* FEC */
5015 	if ((lc->requested_fec & FEC_RS &&
5016 	    !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
5017 	    (lc->requested_fec & FEC_BASER_RS &&
5018 	    !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
5019 		n++;
5020 		lc->requested_fec = FEC_AUTO;
5021 	}
5022 
5023 	return (n);
5024 }
5025 
5026 /*
5027  * Apply the requested L1 settings, which are expected to be valid, to the
5028  * hardware.
5029  */
5030 static int
5031 apply_link_config(struct port_info *pi)
5032 {
5033 	struct adapter *sc = pi->adapter;
5034 	struct link_config *lc = &pi->link_cfg;
5035 	int rc;
5036 
5037 #ifdef INVARIANTS
5038 	ASSERT_SYNCHRONIZED_OP(sc);
5039 	PORT_LOCK_ASSERT_OWNED(pi);
5040 
5041 	if (lc->requested_aneg == AUTONEG_ENABLE)
5042 		MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
5043 	if (!(lc->requested_fc & PAUSE_AUTONEG))
5044 		MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
5045 	if (lc->requested_fc & PAUSE_TX)
5046 		MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
5047 	if (lc->requested_fc & PAUSE_RX)
5048 		MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
5049 	if (lc->requested_fec & FEC_RS)
5050 		MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
5051 	if (lc->requested_fec & FEC_BASER_RS)
5052 		MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
5053 #endif
5054 	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5055 	if (rc != 0) {
5056 		/* Don't complain if the VF driver gets back an EPERM. */
5057 		if (!(sc->flags & IS_VF) || rc != FW_EPERM)
5058 			device_printf(pi->dev, "l1cfg failed: %d\n", rc);
5059 	} else {
5060 		/*
5061 		 * An L1_CFG will almost always result in a link-change event if
5062 		 * the link is up, and the driver will refresh the actual
5063 		 * fec/fc/etc. when the notification is processed.  If the link
5064 		 * is down then the actual settings are meaningless.
5065 		 *
5066 		 * This takes care of the case where a change in the L1 settings
5067 		 * may not result in a notification.
5068 		 */
5069 		if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
5070 			lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
5071 	}
5072 	return (rc);
5073 }
5074 
5075 #define FW_MAC_EXACT_CHUNK	7
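/*
 * add_maddr() below accumulates multicast addresses and flushes them to
 * the firmware with t4_alloc_mac_filt() every FW_MAC_EXACT_CHUNK entries;
 * whatever spills into the hash is programmed later via
 * t4_set_addr_hash().
 */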
5076 struct mcaddr_ctx {
5077 	struct ifnet *ifp;
5078 	const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
5079 	uint64_t hash;
5080 	int i;
5081 	int del;
5082 	int rc;
5083 };
5084 
5085 static u_int
5086 add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
5087 {
5088 	struct mcaddr_ctx *ctx = arg;
5089 	struct vi_info *vi = ctx->ifp->if_softc;
5090 	struct port_info *pi = vi->pi;
5091 	struct adapter *sc = pi->adapter;
5092 
5093 	if (ctx->rc < 0)
5094 		return (0);
5095 
5096 	ctx->mcaddr[ctx->i] = LLADDR(sdl);
5097 	MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
5098 	ctx->i++;
5099 
5100 	if (ctx->i == FW_MAC_EXACT_CHUNK) {
5101 		ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
5102 		    ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
5103 		if (ctx->rc < 0) {
5104 			int j;
5105 
5106 			for (j = 0; j < ctx->i; j++) {
5107 				if_printf(ctx->ifp,
5108 				    "failed to add mc address"
5109 				    " %02x:%02x:%02x:"
5110 				    "%02x:%02x:%02x rc=%d\n",
5111 				    ctx->mcaddr[j][0], ctx->mcaddr[j][1],
5112 				    ctx->mcaddr[j][2], ctx->mcaddr[j][3],
5113 				    ctx->mcaddr[j][4], ctx->mcaddr[j][5],
5114 				    -ctx->rc);
5115 			}
5116 			return (0);
5117 		}
5118 		ctx->del = 0;
5119 		ctx->i = 0;
5120 	}
5121 
5122 	return (1);
5123 }
5124 
5125 /*
5126  * Program the port's XGMAC based on parameters in ifnet.  The caller also
5127  * indicates which parameters should be programmed (the rest are left alone).
5128  */
5129 int
5130 update_mac_settings(struct ifnet *ifp, int flags)
5131 {
5132 	int rc = 0;
5133 	struct vi_info *vi = ifp->if_softc;
5134 	struct port_info *pi = vi->pi;
5135 	struct adapter *sc = pi->adapter;
5136 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
5137 
5138 	ASSERT_SYNCHRONIZED_OP(sc);
5139 	KASSERT(flags, ("%s: not told what to update.", __func__));
5140 
5141 	if (flags & XGMAC_MTU)
5142 		mtu = ifp->if_mtu;
5143 
5144 	if (flags & XGMAC_PROMISC)
5145 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
5146 
5147 	if (flags & XGMAC_ALLMULTI)
5148 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
5149 
5150 	if (flags & XGMAC_VLANEX)
5151 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
5152 
5153 	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
5154 		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
5155 		    allmulti, 1, vlanex, false);
5156 		if (rc) {
5157 			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
5158 			    rc);
5159 			return (rc);
5160 		}
5161 	}
5162 
5163 	if (flags & XGMAC_UCADDR) {
5164 		uint8_t ucaddr[ETHER_ADDR_LEN];
5165 
5166 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
5167 		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
5168 		    ucaddr, true, &vi->smt_idx);
5169 		if (rc < 0) {
5170 			rc = -rc;
5171 			if_printf(ifp, "change_mac failed: %d\n", rc);
5172 			return (rc);
5173 		} else {
5174 			vi->xact_addr_filt = rc;
5175 			rc = 0;
5176 		}
5177 	}
5178 
5179 	if (flags & XGMAC_MCADDRS) {
5180 		struct epoch_tracker et;
5181 		struct mcaddr_ctx ctx;
5182 		int j;
5183 
5184 		ctx.ifp = ifp;
5185 		ctx.hash = 0;
5186 		ctx.i = 0;
5187 		ctx.del = 1;
5188 		ctx.rc = 0;
5189 		/*
5190 		 * Unlike other drivers, we accumulate a list of pointers into
5191 		 * the interface's address lists and need to keep it safe even
5192 		 * after if_foreach_llmaddr() returns, so we must enter the
5193 		 * network epoch.
5194 		 */
5195 		NET_EPOCH_ENTER(et);
5196 		if_foreach_llmaddr(ifp, add_maddr, &ctx);
5197 		if (ctx.rc < 0) {
5198 			NET_EPOCH_EXIT(et);
5199 			rc = -ctx.rc;
5200 			return (rc);
5201 		}
5202 		if (ctx.i > 0) {
5203 			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
5204 			    ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
5205 			NET_EPOCH_EXIT(et);
5206 			if (rc < 0) {
5207 				rc = -rc;
5208 				for (j = 0; j < ctx.i; j++) {
5209 					if_printf(ifp,
5210 					    "failed to add mc address"
5211 					    " %02x:%02x:%02x:"
5212 					    "%02x:%02x:%02x rc=%d\n",
5213 					    ctx.mcaddr[j][0], ctx.mcaddr[j][1],
5214 					    ctx.mcaddr[j][2], ctx.mcaddr[j][3],
5215 					    ctx.mcaddr[j][4], ctx.mcaddr[j][5],
5216 					    rc);
5217 				}
5218 				return (rc);
5219 			}
5220 		} else
5221 			NET_EPOCH_EXIT(et);
5222 
5223 		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
5224 		if (rc != 0)
5225 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
5226 	}
5227 
5228 	return (rc);
5229 }
5230 
5231 /*
5232  * {begin|end}_synchronized_op must be called from the same thread.
5233  */
5234 int
5235 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
5236     char *wmesg)
5237 {
5238 	int rc, pri;
5239 
5240 #ifdef WITNESS
5241 	/* the caller thinks it's ok to sleep, but is it really? */
5242 	if (flags & SLEEP_OK)
5243 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
5244 		    "begin_synchronized_op");
5245 #endif
5246 
5247 	if (flags & INTR_OK)
5248 		pri = PCATCH;
5249 	else
5250 		pri = 0;
5251 
5252 	ADAPTER_LOCK(sc);
5253 	for (;;) {
5254 
5255 		if (vi && IS_DOOMED(vi)) {
5256 			rc = ENXIO;
5257 			goto done;
5258 		}
5259 
5260 		if (!IS_BUSY(sc)) {
5261 			rc = 0;
5262 			break;
5263 		}
5264 
5265 		if (!(flags & SLEEP_OK)) {
5266 			rc = EBUSY;
5267 			goto done;
5268 		}
5269 
5270 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
5271 			rc = EINTR;
5272 			goto done;
5273 		}
5274 	}
5275 
5276 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
5277 	SET_BUSY(sc);
5278 #ifdef INVARIANTS
5279 	sc->last_op = wmesg;
5280 	sc->last_op_thr = curthread;
5281 	sc->last_op_flags = flags;
5282 #endif
5283 
5284 done:
5285 	if (!(flags & HOLD_LOCK) || rc)
5286 		ADAPTER_UNLOCK(sc);
5287 
5288 	return (rc);
5289 }
5290 
5291 /*
5292  * Tell if_ioctl and if_init that the VI is going away.  This is
5293  * a special variant of begin_synchronized_op and must be paired with a
5294  * call to end_synchronized_op.
5295  */
5296 void
5297 doom_vi(struct adapter *sc, struct vi_info *vi)
5298 {
5299 
5300 	ADAPTER_LOCK(sc);
5301 	SET_DOOMED(vi);
5302 	wakeup(&sc->flags);
5303 	while (IS_BUSY(sc))
5304 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
5305 	SET_BUSY(sc);
5306 #ifdef INVARIANTS
5307 	sc->last_op = "t4detach";
5308 	sc->last_op_thr = curthread;
5309 	sc->last_op_flags = 0;
5310 #endif
5311 	ADAPTER_UNLOCK(sc);
5312 }
5313 
5314 /*
5315  * {begin|end}_synchronized_op must be called from the same thread.
5316  */
5317 void
5318 end_synchronized_op(struct adapter *sc, int flags)
5319 {
5320 
5321 	if (flags & LOCK_HELD)
5322 		ADAPTER_LOCK_ASSERT_OWNED(sc);
5323 	else
5324 		ADAPTER_LOCK(sc);
5325 
5326 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
5327 	CLR_BUSY(sc);
5328 	wakeup(&sc->flags);
5329 	ADAPTER_UNLOCK(sc);
5330 }
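
/*
 * Usage sketch (illustrative; "t4xyz" is a made-up wmesg):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc != 0)
 *		return (rc);
 *	... the adapter is now marked busy, operate on it ...
 *	end_synchronized_op(sc, 0);
 */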
5331 
5332 static int
5333 cxgbe_init_synchronized(struct vi_info *vi)
5334 {
5335 	struct port_info *pi = vi->pi;
5336 	struct adapter *sc = pi->adapter;
5337 	struct ifnet *ifp = vi->ifp;
5338 	int rc = 0, i;
5339 	struct sge_txq *txq;
5340 
5341 	ASSERT_SYNCHRONIZED_OP(sc);
5342 
5343 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5344 		return (0);	/* already running */
5345 
5346 	if (!(sc->flags & FULL_INIT_DONE) &&
5347 	    ((rc = adapter_full_init(sc)) != 0))
5348 		return (rc);	/* error message displayed already */
5349 
5350 	if (!(vi->flags & VI_INIT_DONE) &&
5351 	    ((rc = vi_full_init(vi)) != 0))
5352 		return (rc); /* error message displayed already */
5353 
5354 	rc = update_mac_settings(ifp, XGMAC_ALL);
5355 	if (rc)
5356 		goto done;	/* error message displayed already */
5357 
5358 	PORT_LOCK(pi);
5359 	if (pi->up_vis == 0) {
5360 		t4_update_port_info(pi);
5361 		fixup_link_config(pi);
5362 		build_medialist(pi);
5363 		apply_link_config(pi);
5364 	}
5365 
5366 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
5367 	if (rc != 0) {
5368 		if_printf(ifp, "enable_vi failed: %d\n", rc);
5369 		PORT_UNLOCK(pi);
5370 		goto done;
5371 	}
5372 
5373 	/*
5374 	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
5375 	 * if this changes.
5376 	 */
5377 
5378 	for_each_txq(vi, i, txq) {
5379 		TXQ_LOCK(txq);
5380 		txq->eq.flags |= EQ_ENABLED;
5381 		TXQ_UNLOCK(txq);
5382 	}
5383 
5384 	/*
5385 	 * The first iq of the first port to come up is used for tracing.
5386 	 */
5387 	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
5388 		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
5389 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
5390 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
5391 		    V_QUEUENUMBER(sc->traceq));
5392 		pi->flags |= HAS_TRACEQ;
5393 	}
5394 
5395 	/* all ok */
5396 	pi->up_vis++;
5397 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
5398 
5399 	if (pi->nvi > 1 || sc->flags & IS_VF)
5400 		callout_reset(&vi->tick, hz, vi_tick, vi);
5401 	else
5402 		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
5403 	if (pi->link_cfg.link_ok)
5404 		t4_os_link_changed(pi);
5405 	PORT_UNLOCK(pi);
5406 done:
5407 	if (rc != 0)
5408 		cxgbe_uninit_synchronized(vi);
5409 
5410 	return (rc);
5411 }
5412 
5413 /*
5414  * Idempotent.
5415  */
5416 static int
5417 cxgbe_uninit_synchronized(struct vi_info *vi)
5418 {
5419 	struct port_info *pi = vi->pi;
5420 	struct adapter *sc = pi->adapter;
5421 	struct ifnet *ifp = vi->ifp;
5422 	int rc, i;
5423 	struct sge_txq *txq;
5424 
5425 	ASSERT_SYNCHRONIZED_OP(sc);
5426 
5427 	if (!(vi->flags & VI_INIT_DONE)) {
5428 		if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5429 			KASSERT(0, ("uninited VI is running"));
5430 			if_printf(ifp, "uninited VI with running ifnet.  "
5431 			    "vi->flags 0x%016lx, if_flags 0x%08x, "
5432 			    "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
5433 			    ifp->if_drv_flags);
5434 		}
5435 		return (0);
5436 	}
5437 
5438 	/*
5439 	 * Disable the VI so that all its data in either direction is discarded
5440 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
5441 	 * tick) intact as the TP can deliver negative advice or data that it's
5442 	 * holding in its RAM (for an offloaded connection) even after the VI is
5443 	 * disabled.
5444 	 */
5445 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
5446 	if (rc) {
5447 		if_printf(ifp, "disable_vi failed: %d\n", rc);
5448 		return (rc);
5449 	}
5450 
5451 	for_each_txq(vi, i, txq) {
5452 		TXQ_LOCK(txq);
5453 		txq->eq.flags &= ~EQ_ENABLED;
5454 		TXQ_UNLOCK(txq);
5455 	}
5456 
5457 	PORT_LOCK(pi);
5458 	if (pi->nvi > 1 || sc->flags & IS_VF)
5459 		callout_stop(&vi->tick);
5460 	else
5461 		callout_stop(&pi->tick);
5462 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5463 		PORT_UNLOCK(pi);
5464 		return (0);
5465 	}
5466 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5467 	pi->up_vis--;
5468 	if (pi->up_vis > 0) {
5469 		PORT_UNLOCK(pi);
5470 		return (0);
5471 	}
5472 
5473 	pi->link_cfg.link_ok = false;
5474 	pi->link_cfg.speed = 0;
5475 	pi->link_cfg.link_down_rc = 255;
5476 	t4_os_link_changed(pi);
5477 	PORT_UNLOCK(pi);
5478 
5479 	return (0);
5480 }
5481 
5482 /*
5483  * It is ok for this function to fail midway and return right away.  t4_detach
5484  * will walk the entire sc->irq list and clean up whatever is valid.
5485  */
5486 int
5487 t4_setup_intr_handlers(struct adapter *sc)
5488 {
5489 	int rc, rid, p, q, v;
5490 	char s[8];
5491 	struct irq *irq;
5492 	struct port_info *pi;
5493 	struct vi_info *vi;
5494 	struct sge *sge = &sc->sge;
5495 	struct sge_rxq *rxq;
5496 #ifdef TCP_OFFLOAD
5497 	struct sge_ofld_rxq *ofld_rxq;
5498 #endif
5499 #ifdef DEV_NETMAP
5500 	struct sge_nm_rxq *nm_rxq;
5501 #endif
5502 #ifdef RSS
5503 	int nbuckets = rss_getnumbuckets();
5504 #endif
5505 
5506 	/*
5507 	 * Setup interrupts.
5508 	 */
5509 	irq = &sc->irq[0];
5510 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
5511 	if (forwarding_intr_to_fwq(sc))
5512 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
5513 
5514 	/* Multiple interrupts. */
5515 	if (sc->flags & IS_VF)
5516 		KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
5517 		    ("%s: too few intr.", __func__));
5518 	else
5519 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
5520 		    ("%s: too few intr.", __func__));
5521 
5522 	/* The first one is always error intr on PFs */
5523 	if (!(sc->flags & IS_VF)) {
5524 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
5525 		if (rc != 0)
5526 			return (rc);
5527 		irq++;
5528 		rid++;
5529 	}
5530 
5531 	/* The second one is always the firmware event queue (first on VFs) */
5532 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
5533 	if (rc != 0)
5534 		return (rc);
5535 	irq++;
5536 	rid++;
5537 
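	/*
	 * Queue interrupts are named "<port><vi><queue>": port and queue in
	 * hex, the VI as a letter (upper case for TOE queues).  To pick a
	 * hypothetical example, rx queue 10 of the third VI on port 1 is
	 * described as "1ca".
	 */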
5538 	for_each_port(sc, p) {
5539 		pi = sc->port[p];
5540 		for_each_vi(pi, v, vi) {
5541 			vi->first_intr = rid - 1;
5542 
5543 			if (vi->nnmrxq > 0) {
5544 				int n = max(vi->nrxq, vi->nnmrxq);
5545 
5546 				rxq = &sge->rxq[vi->first_rxq];
5547 #ifdef DEV_NETMAP
5548 				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
5549 #endif
5550 				for (q = 0; q < n; q++) {
5551 					snprintf(s, sizeof(s), "%x%c%x", p,
5552 					    'a' + v, q);
5553 					if (q < vi->nrxq)
5554 						irq->rxq = rxq++;
5555 #ifdef DEV_NETMAP
5556 					if (q < vi->nnmrxq)
5557 						irq->nm_rxq = nm_rxq++;
5558 
5559 					if (irq->nm_rxq != NULL &&
5560 					    irq->rxq == NULL) {
5561 						/* Netmap rx only */
5562 						rc = t4_alloc_irq(sc, irq, rid,
5563 						    t4_nm_intr, irq->nm_rxq, s);
5564 					}
5565 					if (irq->nm_rxq != NULL &&
5566 					    irq->rxq != NULL) {
5567 						/* NIC and Netmap rx */
5568 						rc = t4_alloc_irq(sc, irq, rid,
5569 						    t4_vi_intr, irq, s);
5570 					}
5571 #endif
5572 					if (irq->rxq != NULL &&
5573 					    irq->nm_rxq == NULL) {
5574 						/* NIC rx only */
5575 						rc = t4_alloc_irq(sc, irq, rid,
5576 						    t4_intr, irq->rxq, s);
5577 					}
5578 					if (rc != 0)
5579 						return (rc);
5580 #ifdef RSS
5581 					if (q < vi->nrxq) {
5582 						bus_bind_intr(sc->dev, irq->res,
5583 						    rss_getcpu(q % nbuckets));
5584 					}
5585 #endif
5586 					irq++;
5587 					rid++;
5588 					vi->nintr++;
5589 				}
5590 			} else {
5591 				for_each_rxq(vi, q, rxq) {
5592 					snprintf(s, sizeof(s), "%x%c%x", p,
5593 					    'a' + v, q);
5594 					rc = t4_alloc_irq(sc, irq, rid,
5595 					    t4_intr, rxq, s);
5596 					if (rc != 0)
5597 						return (rc);
5598 #ifdef RSS
5599 					bus_bind_intr(sc->dev, irq->res,
5600 					    rss_getcpu(q % nbuckets));
5601 #endif
5602 					irq++;
5603 					rid++;
5604 					vi->nintr++;
5605 				}
5606 			}
5607 #ifdef TCP_OFFLOAD
5608 			for_each_ofld_rxq(vi, q, ofld_rxq) {
5609 				snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
5610 				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
5611 				    ofld_rxq, s);
5612 				if (rc != 0)
5613 					return (rc);
5614 				irq++;
5615 				rid++;
5616 				vi->nintr++;
5617 			}
5618 #endif
5619 		}
5620 	}
5621 	MPASS(irq == &sc->irq[sc->intr_count]);
5622 
5623 	return (0);
5624 }
5625 
5626 int
5627 adapter_full_init(struct adapter *sc)
5628 {
5629 	int rc, i;
5630 #ifdef RSS
5631 	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5632 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5633 #endif
5634 
5635 	ASSERT_SYNCHRONIZED_OP(sc);
5636 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5637 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
5638 	    ("%s: FULL_INIT_DONE already", __func__));
5639 
5640 	/*
5641 	 * Queues that belong to the adapter (not any particular port).
5642 	 */
5643 	rc = t4_setup_adapter_queues(sc);
5644 	if (rc != 0)
5645 		goto done;
5646 
5647 	for (i = 0; i < nitems(sc->tq); i++) {
5648 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
5649 		    taskqueue_thread_enqueue, &sc->tq[i]);
5650 		if (sc->tq[i] == NULL) {
5651 			device_printf(sc->dev,
5652 			    "failed to allocate task queue %d\n", i);
5653 			rc = ENOMEM;
5654 			goto done;
5655 		}
5656 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
5657 		    device_get_nameunit(sc->dev), i);
5658 	}
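	/*
	 * The kernel's RSS key is an array of 32-bit words; the loop below
	 * reverses the word order and converts each word to big-endian,
	 * which is the layout t4_write_rss_key expects.
	 */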
5659 #ifdef RSS
5660 	MPASS(RSS_KEYSIZE == 40);
5661 	rss_getkey((void *)&raw_rss_key[0]);
5662 	for (i = 0; i < nitems(rss_key); i++) {
5663 		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
5664 	}
5665 	t4_write_rss_key(sc, &rss_key[0], -1, 1);
5666 #endif
5667 
5668 	if (!(sc->flags & IS_VF))
5669 		t4_intr_enable(sc);
5670 #ifdef KERN_TLS
5671 	if (sc->flags & KERN_TLS_OK)
5672 		callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
5673 		    C_HARDCLOCK);
5674 #endif
5675 	sc->flags |= FULL_INIT_DONE;
5676 done:
5677 	if (rc != 0)
5678 		adapter_full_uninit(sc);
5679 
5680 	return (rc);
5681 }
5682 
5683 int
5684 adapter_full_uninit(struct adapter *sc)
5685 {
5686 	int i;
5687 
5688 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5689 
5690 	t4_teardown_adapter_queues(sc);
5691 
5692 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
5693 		taskqueue_free(sc->tq[i]);
5694 		sc->tq[i] = NULL;
5695 	}
5696 
5697 	sc->flags &= ~FULL_INIT_DONE;
5698 
5699 	return (0);
5700 }
5701 
5702 #ifdef RSS
5703 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
5704     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
5705     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
5706     RSS_HASHTYPE_RSS_UDP_IPV6)
5707 
5708 /* Translates kernel hash types to hardware. */
5709 static int
5710 hashconfig_to_hashen(int hashconfig)
5711 {
5712 	int hashen = 0;
5713 
5714 	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
5715 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
5716 	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
5717 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
5718 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
5719 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5720 		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5721 	}
5722 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
5723 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5724 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5725 	}
5726 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
5727 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5728 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
5729 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5730 
5731 	return (hashen);
5732 }
5733 
5734 /* Translates hardware hash types to kernel. */
5735 static int
5736 hashen_to_hashconfig(int hashen)
5737 {
5738 	int hashconfig = 0;
5739 
5740 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
5741 		/*
5742 		 * If UDP hashing was enabled it must have been enabled for
5743 		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
5744 		 * enabling any 4-tuple hash is a nonsensical configuration.
5745 		 */
5746 		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5747 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
5748 
5749 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5750 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
5751 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5752 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
5753 	}
5754 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5755 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
5756 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5757 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
5758 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
5759 		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
5760 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
5761 		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
5762 
5763 	return (hashconfig);
5764 }
5765 #endif
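
/*
 * Worked example (illustrative): requesting only RSS_HASHTYPE_RSS_UDP_IPV4
 * maps to UDPEN | IP4FOURTUPEN, and translating that back yields UDP_IPV4
 * and TCP_IPV4 because the 4-tuple enable is shared between the two.
 * vi_full_init below computes exactly this difference ("extra") and warns
 * that TCP/IPv4 4-tuple hashing was forced on.
 */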
5766 
5767 int
5768 vi_full_init(struct vi_info *vi)
5769 {
5770 	struct adapter *sc = vi->adapter;
5771 	struct ifnet *ifp = vi->ifp;
5772 	uint16_t *rss;
5773 	struct sge_rxq *rxq;
5774 	int rc, i, j;
5775 #ifdef RSS
5776 	int nbuckets = rss_getnumbuckets();
5777 	int hashconfig = rss_gethashconfig();
5778 	int extra;
5779 #endif
5780 
5781 	ASSERT_SYNCHRONIZED_OP(sc);
5782 	KASSERT((vi->flags & VI_INIT_DONE) == 0,
5783 	    ("%s: VI_INIT_DONE already", __func__));
5784 
5785 	sysctl_ctx_init(&vi->ctx);
5786 	vi->flags |= VI_SYSCTL_CTX;
5787 
5788 	/*
5789 	 * Allocate tx/rx/fl queues for this VI.
5790 	 */
5791 	rc = t4_setup_vi_queues(vi);
5792 	if (rc != 0)
5793 		goto done;	/* error message displayed already */
5794 
5795 	/*
5796 	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
5797 	 */
5798 	if (vi->nrxq > vi->rss_size) {
5799 		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
5800 		    "some queues will never receive traffic.\n", vi->nrxq,
5801 		    vi->rss_size);
5802 	} else if (vi->rss_size % vi->nrxq) {
5803 		if_printf(ifp, "nrxq (%d) does not divide hw RSS table size (%d); "
5804 		    "expect uneven traffic distribution.\n", vi->nrxq,
5805 		    vi->rss_size);
5806 	}
5807 #ifdef RSS
5808 	if (vi->nrxq != nbuckets) {
5809 		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5810 		    "performance will be impacted.\n", vi->nrxq, nbuckets);
5811 	}
5812 #endif
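	/*
	 * Each slot of the indirection table gets the absolute ID of one of
	 * this VI's rx queues.  Without kernel RSS the queues are simply
	 * cycled; with it each slot follows the kernel's bucket-to-queue
	 * assignment so the driver and the stack agree on flow placement.
	 */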
5813 	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5814 	for (i = 0; i < vi->rss_size;) {
5815 #ifdef RSS
5816 		j = rss_get_indirection_to_bucket(i);
5817 		j %= vi->nrxq;
5818 		rxq = &sc->sge.rxq[vi->first_rxq + j];
5819 		rss[i++] = rxq->iq.abs_id;
5820 #else
5821 		for_each_rxq(vi, j, rxq) {
5822 			rss[i++] = rxq->iq.abs_id;
5823 			if (i == vi->rss_size)
5824 				break;
5825 		}
5826 #endif
5827 	}
5828 
5829 	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
5830 	    vi->rss_size);
5831 	if (rc != 0) {
5832 		free(rss, M_CXGBE);
5833 		if_printf(ifp, "rss_config failed: %d\n", rc);
5834 		goto done;
5835 	}
5836 
5837 #ifdef RSS
5838 	vi->hashen = hashconfig_to_hashen(hashconfig);
5839 
5840 	/*
5841 	 * We may have had to enable some hashes even though the global config
5842 	 * wants them disabled.  This is a potential problem that must be
5843 	 * reported to the user.
5844 	 */
5845 	extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
5846 
5847 	/*
5848 	 * If we consider only the supported hash types, then the enabled hashes
5849 	 * are a superset of the requested hashes.  In other words, there cannot
5850 	 * be any supported hash that was requested but not enabled, but there
5851 	 * can be hashes that were not requested but had to be enabled.
5852 	 */
5853 	extra &= SUPPORTED_RSS_HASHTYPES;
5854 	MPASS((extra & hashconfig) == 0);
5855 
5856 	if (extra) {
5857 		if_printf(ifp,
5858 		    "global RSS config (0x%x) cannot be accommodated.\n",
5859 		    hashconfig);
5860 	}
5861 	if (extra & RSS_HASHTYPE_RSS_IPV4)
5862 		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
5863 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
5864 		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
5865 	if (extra & RSS_HASHTYPE_RSS_IPV6)
5866 		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
5867 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
5868 		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
5869 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
5870 		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
5871 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
5872 		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
5873 #else
5874 	vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
5875 	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
5876 	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5877 	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
5878 #endif
5879 	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, rss[0], 0, 0);
5880 	if (rc != 0) {
5881 		free(rss, M_CXGBE);
5882 		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
5883 		goto done;
5884 	}
5885 
5886 	vi->rss = rss;
5887 	vi->flags |= VI_INIT_DONE;
5888 done:
5889 	if (rc != 0)
5890 		vi_full_uninit(vi);
5891 
5892 	return (rc);
5893 }
5894 
5895 /*
5896  * Idempotent.
5897  */
5898 int
5899 vi_full_uninit(struct vi_info *vi)
5900 {
5901 	struct port_info *pi = vi->pi;
5902 	struct adapter *sc = pi->adapter;
5903 	int i;
5904 	struct sge_rxq *rxq;
5905 	struct sge_txq *txq;
5906 #ifdef TCP_OFFLOAD
5907 	struct sge_ofld_rxq *ofld_rxq;
5908 #endif
5909 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
5910 	struct sge_wrq *ofld_txq;
5911 #endif
5912 
5913 	if (vi->flags & VI_INIT_DONE) {
5914 
5915 		/* Need to quiesce queues.  */
5916 
5917 		/* XXX: Only for the first VI? */
5918 		if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
5919 			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
5920 
5921 		for_each_txq(vi, i, txq) {
5922 			quiesce_txq(sc, txq);
5923 		}
5924 
5925 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
5926 		for_each_ofld_txq(vi, i, ofld_txq) {
5927 			quiesce_wrq(sc, ofld_txq);
5928 		}
5929 #endif
5930 
5931 		for_each_rxq(vi, i, rxq) {
5932 			quiesce_iq(sc, &rxq->iq);
5933 			quiesce_fl(sc, &rxq->fl);
5934 		}
5935 
5936 #ifdef TCP_OFFLOAD
5937 		for_each_ofld_rxq(vi, i, ofld_rxq) {
5938 			quiesce_iq(sc, &ofld_rxq->iq);
5939 			quiesce_fl(sc, &ofld_rxq->fl);
5940 		}
5941 #endif
5942 		free(vi->rss, M_CXGBE);
5943 		free(vi->nm_rss, M_CXGBE);
5944 	}
5945 
5946 	t4_teardown_vi_queues(vi);
5947 	vi->flags &= ~VI_INIT_DONE;
5948 
5949 	return (0);
5950 }
5951 
5952 static void
5953 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
5954 {
5955 	struct sge_eq *eq = &txq->eq;
5956 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
5957 
5958 	(void) sc;	/* unused */
5959 
5960 #ifdef INVARIANTS
5961 	TXQ_LOCK(txq);
5962 	MPASS((eq->flags & EQ_ENABLED) == 0);
5963 	TXQ_UNLOCK(txq);
5964 #endif
5965 
5966 	/* Wait for the mp_ring to empty. */
5967 	while (!mp_ring_is_idle(txq->r)) {
5968 		mp_ring_check_drainage(txq->r, 0);
5969 		pause("rquiesce", 1);
5970 	}
5971 
5972 	/* Then wait for the hardware to finish. */
5973 	while (spg->cidx != htobe16(eq->pidx))
5974 		pause("equiesce", 1);
5975 
5976 	/* Finally, wait for the driver to reclaim all descriptors. */
5977 	while (eq->cidx != eq->pidx)
5978 		pause("dquiesce", 1);
5979 }
5980 
5981 static void
5982 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
5983 {
5984 
5985 	/* XXXTX */
5986 }
5987 
5988 static void
5989 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
5990 {
5991 	(void) sc;	/* unused */
5992 
5993 	/*
	 * Synchronize with the interrupt handler: the handler owns the iq
	 * while iq->state is IQS_BUSY, so the cmpset below succeeds only
	 * once the queue is idle, and IQS_DISABLED then keeps the handler
	 * from picking the queue up again.
	 */
5994 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
5995 		pause("iqfree", 1);
5996 }
5997 
5998 static void
5999 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
6000 {
6001 	mtx_lock(&sc->sfl_lock);
6002 	FL_LOCK(fl);
6003 	fl->flags |= FL_DOOMED;
6004 	FL_UNLOCK(fl);
6005 	callout_stop(&sc->sfl_callout);
6006 	mtx_unlock(&sc->sfl_lock);
6007 
6008 	KASSERT((fl->flags & FL_STARVING) == 0,
6009 	    ("%s: still starving", __func__));
6010 }
6011 
6012 static int
6013 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
6014     driver_intr_t *handler, void *arg, char *name)
6015 {
6016 	int rc;
6017 
6018 	irq->rid = rid;
6019 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
6020 	    RF_SHAREABLE | RF_ACTIVE);
6021 	if (irq->res == NULL) {
6022 		device_printf(sc->dev,
6023 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
6024 		return (ENOMEM);
6025 	}
6026 
6027 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
6028 	    NULL, handler, arg, &irq->tag);
6029 	if (rc != 0) {
6030 		device_printf(sc->dev,
6031 		    "failed to setup interrupt for rid %d, name %s: %d\n",
6032 		    rid, name, rc);
6033 	} else if (name)
6034 		bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
6035 
6036 	return (rc);
6037 }
6038 
6039 static int
6040 t4_free_irq(struct adapter *sc, struct irq *irq)
6041 {
6042 	if (irq->tag)
6043 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
6044 	if (irq->res)
6045 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
6046 
6047 	bzero(irq, sizeof(*irq));
6048 
6049 	return (0);
6050 }
6051 
6052 static void
6053 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
6054 {
6055 
6056 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
6057 	t4_get_regs(sc, buf, regs->len);
6058 }
6059 
6060 #define	A_PL_INDIR_CMD	0x1f8
6061 
6062 #define	S_PL_AUTOINC	31
6063 #define	M_PL_AUTOINC	0x1U
6064 #define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
6065 #define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
6066 
6067 #define	S_PL_VFID	20
6068 #define	M_PL_VFID	0xffU
6069 #define	V_PL_VFID(x)	((x) << S_PL_VFID)
6070 #define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)
6071 
6072 #define	S_PL_ADDR	0
6073 #define	M_PL_ADDR	0xfffffU
6074 #define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
6075 #define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)
6076 
6077 #define	A_PL_INDIR_DATA	0x1fc
6078 
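/*
 * PL_INDIR_CMD/PL_INDIR_DATA form an indirect register window: the PF writes
 * a command selecting a VF and a starting address, and with AUTOINC set each
 * read of PL_INDIR_DATA returns the next 32-bit register.  That is why
 * read_vf_stat below fetches the _L and _H halves of a 64-bit MPS counter
 * with two back-to-back reads of the same data register.
 */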
6079 static uint64_t
6080 read_vf_stat(struct adapter *sc, u_int vin, int reg)
6081 {
6082 	u32 stats[2];
6083 
6084 	mtx_assert(&sc->reg_lock, MA_OWNED);
6085 	if (sc->flags & IS_VF) {
6086 		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
6087 		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
6088 	} else {
6089 		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
6090 		    V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
6091 		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
6092 		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
6093 	}
6094 	return (((uint64_t)stats[1]) << 32 | stats[0]);
6095 }
6096 
6097 static void
6098 t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
6099 {
6100 
6101 #define GET_STAT(name) \
6102 	read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
6103 
6104 	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
6105 	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
6106 	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
6107 	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
6108 	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
6109 	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
6110 	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
6111 	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
6112 	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
6113 	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
6114 	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
6115 	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
6116 	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
6117 	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
6118 	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
6119 	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);
6120 
6121 #undef GET_STAT
6122 }
6123 
6124 static void
6125 t4_clr_vi_stats(struct adapter *sc, u_int vin)
6126 {
6127 	int reg;
6128 
6129 	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
6130 	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
6131 	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
6132 	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
6133 		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
6134 }
6135 
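/*
 * Both refresh routines below rate-limit themselves: stats are re-read from
 * the hardware at most once every 250ms, and callers that poll more often
 * are served the cached copy.
 */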
6136 static void
6137 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
6138 {
6139 	struct timeval tv;
6140 	const struct timeval interval = {0, 250000};	/* 250ms */
6141 
6142 	if (!(vi->flags & VI_INIT_DONE))
6143 		return;
6144 
6145 	getmicrotime(&tv);
6146 	timevalsub(&tv, &interval);
6147 	if (timevalcmp(&tv, &vi->last_refreshed, <))
6148 		return;
6149 
6150 	mtx_lock(&sc->reg_lock);
6151 	t4_get_vi_stats(sc, vi->vin, &vi->stats);
6152 	getmicrotime(&vi->last_refreshed);
6153 	mtx_unlock(&sc->reg_lock);
6154 }
6155 
6156 static void
6157 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
6158 {
6159 	u_int i, v, tnl_cong_drops, chan_map;
6160 	struct timeval tv;
6161 	const struct timeval interval = {0, 250000};	/* 250ms */
6162 
6163 	getmicrotime(&tv);
6164 	timevalsub(&tv, &interval);
6165 	if (timevalcmp(&tv, &pi->last_refreshed, <))
6166 		return;
6167 
6168 	tnl_cong_drops = 0;
6169 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
6170 	chan_map = pi->rx_e_chan_map;
6171 	while (chan_map) {
6172 		i = ffs(chan_map) - 1;
6173 		mtx_lock(&sc->reg_lock);
6174 		t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
6175 		    A_TP_MIB_TNL_CNG_DROP_0 + i);
6176 		mtx_unlock(&sc->reg_lock);
6177 		tnl_cong_drops += v;
6178 		chan_map &= ~(1 << i);
6179 	}
6180 	pi->tnl_cong_drops = tnl_cong_drops;
6181 	getmicrotime(&pi->last_refreshed);
6182 }
6183 
6184 static void
6185 cxgbe_tick(void *arg)
6186 {
6187 	struct port_info *pi = arg;
6188 	struct adapter *sc = pi->adapter;
6189 
6190 	PORT_LOCK_ASSERT_OWNED(pi);
6191 	cxgbe_refresh_stats(sc, pi);
6192 
6193 	callout_schedule(&pi->tick, hz);
6194 }
6195 
6196 void
6197 vi_tick(void *arg)
6198 {
6199 	struct vi_info *vi = arg;
6200 	struct adapter *sc = vi->adapter;
6201 
6202 	vi_refresh_stats(sc, vi);
6203 
6204 	callout_schedule(&vi->tick, hz);
6205 }
6206 
6207 /*
6208  * Should match fw_caps_config_<foo> enums in t4fw_interface.h
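 *
 * Each decoder is a %b-style bit-field format string: the leading \20
 * selects hexadecimal output and each subsequent \<n><NAME> pair names
 * bit <n> (1-based), so in "\20\001IPMI\002NCSI" the lowest bit decodes
 * as IPMI.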
6209  */
6210 static char *caps_decoder[] = {
6211 	"\20\001IPMI\002NCSI",				/* 0: NBM */
6212 	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
6213 	"\20\001INGRESS\002EGRESS",			/* 2: switch */
6214 	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
6215 	    "\006HASHFILTER\007ETHOFLD",
6216 	"\20\001TOE",					/* 4: TOE */
6217 	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
6218 	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
6219 	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
6220 	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
6221 	    "\007T10DIF"
6222 	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
6223 	"\20\001LOOKASIDE\002TLSKEYS",			/* 7: Crypto */
6224 	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
6225 		    "\004PO_INITIATOR\005PO_TARGET",
6226 };
6227 
6228 void
6229 t4_sysctls(struct adapter *sc)
6230 {
6231 	struct sysctl_ctx_list *ctx;
6232 	struct sysctl_oid *oid;
6233 	struct sysctl_oid_list *children, *c0;
6234 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
6235 
6236 	ctx = device_get_sysctl_ctx(sc->dev);
6237 
6238 	/*
6239 	 * dev.t4nex.X.
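	 *
	 * Everything added under this node is visible from userland; e.g.
	 * "sysctl dev.t4nex.0" (unit 0 assumed) walks the whole subtree.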
6240 	 */
6241 	oid = device_get_sysctl_tree(sc->dev);
6242 	c0 = children = SYSCTL_CHILDREN(oid);
6243 
6244 	sc->sc_do_rxcopy = 1;
6245 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
6246 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
6247 
6248 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
6249 	    sc->params.nports, "# of ports");
6250 
6251 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
6252 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, doorbells,
6253 	    (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
6254 	    "available doorbells");
6255 
6256 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
6257 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
6258 
6259 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
6260 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
6261 	    sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
6262 	    sysctl_int_array, "A", "interrupt holdoff timer values (us)");
6263 
6264 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
6265 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
6266 	    sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
6267 	    sysctl_int_array, "A", "interrupt holdoff packet counter values");
6268 
6269 	t4_sge_sysctls(sc, ctx, children);
6270 
6271 	sc->lro_timeout = 100;
6272 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
6273 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
6274 
6275 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
6276 	    &sc->debug_flags, 0, "flags to enable runtime debugging");
6277 
6278 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
6279 	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
6280 
6281 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
6282 	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
6283 
6284 	if (sc->flags & IS_VF)
6285 		return;
6286 
6287 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
6288 	    NULL, chip_rev(sc), "chip hardware revision");
6289 
6290 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
6291 	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
6292 
6293 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
6294 	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
6295 
6296 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
6297 	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
6298 
6299 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
6300 	    CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
6301 
6302 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
6303 	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
6304 
6305 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
6306 	    sc->er_version, 0, "expansion ROM version");
6307 
6308 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
6309 	    sc->bs_version, 0, "bootstrap firmware version");
6310 
6311 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
6312 	    NULL, sc->params.scfg_vers, "serial config version");
6313 
6314 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
6315 	    NULL, sc->params.vpd_vers, "VPD version");
6316 
6317 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
6318 	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
6319 
6320 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
6321 	    sc->cfcsum, "config file checksum");
6322 
6323 #define SYSCTL_CAP(name, n, text) \
6324 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
6325 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, caps_decoder[n], \
6326 	    (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
6327 	    "available " text " capabilities")
6328 
6329 	SYSCTL_CAP(nbmcaps, 0, "NBM");
6330 	SYSCTL_CAP(linkcaps, 1, "link");
6331 	SYSCTL_CAP(switchcaps, 2, "switch");
6332 	SYSCTL_CAP(niccaps, 3, "NIC");
6333 	SYSCTL_CAP(toecaps, 4, "TCP offload");
6334 	SYSCTL_CAP(rdmacaps, 5, "RDMA");
6335 	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
6336 	SYSCTL_CAP(cryptocaps, 7, "crypto");
6337 	SYSCTL_CAP(fcoecaps, 8, "FCoE");
6338 #undef SYSCTL_CAP
6339 
6340 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
6341 	    NULL, sc->tids.nftids, "number of filters");
6342 
6343 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
6344 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6345 	    sysctl_temperature, "I", "chip temperature (in Celsius)");
6346 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
6347 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6348 	    sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
6349 
6350 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
6351 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6352 	    sysctl_loadavg, "A",
6353 	    "microprocessor load averages (debug firmwares only)");
6354 
6355 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
6356 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, sysctl_vdd,
6357 	    "I", "core Vdd (in mV)");
6358 
6359 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
6360 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, LOCAL_CPUS,
6361 	    sysctl_cpus, "A", "local CPUs");
6362 
6363 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
6364 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, INTR_CPUS,
6365 	    sysctl_cpus, "A", "preferred CPUs for interrupts");
6366 
6367 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
6368 	    &sc->swintr, 0, "software triggered interrupts");
6369 
6370 	/*
6371 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
6372 	 */
6373 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
6374 	    CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
6375 	    "logs and miscellaneous information");
6376 	children = SYSCTL_CHILDREN(oid);
6377 
6378 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
6379 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6380 	    sysctl_cctrl, "A", "congestion control");
6381 
6382 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
6383 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6384 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
6385 
6386 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
6387 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 1,
6388 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
6389 
6390 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
6391 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2,
6392 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
6393 
6394 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
6395 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 3,
6396 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
6397 
6398 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
6399 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 4,
6400 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
6401 
6402 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
6403 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 5,
6404 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
6405 
6406 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
6407 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6408 	    sysctl_cim_la, "A", "CIM logic analyzer");
6409 
6410 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
6411 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6412 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
6413 
6414 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
6415 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6416 	    0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
6417 
6418 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
6419 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6420 	    1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
6421 
6422 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
6423 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6424 	    2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
6425 
6426 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
6427 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6428 	    3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
6429 
6430 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
6431 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6432 	    4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
6433 
6434 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
6435 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6436 	    5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
6437 
6438 	if (chip_id(sc) > CHELSIO_T4) {
6439 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
6440 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6441 		    6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
6442 		    "CIM OBQ 6 (SGE0-RX)");
6443 
6444 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
6445 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6446 		    7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
6447 		    "CIM OBQ 7 (SGE1-RX)");
6448 	}
6449 
6450 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
6451 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6452 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
6453 
6454 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
6455 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6456 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
6457 
6458 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
6459 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6460 	    sysctl_cpl_stats, "A", "CPL statistics");
6461 
6462 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
6463 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6464 	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
6465 
6466 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
6467 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6468 	    sysctl_devlog, "A", "firmware's device log");
6469 
6470 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
6471 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6472 	    sysctl_fcoe_stats, "A", "FCoE statistics");
6473 
6474 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
6475 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6476 	    sysctl_hw_sched, "A", "hardware scheduler");
6477 
6478 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
6479 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6480 	    sysctl_l2t, "A", "hardware L2 table");
6481 
6482 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
6483 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6484 	    sysctl_smt, "A", "hardware source MAC table");
6485 
6486 #ifdef INET6
6487 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
6488 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6489 	    sysctl_clip, "A", "active CLIP table entries");
6490 #endif
6491 
6492 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
6493 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6494 	    sysctl_lb_stats, "A", "loopback statistics");
6495 
6496 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
6497 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6498 	    sysctl_meminfo, "A", "memory regions");
6499 
6500 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
6501 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6502 	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
6503 	    "A", "MPS TCAM entries");
6504 
6505 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
6506 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6507 	    sysctl_path_mtus, "A", "path MTUs");
6508 
6509 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
6510 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6511 	    sysctl_pm_stats, "A", "PM statistics");
6512 
6513 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
6514 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6515 	    sysctl_rdma_stats, "A", "RDMA statistics");
6516 
6517 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
6518 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6519 	    sysctl_tcp_stats, "A", "TCP statistics");
6520 
6521 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
6522 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6523 	    sysctl_tids, "A", "TID information");
6524 
6525 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
6526 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6527 	    sysctl_tp_err_stats, "A", "TP error statistics");
6528 
6529 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
6530 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6531 	    sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");
6532 
6533 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
6534 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6535 	    sysctl_tp_la, "A", "TP logic analyzer");
6536 
6537 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
6538 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6539 	    sysctl_tx_rate, "A", "Tx rate");
6540 
6541 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
6542 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6543 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
6544 
6545 	if (chip_id(sc) >= CHELSIO_T5) {
6546 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
6547 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6548 		    sysctl_wcwr_stats, "A", "write combined work requests");
6549 	}
6550 
6551 #ifdef KERN_TLS
6552 	if (sc->flags & KERN_TLS_OK) {
6553 		/*
6554 		 * dev.t4nex.0.tls.
6555 		 */
6556 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
6557 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
6558 		children = SYSCTL_CHILDREN(oid);
6559 
6560 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
6561 		    CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
6562 		    "keys in work requests (1) or attempt to store TLS keys "
6563 		    "in card memory (0).");
6564 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
6565 		    CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to combine "
6566 		    "TCB field updates with TLS record work requests.");
6567 	}
6568 #endif
6569 
6570 #ifdef TCP_OFFLOAD
6571 	if (is_offload(sc)) {
6572 		int i;
6573 		char s[4];
6574 
6575 		/*
6576 		 * dev.t4nex.X.toe.
6577 		 */
6578 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
6579 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
6580 		children = SYSCTL_CHILDREN(oid);
6581 
6582 		sc->tt.cong_algorithm = -1;
6583 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
6584 		    CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
6585 		    "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
6586 		    "3 = highspeed)");
6587 
6588 		sc->tt.sndbuf = -1;
6589 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
6590 		    &sc->tt.sndbuf, 0, "hardware send buffer");
6591 
6592 		sc->tt.ddp = 0;
6593 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
6594 		    CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
6595 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
6596 		    &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
6597 
6598 		sc->tt.rx_coalesce = -1;
6599 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
6600 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
6601 
6602 		sc->tt.tls = 0;
6603 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
6604 		    &sc->tt.tls, 0, "Inline TLS allowed");
6605 
6606 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
6607 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6608 		    sysctl_tls_rx_ports, "I",
6609 		    "TCP ports that use inline TLS+TOE RX");
6610 
6611 		sc->tt.tx_align = -1;
6612 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
6613 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
6614 
6615 		sc->tt.tx_zcopy = 0;
6616 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
6617 		    CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
6618 		    "Enable zero-copy aio_write(2)");
6619 
6620 		sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
6621 		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6622 		    "cop_managed_offloading", CTLFLAG_RW,
6623 		    &sc->tt.cop_managed_offloading, 0,
6624 		    "COP (Connection Offload Policy) controls all TOE offload");
6625 
6626 		sc->tt.autorcvbuf_inc = 16 * 1024;
6627 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
6628 		    CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
6629 		    "autorcvbuf increment");
6630 
6631 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
6632 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6633 		    sysctl_tp_tick, "A", "TP timer tick (us)");
6634 
6635 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
6636 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 1,
6637 		    sysctl_tp_tick, "A", "TCP timestamp tick (us)");
6638 
6639 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
6640 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 2,
6641 		    sysctl_tp_tick, "A", "DACK tick (us)");
6642 
6643 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
6644 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
6645 		    sysctl_tp_dack_timer, "IU", "DACK timer (us)");
6646 
6647 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
6648 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6649 		    A_TP_RXT_MIN, sysctl_tp_timer, "LU",
6650 		    "Minimum retransmit interval (us)");
6651 
6652 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
6653 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6654 		    A_TP_RXT_MAX, sysctl_tp_timer, "LU",
6655 		    "Maximum retransmit interval (us)");
6656 
6657 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
6658 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6659 		    A_TP_PERS_MIN, sysctl_tp_timer, "LU",
6660 		    "Persist timer min (us)");
6661 
6662 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
6663 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6664 		    A_TP_PERS_MAX, sysctl_tp_timer, "LU",
6665 		    "Persist timer max (us)");
6666 
6667 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
6668 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6669 		    A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
6670 		    "Keepalive idle timer (us)");
6671 
6672 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
6673 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6674 		    A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
6675 		    "Keepalive interval timer (us)");
6676 
6677 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
6678 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6679 		    A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");
6680 
6681 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
6682 		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6683 		    A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
6684 		    "FINWAIT2 timer (us)");
6685 
6686 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
6687 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6688 		    S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
6689 		    "Number of SYN retransmissions before abort");
6690 
6691 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
6692 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6693 		    S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
6694 		    "Number of retransmissions before abort");
6695 
6696 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
6697 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6698 		    S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
6699 		    "Number of keepalive probes before abort");
6700 
6701 		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
6702 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
6703 		    "TOE retransmit backoffs");
6704 		children = SYSCTL_CHILDREN(oid);
6705 		for (i = 0; i < 16; i++) {
6706 			snprintf(s, sizeof(s), "%u", i);
6707 			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
6708 			    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6709 			    i, sysctl_tp_backoff, "IU",
6710 			    "TOE retransmit backoff");
6711 		}
6712 	}
6713 #endif
6714 }
6715 
6716 void
6717 vi_sysctls(struct vi_info *vi)
6718 {
6719 	struct sysctl_ctx_list *ctx;
6720 	struct sysctl_oid *oid;
6721 	struct sysctl_oid_list *children;
6722 
6723 	ctx = device_get_sysctl_ctx(vi->dev);
6724 
6725 	/*
6726 	 * dev.v?(cxgbe|cxl).X.
6727 	 */
6728 	oid = device_get_sysctl_tree(vi->dev);
6729 	children = SYSCTL_CHILDREN(oid);
6730 
6731 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
6732 	    vi->viid, "VI identifier");
6733 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
6734 	    &vi->nrxq, 0, "# of rx queues");
6735 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
6736 	    &vi->ntxq, 0, "# of tx queues");
6737 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
6738 	    &vi->first_rxq, 0, "index of first rx queue");
6739 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
6740 	    &vi->first_txq, 0, "index of first tx queue");
6741 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
6742 	    vi->rss_base, "start of RSS indirection table");
6743 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
6744 	    vi->rss_size, "size of RSS indirection table");
6745 
6746 	if (IS_MAIN_VI(vi)) {
6747 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
6748 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6749 		    sysctl_noflowq, "IU",
6750 		    "Reserve queue 0 for non-flowid packets");
6751 	}
6752 
6753 #ifdef TCP_OFFLOAD
6754 	if (vi->nofldrxq != 0) {
6755 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
6756 		    &vi->nofldrxq, 0,
6757 		    "# of rx queues for offloaded TCP connections");
6758 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
6759 		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
6760 		    "index of first TOE rx queue");
6761 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
6762 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6763 		    sysctl_holdoff_tmr_idx_ofld, "I",
6764 		    "holdoff timer index for TOE queues");
6765 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
6766 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6767 		    sysctl_holdoff_pktc_idx_ofld, "I",
6768 		    "holdoff packet counter index for TOE queues");
6769 	}
6770 #endif
6771 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6772 	if (vi->nofldtxq != 0) {
6773 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
6774 		    &vi->nofldtxq, 0,
6775 		    "# of tx queues for TOE/ETHOFLD");
6776 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
6777 		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
6778 		    "index of first TOE/ETHOFLD tx queue");
6779 	}
6780 #endif
6781 #ifdef DEV_NETMAP
6782 	if (vi->nnmrxq != 0) {
6783 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
6784 		    &vi->nnmrxq, 0, "# of netmap rx queues");
6785 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
6786 		    &vi->nnmtxq, 0, "# of netmap tx queues");
6787 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
6788 		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
6789 		    "index of first netmap rx queue");
6790 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
6791 		    CTLFLAG_RD, &vi->first_nm_txq, 0,
6792 		    "index of first netmap tx queue");
6793 	}
6794 #endif
6795 
6796 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
6797 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6798 	    sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
6799 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
6800 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6801 	    sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");
6802 
6803 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
6804 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6805 	    sysctl_qsize_rxq, "I", "rx queue size");
6806 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
6807 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, 0,
6808 	    sysctl_qsize_txq, "I", "tx queue size");
6809 }
6810 
6811 static void
6812 cxgbe_sysctls(struct port_info *pi)
6813 {
6814 	struct sysctl_ctx_list *ctx;
6815 	struct sysctl_oid *oid;
6816 	struct sysctl_oid_list *children, *children2;
6817 	struct adapter *sc = pi->adapter;
6818 	int i;
6819 	char name[16];
6820 	static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"};
6821 
6822 	ctx = device_get_sysctl_ctx(pi->dev);
6823 
6824 	/*
6825 	 * dev.cxgbe.X.
6826 	 */
6827 	oid = device_get_sysctl_tree(pi->dev);
6828 	children = SYSCTL_CHILDREN(oid);
6829 
6830 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
6831 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, 0,
6832 	    sysctl_linkdnrc, "A", "reason why link is down");
6833 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
6834 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
6835 		    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, 0,
6836 		    sysctl_btphy, "I", "PHY temperature (in Celsius)");
6837 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
6838 		    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, 1,
6839 		    sysctl_btphy, "I", "PHY firmware version");
6840 	}
6841 
6842 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
6843 	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pi, 0,
6844 	    sysctl_pause_settings, "A",
6845 	    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
6846 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
6847 	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pi, 0,
6848 	    sysctl_fec, "A",
6849 	    "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
6850 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
6851 	    CTLTYPE_STRING | CTLFLAG_NEEDGIANT, pi, 0, sysctl_module_fec, "A",
6852 	    "FEC recommended by the cable/transceiver");
6853 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
6854 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pi, 0,
6855 	    sysctl_autoneg, "I",
6856 	    "autonegotiation (-1 = not supported)");
6857 
6858 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
6859 	    &pi->link_cfg.pcaps, 0, "port capabilities");
6860 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
6861 	    &pi->link_cfg.acaps, 0, "advertised capabilities");
6862 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
6863 	    &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
6864 
6865 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
6866 	    port_top_speed(pi), "max speed (in Gbps)");
6867 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
6868 	    pi->mps_bg_map, "MPS buffer group map");
6869 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
6870 	    NULL, pi->rx_e_chan_map, "TP rx e-channel map");
6871 
6872 	if (sc->flags & IS_VF)
6873 		return;
6874 
6875 	/*
6876 	 * dev.(cxgbe|cxl).X.tc.
6877 	 */
6878 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
6879 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
6880 	    "Tx scheduler traffic classes (cl_rl)");
6881 	children2 = SYSCTL_CHILDREN(oid);
6882 	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
6883 	    CTLFLAG_RW, &pi->sched_params->pktsize, 0,
6884 	    "pktsize for per-flow cl-rl (0 means up to the driver)");
6885 	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
6886 	    CTLFLAG_RW, &pi->sched_params->burstsize, 0,
6887 	    "burstsize for per-flow cl-rl (0 means up to the driver)");
6888 	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
6889 		struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
6890 
6891 		snprintf(name, sizeof(name), "%d", i);
6892 		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
6893 		    SYSCTL_CHILDREN(oid), OID_AUTO, name,
6894 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
6895 		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
6896 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, tc_flags,
6897 		    (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
6898 		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
6899 		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
6900 		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
6901 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
6902 		    (pi->port_id << 16) | i, sysctl_tc_params, "A",
6903 		    "traffic class parameters");
6904 	}
6905 
6906 	/*
6907 	 * dev.cxgbe.X.stats.
6908 	 */
6909 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
6910 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
6911 	children = SYSCTL_CHILDREN(oid);
6912 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
6913 	    &pi->tx_parse_error, 0,
6914 	    "# of tx packets with an invalid length or segment count");
6915 
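/*
 * Shorthand for exporting one 64-bit MPS port counter: each sysctl stores
 * the register offset of the counter's low word and sysctl_handle_t4_reg64
 * reads the full 64-bit value from the hardware on demand.
 */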
6916 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
6917     SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
6918         CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, reg, \
6919         sysctl_handle_t4_reg64, "QU", desc)
6920 
6921 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
6922 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
6923 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
6924 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
6925 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
6926 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
6927 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
6928 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
6929 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
6930 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
6931 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
6932 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
6933 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
6934 	    "# of tx frames in this range",
6935 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
6936 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
6937 	    "# of tx frames in this range",
6938 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
6939 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
6940 	    "# of tx frames in this range",
6941 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
6942 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
6943 	    "# of tx frames in this range",
6944 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
6945 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
6946 	    "# of tx frames in this range",
6947 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
6948 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
6949 	    "# of tx frames in this range",
6950 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
6951 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
6952 	    "# of tx frames in this range",
6953 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
6954 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
6955 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
6956 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
6957 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
6958 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
6959 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
6960 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
6961 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
6962 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
6963 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
6964 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
6965 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
6966 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
6967 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
6968 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
6969 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
6970 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
6971 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
6972 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
6973 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
6974 
6975 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
6976 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
6977 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
6978 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
6979 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
6980 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
6981 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
6982 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
6983 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
6984 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
6985 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
6986 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
6987 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
6988 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
6989 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
6990 	    "# of frames received with bad FCS",
6991 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
6992 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
6993 	    "# of frames received with length error",
6994 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
6995 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
6996 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
6997 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
6998 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
6999 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
7000 	    "# of rx frames in this range",
7001 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
7002 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
7003 	    "# of rx frames in this range",
7004 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
7005 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
7006 	    "# of rx frames in this range",
7007 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
7008 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
7009 	    "# of rx frames in this range",
7010 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
7011 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
7012 	    "# of rx frames in this range",
7013 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
7014 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
7015 	    "# of rx frames in this range",
7016 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
7017 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
7018 	    "# of rx frames in this range",
7019 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
7020 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
7021 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
7022 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
7023 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
7024 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
7025 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
7026 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
7027 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
7028 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
7029 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
7030 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
7031 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
7032 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
7033 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
7034 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
7035 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
7036 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
7037 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
7038 
7039 #undef SYSCTL_ADD_T4_REG64
7040 
7041 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
7042 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
7043 	    &pi->stats.name, desc)
7044 
7045 	/* We get these from port_stats and they may be stale by up to 1s */
7046 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
7047 	    "# drops due to buffer-group 0 overflows");
7048 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
7049 	    "# drops due to buffer-group 1 overflows");
7050 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
7051 	    "# drops due to buffer-group 2 overflows");
7052 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
7053 	    "# drops due to buffer-group 3 overflows");
7054 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
7055 	    "# of buffer-group 0 truncated packets");
7056 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
7057 	    "# of buffer-group 1 truncated packets");
7058 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
7059 	    "# of buffer-group 2 truncated packets");
7060 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
7061 	    "# of buffer-group 3 truncated packets");
7062 
7063 #undef SYSCTL_ADD_T4_PORTSTAT
7064 
7065 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_records",
7066 	    CTLFLAG_RD, &pi->tx_toe_tls_records,
7067 	    "# of TOE TLS records transmitted");
7068 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_octets",
7069 	    CTLFLAG_RD, &pi->tx_toe_tls_octets,
7070 	    "# of payload octets in transmitted TOE TLS records");
7071 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_records",
7072 	    CTLFLAG_RD, &pi->rx_toe_tls_records,
7073 	    "# of TOE TLS records received");
7074 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_octets",
7075 	    CTLFLAG_RD, &pi->rx_toe_tls_octets,
7076 	    "# of payload octets in received TOE TLS records");
7077 }
7078 
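/*
 * Display an array of ints as a space-separated list.  arg1 points to the
 * array and arg2 is its size in bytes.
 */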
7079 static int
7080 sysctl_int_array(SYSCTL_HANDLER_ARGS)
7081 {
7082 	int rc, *i, space = 0;
7083 	struct sbuf sb;
7084 
7085 	sbuf_new_for_sysctl(&sb, NULL, 64, req);
7086 	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
7087 		if (space)
7088 			sbuf_printf(&sb, " ");
7089 		sbuf_printf(&sb, "%d", *i);
7090 		space = 1;
7091 	}
7092 	rc = sbuf_finish(&sb);
7093 	sbuf_delete(&sb);
7094 	return (rc);
7095 }
7096 
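/*
 * Display an 8-bit field as a bit string.  arg1 is the "\20..." format
 * string for %b and arg2 points to the value; sysctl_bitfield_16b below is
 * the 16-bit variant.
 */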
7097 static int
7098 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
7099 {
7100 	int rc;
7101 	struct sbuf *sb;
7102 
7103 	rc = sysctl_wire_old_buffer(req, 0);
7104 	if (rc != 0)
7105 		return (rc);
7106 
7107 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7108 	if (sb == NULL)
7109 		return (ENOMEM);
7110 
7111 	sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
7112 	rc = sbuf_finish(sb);
7113 	sbuf_delete(sb);
7114 
7115 	return (rc);
7116 }
7117 
7118 static int
7119 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
7120 {
7121 	int rc;
7122 	struct sbuf *sb;
7123 
7124 	rc = sysctl_wire_old_buffer(req, 0);
7125 	if (rc != 0)
7126 		return (rc);
7127 
7128 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7129 	if (sb == NULL)
7130 		return (ENOMEM);
7131 
7132 	sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
7133 	rc = sbuf_finish(sb);
7134 	sbuf_delete(sb);
7135 
7136 	return (rc);
7137 }
7138 
7139 static int
7140 sysctl_btphy(SYSCTL_HANDLER_ARGS)
7141 {
7142 	struct port_info *pi = arg1;
7143 	int op = arg2;
7144 	struct adapter *sc = pi->adapter;
7145 	u_int v;
7146 	int rc;
7147 
7148 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
7149 	if (rc)
7150 		return (rc);
7151 	/* XXX: magic numbers */
7152 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
7153 	    &v);
7154 	end_synchronized_op(sc, 0);
7155 	if (rc)
7156 		return (rc);
7157 	if (op == 0)
7158 		v /= 256;
7159 
7160 	rc = sysctl_handle_int(oidp, &v, 0, req);
7161 	return (rc);
7162 }
7163 
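/*
 * Reserve a tx queue for traffic that doesn't carry a flowid.  Any nonzero
 * value requests the reservation, but it takes effect only when the VI has
 * more than one tx queue.
 */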
7164 static int
7165 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
7166 {
7167 	struct vi_info *vi = arg1;
7168 	int rc, val;
7169 
7170 	val = vi->rsrv_noflowq;
7171 	rc = sysctl_handle_int(oidp, &val, 0, req);
7172 	if (rc != 0 || req->newptr == NULL)
7173 		return (rc);
7174 
7175 	if ((val >= 1) && (vi->ntxq > 1))
7176 		vi->rsrv_noflowq = 1;
7177 	else
7178 		vi->rsrv_noflowq = 0;
7179 
7180 	return (rc);
7181 }
7182 
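/*
 * Interrupt holdoff timer index for the VI's rx queues, in the range
 * [0, SGE_NTIMERS).  Unlike the packet-count index this can be changed
 * while the queues are live; the new value is pushed to every rx queue's
 * intr_params.
 */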
7183 static int
7184 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
7185 {
7186 	struct vi_info *vi = arg1;
7187 	struct adapter *sc = vi->adapter;
7188 	int idx, rc, i;
7189 	struct sge_rxq *rxq;
7190 	uint8_t v;
7191 
7192 	idx = vi->tmr_idx;
7193 
7194 	rc = sysctl_handle_int(oidp, &idx, 0, req);
7195 	if (rc != 0 || req->newptr == NULL)
7196 		return (rc);
7197 
7198 	if (idx < 0 || idx >= SGE_NTIMERS)
7199 		return (EINVAL);
7200 
7201 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7202 	    "t4tmr");
7203 	if (rc)
7204 		return (rc);
7205 
7206 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
7207 	for_each_rxq(vi, i, rxq) {
7208 #ifdef atomic_store_rel_8
7209 		atomic_store_rel_8(&rxq->iq.intr_params, v);
7210 #else
7211 		rxq->iq.intr_params = v;
7212 #endif
7213 	}
7214 	vi->tmr_idx = idx;
7215 
7216 	end_synchronized_op(sc, LOCK_HELD);
7217 	return (0);
7218 }
7219 
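/*
 * Interrupt holdoff packet-count index, in the range [-1, SGE_NCOUNTERS);
 * -1 disables the packet counter.  Read-only once the VI's queues have
 * been created (VI_INIT_DONE).
 */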
7220 static int
7221 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
7222 {
7223 	struct vi_info *vi = arg1;
7224 	struct adapter *sc = vi->adapter;
7225 	int idx, rc;
7226 
7227 	idx = vi->pktc_idx;
7228 
7229 	rc = sysctl_handle_int(oidp, &idx, 0, req);
7230 	if (rc != 0 || req->newptr == NULL)
7231 		return (rc);
7232 
7233 	if (idx < -1 || idx >= SGE_NCOUNTERS)
7234 		return (EINVAL);
7235 
7236 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7237 	    "t4pktc");
7238 	if (rc)
7239 		return (rc);
7240 
7241 	if (vi->flags & VI_INIT_DONE)
7242 		rc = EBUSY; /* cannot be changed once the queues are created */
7243 	else
7244 		vi->pktc_idx = idx;
7245 
7246 	end_synchronized_op(sc, LOCK_HELD);
7247 	return (rc);
7248 }
7249 
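/*
 * rx queue size: at least 128 and a multiple of 8, fixed once the VI's
 * queues have been created.  sysctl_qsize_txq below is the tx analogue
 * (128 to 65536).
 */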
7250 static int
7251 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
7252 {
7253 	struct vi_info *vi = arg1;
7254 	struct adapter *sc = vi->adapter;
7255 	int qsize, rc;
7256 
7257 	qsize = vi->qsize_rxq;
7258 
7259 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
7260 	if (rc != 0 || req->newptr == NULL)
7261 		return (rc);
7262 
7263 	if (qsize < 128 || (qsize & 7))
7264 		return (EINVAL);
7265 
7266 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7267 	    "t4rxqs");
7268 	if (rc)
7269 		return (rc);
7270 
7271 	if (vi->flags & VI_INIT_DONE)
7272 		rc = EBUSY; /* cannot be changed once the queues are created */
7273 	else
7274 		vi->qsize_rxq = qsize;
7275 
7276 	end_synchronized_op(sc, LOCK_HELD);
7277 	return (rc);
7278 }
7279 
7280 static int
7281 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
7282 {
7283 	struct vi_info *vi = arg1;
7284 	struct adapter *sc = vi->adapter;
7285 	int qsize, rc;
7286 
7287 	qsize = vi->qsize_txq;
7288 
7289 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
7290 	if (rc != 0 || req->newptr == NULL)
7291 		return (rc);
7292 
7293 	if (qsize < 128 || qsize > 65536)
7294 		return (EINVAL);
7295 
7296 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7297 	    "t4txqs");
7298 	if (rc)
7299 		return (rc);
7300 
7301 	if (vi->flags & VI_INIT_DONE)
7302 		rc = EBUSY; /* cannot be changed once the queues are created */
7303 	else
7304 		vi->qsize_txq = qsize;
7305 
7306 	end_synchronized_op(sc, LOCK_HELD);
7307 	return (rc);
7308 }
7309 
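/*
 * Pause frame settings for the port.  A read reports the active settings
 * as a bit string (the requested settings while the link is down).  A
 * write takes a single digit that is the OR of the PAUSE_* flags; going by
 * the "\20\1RX\2TX\3AUTO" format below, RX is 1, TX is 2, and autoneg is
 * 4.  For example (assuming the OID is attached as
 * dev.cxgbe.0.pause_settings):
 *
 *	# sysctl dev.cxgbe.0.pause_settings=3	# request RX and TX pause
 */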
7310 static int
7311 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
7312 {
7313 	struct port_info *pi = arg1;
7314 	struct adapter *sc = pi->adapter;
7315 	struct link_config *lc = &pi->link_cfg;
7316 	int rc;
7317 
7318 	if (req->newptr == NULL) {
7319 		struct sbuf *sb;
7320 		static char *bits = "\20\1RX\2TX\3AUTO";
7321 
7322 		rc = sysctl_wire_old_buffer(req, 0);
7323 		if (rc != 0)
7324 			return (rc);
7325 
7326 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7327 		if (sb == NULL)
7328 			return (ENOMEM);
7329 
7330 		if (lc->link_ok) {
7331 			sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
7332 			    (lc->requested_fc & PAUSE_AUTONEG), bits);
7333 		} else {
7334 			sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
7335 			    PAUSE_RX | PAUSE_AUTONEG), bits);
7336 		}
7337 		rc = sbuf_finish(sb);
7338 		sbuf_delete(sb);
7339 	} else {
7340 		char s[2];
7341 		int n;
7342 
7343 		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
7344 		    PAUSE_AUTONEG));
7345 		s[1] = 0;
7346 
7347 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
7348 		if (rc != 0)
7349 			return (rc);
7350 
7351 		if (s[1] != 0)
7352 			return (EINVAL);
7353 		if (s[0] < '0' || s[0] > '9')
7354 			return (EINVAL);	/* not a number */
7355 		n = s[0] - '0';
7356 		if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
7357 			return (EINVAL);	/* some other bit is set too */
7358 
7359 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7360 		    "t4PAUSE");
7361 		if (rc)
7362 			return (rc);
7363 		PORT_LOCK(pi);
7364 		lc->requested_fc = n;
7365 		fixup_link_config(pi);
7366 		if (pi->up_vis > 0)
7367 			rc = apply_link_config(pi);
7368 		set_current_media(pi);
7369 		PORT_UNLOCK(pi);
7370 		end_synchronized_op(sc, 0);
7371 	}
7372 
7373 	return (rc);
7374 }
7375 
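/*
 * Requested FEC for the port.  A write of -1 selects FEC_AUTO (driver and
 * firmware decide), 0 selects no FEC, and any other value is a mask of
 * FW_PORT_CAP32_FEC bits (optionally with FEC_MODULE); a mask outside the
 * port's capabilities fails with ENOTSUP.  For example (assuming the OID
 * is attached as dev.cxgbe.0.fec):
 *
 *	# sysctl dev.cxgbe.0.fec=-1	# let the driver pick
 */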
7376 static int
7377 sysctl_fec(SYSCTL_HANDLER_ARGS)
7378 {
7379 	struct port_info *pi = arg1;
7380 	struct adapter *sc = pi->adapter;
7381 	struct link_config *lc = &pi->link_cfg;
7382 	int rc;
7383 	int8_t old;
7384 
7385 	if (req->newptr == NULL) {
7386 		struct sbuf *sb;
7387 		static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
7388 		    "\5RSVD3\6auto\7module";
7389 
7390 		rc = sysctl_wire_old_buffer(req, 0);
7391 		if (rc != 0)
7392 			return (rc);
7393 
7394 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7395 		if (sb == NULL)
7396 			return (ENOMEM);
7397 
7398 		/*
7399 		 * Display the requested_fec when the link is down -- the actual
7400 		 * FEC makes sense only when the link is up.
7401 		 */
7402 		if (lc->link_ok) {
7403 			sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) |
7404 			    (lc->requested_fec & (FEC_AUTO | FEC_MODULE)),
7405 			    bits);
7406 		} else {
7407 			sbuf_printf(sb, "%b", lc->requested_fec, bits);
7408 		}
7409 		rc = sbuf_finish(sb);
7410 		sbuf_delete(sb);
7411 	} else {
7412 		char s[8];
7413 		int n;
7414 
7415 		snprintf(s, sizeof(s), "%d",
7416 		    lc->requested_fec == FEC_AUTO ? -1 :
7417 		    lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
7418 
7419 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
7420 		if (rc != 0)
7421 			return (rc);
7422 
7423 		n = strtol(&s[0], NULL, 0);
7424 		if (n < 0 || n & FEC_AUTO)
7425 			n = FEC_AUTO;
7426 		else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
7427 			return (EINVAL);	/* some other bit is set too */
7428 
7429 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7430 		    "t4fec");
7431 		if (rc)
7432 			return (rc);
7433 		PORT_LOCK(pi);
7434 		old = lc->requested_fec;
7435 		if (n == FEC_AUTO)
7436 			lc->requested_fec = FEC_AUTO;
7437 		else if (n == 0 || n == FEC_NONE)
7438 			lc->requested_fec = FEC_NONE;
7439 		else {
7440 			if ((lc->pcaps |
7441 			    V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
7442 			    lc->pcaps) {
7443 				rc = ENOTSUP;
7444 				goto done;
7445 			}
7446 			lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
7447 			    FEC_MODULE);
7448 		}
7449 		fixup_link_config(pi);
7450 		if (pi->up_vis > 0) {
7451 			rc = apply_link_config(pi);
7452 			if (rc != 0) {
7453 				lc->requested_fec = old;
7454 				if (rc == FW_EPROTO)
7455 					rc = ENOTSUP;
7456 			}
7457 		}
7458 done:
7459 		PORT_UNLOCK(pi);
7460 		end_synchronized_op(sc, 0);
7461 	}
7462 
7463 	return (rc);
7464 }
7465 
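/*
 * FEC recommended by the installed transceiver module (lc->fec_hint).
 * Read-only; reports "n/a" when no module is plugged in or the port is not
 * capable of FEC.
 */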
7466 static int
7467 sysctl_module_fec(SYSCTL_HANDLER_ARGS)
7468 {
7469 	struct port_info *pi = arg1;
7470 	struct adapter *sc = pi->adapter;
7471 	struct link_config *lc = &pi->link_cfg;
7472 	int rc;
7473 	int8_t fec;
7474 	struct sbuf *sb;
7475 	static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
7476 
7477 	rc = sysctl_wire_old_buffer(req, 0);
7478 	if (rc != 0)
7479 		return (rc);
7480 
7481 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7482 	if (sb == NULL)
7483 		return (ENOMEM);
7484 
7485 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
		sbuf_delete(sb);	/* don't leak the sbuf on failure */
7486 		return (EBUSY);
	}
7487 	PORT_LOCK(pi);
7488 	if (pi->up_vis == 0) {
7489 		/*
7490 		 * If all the interfaces are administratively down the firmware
7491 		 * does not report transceiver changes.  Refresh port info here.
7492 		 * This is the only reason we have a synchronized op in this
7493 		 * function.  Just PORT_LOCK would have been enough otherwise.
7494 		 */
7495 		t4_update_port_info(pi);
7496 	}
7497 
7498 	fec = lc->fec_hint;
7499 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
7500 	    !fec_supported(lc->pcaps)) {
7501 		sbuf_printf(sb, "n/a");
7502 	} else {
7503 		if (fec == 0)
7504 			fec = FEC_NONE;
7505 		sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
7506 	}
7507 	rc = sbuf_finish(sb);
7508 	sbuf_delete(sb);
7509 
7510 	PORT_UNLOCK(pi);
7511 	end_synchronized_op(sc, 0);
7512 
7513 	return (rc);
7514 }
7515 
7516 static int
7517 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
7518 {
7519 	struct port_info *pi = arg1;
7520 	struct adapter *sc = pi->adapter;
7521 	struct link_config *lc = &pi->link_cfg;
7522 	int rc, val;
7523 
7524 	if (lc->pcaps & FW_PORT_CAP32_ANEG)
7525 		val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
7526 	else
7527 		val = -1;
7528 	rc = sysctl_handle_int(oidp, &val, 0, req);
7529 	if (rc != 0 || req->newptr == NULL)
7530 		return (rc);
7531 	if (val == 0)
7532 		val = AUTONEG_DISABLE;
7533 	else if (val == 1)
7534 		val = AUTONEG_ENABLE;
7535 	else
7536 		val = AUTONEG_AUTO;
7537 
7538 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7539 	    "t4aneg");
7540 	if (rc)
7541 		return (rc);
7542 	PORT_LOCK(pi);
7543 	if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
7544 		rc = ENOTSUP;
7545 		goto done;
7546 	}
7547 	lc->requested_aneg = val;
7548 	fixup_link_config(pi);
7549 	if (pi->up_vis > 0)
7550 		rc = apply_link_config(pi);
7551 	set_current_media(pi);
7552 done:
7553 	PORT_UNLOCK(pi);
7554 	end_synchronized_op(sc, 0);
7555 	return (rc);
7556 }
7557 
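/*
 * Backend for the SYSCTL_ADD_T4_REG64 OIDs created above: arg2 is the
 * address of the 64-bit register to read.
 */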
7558 static int
7559 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
7560 {
7561 	struct adapter *sc = arg1;
7562 	int reg = arg2;
7563 	uint64_t val;
7564 
7565 	val = t4_read_reg64(sc, reg);
7566 
7567 	return (sysctl_handle_64(oidp, &val, 0, req));
7568 }
7569 
7570 static int
7571 sysctl_temperature(SYSCTL_HANDLER_ARGS)
7572 {
7573 	struct adapter *sc = arg1;
7574 	int rc, t;
7575 	uint32_t param, val;
7576 
7577 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
7578 	if (rc)
7579 		return (rc);
7580 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7581 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7582 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
7583 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7584 	end_synchronized_op(sc, 0);
7585 	if (rc)
7586 		return (rc);
7587 
7588 	/* unknown is returned as 0 but we display -1 in that case */
7589 	t = val == 0 ? -1 : val;
7590 
7591 	rc = sysctl_handle_int(oidp, &t, 0, req);
7592 	return (rc);
7593 }
7594 
7595 static int
7596 sysctl_vdd(SYSCTL_HANDLER_ARGS)
7597 {
7598 	struct adapter *sc = arg1;
7599 	int rc;
7600 	uint32_t param, val;
7601 
7602 	if (sc->params.core_vdd == 0) {
7603 		rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7604 		    "t4vdd");
7605 		if (rc)
7606 			return (rc);
7607 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7608 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7609 		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
7610 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7611 		end_synchronized_op(sc, 0);
7612 		if (rc)
7613 			return (rc);
7614 		sc->params.core_vdd = val;
7615 	}
7616 
7617 	return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
7618 }
7619 
7620 static int
7621 sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
7622 {
7623 	struct adapter *sc = arg1;
7624 	int rc, v;
7625 	uint32_t param, val;
7626 
7627 	v = sc->sensor_resets;
7628 	rc = sysctl_handle_int(oidp, &v, 0, req);
7629 	if (rc != 0 || req->newptr == NULL || v <= 0)
7630 		return (rc);
7631 
7632 	if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
7633 	    chip_id(sc) < CHELSIO_T5)
7634 		return (ENOTSUP);
7635 
7636 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
7637 	if (rc)
7638 		return (rc);
7639 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7640 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7641 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
7642 	val = 1;
7643 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7644 	end_synchronized_op(sc, 0);
7645 	if (rc == 0)
7646 		sc->sensor_resets++;
7647 	return (rc);
7648 }
7649 
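/*
 * Firmware load averages: three bytes packed into a single 32-bit device
 * parameter, with 0xffffffff meaning the firmware doesn't keep them.
 */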
7650 static int
7651 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
7652 {
7653 	struct adapter *sc = arg1;
7654 	struct sbuf *sb;
7655 	int rc;
7656 	uint32_t param, val;
7657 
7658 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
7659 	if (rc)
7660 		return (rc);
7661 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7662 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
7663 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7664 	end_synchronized_op(sc, 0);
7665 	if (rc)
7666 		return (rc);
7667 
7668 	rc = sysctl_wire_old_buffer(req, 0);
7669 	if (rc != 0)
7670 		return (rc);
7671 
7672 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7673 	if (sb == NULL)
7674 		return (ENOMEM);
7675 
7676 	if (val == 0xffffffff) {
7677 		/* Only debug and custom firmwares report load averages. */
7678 		sbuf_printf(sb, "not available");
7679 	} else {
7680 		sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
7681 		    (val >> 16) & 0xff);
7682 	}
7683 	rc = sbuf_finish(sb);
7684 	sbuf_delete(sb);
7685 
7686 	return (rc);
7687 }
7688 
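/*
 * TP congestion control table: the additive increments for all 16 MTUs in
 * each congestion control window, followed by that window's a_wnd
 * parameter and decrement factor (b_wnd).
 */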
7689 static int
7690 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
7691 {
7692 	struct adapter *sc = arg1;
7693 	struct sbuf *sb;
7694 	int rc, i;
7695 	uint16_t incr[NMTUS][NCCTRL_WIN];
7696 	static const char *dec_fac[] = {
7697 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
7698 		"0.9375"
7699 	};
7700 
7701 	rc = sysctl_wire_old_buffer(req, 0);
7702 	if (rc != 0)
7703 		return (rc);
7704 
7705 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7706 	if (sb == NULL)
7707 		return (ENOMEM);
7708 
7709 	t4_read_cong_tbl(sc, incr);
7710 
7711 	for (i = 0; i < NCCTRL_WIN; ++i) {
7712 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
7713 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
7714 		    incr[5][i], incr[6][i], incr[7][i]);
7715 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
7716 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
7717 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
7718 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
7719 	}
7720 
7721 	rc = sbuf_finish(sb);
7722 	sbuf_delete(sb);
7723 
7724 	return (rc);
7725 }
7726 
7727 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
7728 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
7729 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
7730 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
7731 };
7732 
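/*
 * Dump a CIM queue.  arg2 selects the queue: values below CIM_NUM_IBQ pick
 * an inbound queue, the rest pick an outbound queue; qname[] above gives
 * the display names.
 */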
7733 static int
7734 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
7735 {
7736 	struct adapter *sc = arg1;
7737 	struct sbuf *sb;
7738 	int rc, i, n, qid = arg2;
7739 	uint32_t *buf, *p;
7740 	char *qtype;
7741 	u_int cim_num_obq = sc->chip_params->cim_num_obq;
7742 
7743 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
7744 	    ("%s: bad qid %d\n", __func__, qid));
7745 
7746 	if (qid < CIM_NUM_IBQ) {
7747 		/* inbound queue */
7748 		qtype = "IBQ";
7749 		n = 4 * CIM_IBQ_SIZE;
7750 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7751 		rc = t4_read_cim_ibq(sc, qid, buf, n);
7752 	} else {
7753 		/* outbound queue */
7754 		qtype = "OBQ";
7755 		qid -= CIM_NUM_IBQ;
7756 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
7757 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7758 		rc = t4_read_cim_obq(sc, qid, buf, n);
7759 	}
7760 
7761 	if (rc < 0) {
7762 		rc = -rc;
7763 		goto done;
7764 	}
7765 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
7766 
7767 	rc = sysctl_wire_old_buffer(req, 0);
7768 	if (rc != 0)
7769 		goto done;
7770 
7771 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7772 	if (sb == NULL) {
7773 		rc = ENOMEM;
7774 		goto done;
7775 	}
7776 
7777 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
7778 	for (i = 0, p = buf; i < n; i += 16, p += 4)
7779 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
7780 		    p[2], p[3]);
7781 
7782 	rc = sbuf_finish(sb);
7783 	sbuf_delete(sb);
7784 done:
7785 	free(buf, M_CXGBE);
7786 	return (rc);
7787 }
7788 
7789 static void
7790 sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
7791 {
7792 	uint32_t *p;
7793 
7794 	sbuf_printf(sb, "Status   Data      PC%s",
7795 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
7796 	    "     LS0Stat  LS0Addr             LS0Data");
7797 
7798 	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
7799 		if (cfg & F_UPDBGLACAPTPCONLY) {
7800 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
7801 			    p[6], p[7]);
7802 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
7803 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
7804 			    p[4] & 0xff, p[5] >> 8);
7805 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
7806 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7807 			    p[1] & 0xf, p[2] >> 4);
7808 		} else {
7809 			sbuf_printf(sb,
7810 			    "\n  %02x   %x%07x %x%07x %08x %08x "
7811 			    "%08x%08x%08x%08x",
7812 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7813 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
7814 			    p[6], p[7]);
7815 		}
7816 	}
7817 }
7818 
7819 static void
7820 sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
7821 {
7822 	uint32_t *p;
7823 
7824 	sbuf_printf(sb, "Status   Inst    Data      PC%s",
7825 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
7826 	    "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");
7827 
7828 	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
7829 		if (cfg & F_UPDBGLACAPTPCONLY) {
7830 			sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
7831 			    p[3] & 0xff, p[2], p[1], p[0]);
7832 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
7833 			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
7834 			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
7835 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
7836 			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
7837 			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
7838 			    p[6] >> 16);
7839 		} else {
7840 			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
7841 			    "%08x %08x %08x %08x %08x %08x",
7842 			    (p[9] >> 16) & 0xff,
7843 			    p[9] & 0xffff, p[8] >> 16,
7844 			    p[8] & 0xffff, p[7] >> 16,
7845 			    p[7] & 0xffff, p[6] >> 16,
7846 			    p[2], p[1], p[0], p[5], p[4], p[3]);
7847 		}
7848 	}
7849 }
7850 
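/*
 * Read the CIM logic analyzer into sb.  The record layout differs by chip:
 * 8 words per entry on T4/T5 and 10 on T6.  flags must be M_WAITOK or
 * M_NOWAIT and controls the buffer allocation only.
 */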
7851 static int
7852 sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
7853 {
7854 	uint32_t cfg, *buf;
7855 	int rc;
7856 
7857 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
7858 	if (rc != 0)
7859 		return (rc);
7860 
7861 	MPASS(flags == M_WAITOK || flags == M_NOWAIT);
7862 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
7863 	    M_ZERO | flags);
7864 	if (buf == NULL)
7865 		return (ENOMEM);
7866 
7867 	rc = -t4_cim_read_la(sc, buf, NULL);
7868 	if (rc != 0)
7869 		goto done;
7870 	if (chip_id(sc) < CHELSIO_T6)
7871 		sbuf_cim_la4(sc, sb, buf, cfg);
7872 	else
7873 		sbuf_cim_la6(sc, sb, buf, cfg);
7874 
7875 done:
7876 	free(buf, M_CXGBE);
7877 	return (rc);
7878 }
7879 
7880 static int
7881 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
7882 {
7883 	struct adapter *sc = arg1;
7884 	struct sbuf *sb;
7885 	int rc;
7886 
7887 	rc = sysctl_wire_old_buffer(req, 0);
7888 	if (rc != 0)
7889 		return (rc);
7890 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7891 	if (sb == NULL)
7892 		return (ENOMEM);
7893 
7894 	rc = sbuf_cim_la(sc, sb, M_WAITOK);
7895 	if (rc == 0)
7896 		rc = sbuf_finish(sb);
7897 	sbuf_delete(sb);
7898 	return (rc);
7899 }
7900 
7901 bool
7902 t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose)
7903 {
7904 	struct sbuf sb;
7905 	int rc;
7906 
7907 	if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
7908 		return (false);
7909 	rc = sbuf_cim_la(sc, &sb, M_NOWAIT);
7910 	if (rc == 0) {
7911 		rc = sbuf_finish(&sb);
7912 		if (rc == 0) {
7913 			log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s",
7914 			    device_get_nameunit(sc->dev), sbuf_data(&sb));
7915 		}
7916 	}
7917 	sbuf_delete(&sb);
7918 	return (false);
7919 }
7920 
7921 static int
7922 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
7923 {
7924 	struct adapter *sc = arg1;
7925 	u_int i;
7926 	struct sbuf *sb;
7927 	uint32_t *buf, *p;
7928 	int rc;
7929 
7930 	rc = sysctl_wire_old_buffer(req, 0);
7931 	if (rc != 0)
7932 		return (rc);
7933 
7934 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7935 	if (sb == NULL)
7936 		return (ENOMEM);
7937 
7938 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
7939 	    M_ZERO | M_WAITOK);
7940 
7941 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
7942 	p = buf;
7943 
7944 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7945 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
7946 		    p[1], p[0]);
7947 	}
7948 
7949 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
7950 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7951 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
7952 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
7953 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
7954 		    (p[1] >> 2) | ((p[2] & 3) << 30),
7955 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
7956 		    p[0] & 1);
7957 	}
7958 
7959 	rc = sbuf_finish(sb);
7960 	sbuf_delete(sb);
7961 	free(buf, M_CXGBE);
7962 	return (rc);
7963 }
7964 
7965 static int
7966 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
7967 {
7968 	struct adapter *sc = arg1;
7969 	u_int i;
7970 	struct sbuf *sb;
7971 	uint32_t *buf, *p;
7972 	int rc;
7973 
7974 	rc = sysctl_wire_old_buffer(req, 0);
7975 	if (rc != 0)
7976 		return (rc);
7977 
7978 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7979 	if (sb == NULL)
7980 		return (ENOMEM);
7981 
7982 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
7983 	    M_ZERO | M_WAITOK);
7984 
7985 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
7986 	p = buf;
7987 
7988 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
7989 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7990 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
7991 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
7992 		    p[4], p[3], p[2], p[1], p[0]);
7993 	}
7994 
7995 	sbuf_printf(sb, "\n\nCntl ID               Data");
7996 	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7997 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
7998 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
7999 	}
8000 
8001 	rc = sbuf_finish(sb);
8002 	sbuf_delete(sb);
8003 	free(buf, M_CXGBE);
8004 	return (rc);
8005 }
8006 
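/*
 * CIM queue configuration: base/size/threshold from t4_read_cimq_cfg()
 * along with the read/write pointers and SOP/EOP counts read from the
 * queue status registers.
 */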
8007 static int
8008 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
8009 {
8010 	struct adapter *sc = arg1;
8011 	struct sbuf *sb;
8012 	int rc, i;
8013 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
8014 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
8015 	uint16_t thres[CIM_NUM_IBQ];
8016 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
8017 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
8018 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
8019 
8020 	cim_num_obq = sc->chip_params->cim_num_obq;
8021 	if (is_t4(sc)) {
8022 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
8023 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
8024 	} else {
8025 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
8026 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
8027 	}
8028 	nq = CIM_NUM_IBQ + cim_num_obq;
8029 
8030 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
8031 	if (rc == 0)
8032 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
8033 	if (rc != 0)
8034 		return (rc);
8035 
8036 	t4_read_cimq_cfg(sc, base, size, thres);
8037 
8038 	rc = sysctl_wire_old_buffer(req, 0);
8039 	if (rc != 0)
8040 		return (rc);
8041 
8042 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
8043 	if (sb == NULL)
8044 		return (ENOMEM);
8045 
8046 	sbuf_printf(sb,
8047 	    "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail");
8048 
8049 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
8050 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
8051 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
8052 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
8053 		    G_QUEREMFLITS(p[2]) * 16);
8054 	for ( ; i < nq; i++, p += 4, wr += 2)
8055 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
8056 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
8057 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
8058 		    G_QUEREMFLITS(p[2]) * 16);
8059 
8060 	rc = sbuf_finish(sb);
8061 	sbuf_delete(sb);
8062 
8063 	return (rc);
8064 }
8065 
8066 static int
8067 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
8068 {
8069 	struct adapter *sc = arg1;
8070 	struct sbuf *sb;
8071 	int rc;
8072 	struct tp_cpl_stats stats;
8073 
8074 	rc = sysctl_wire_old_buffer(req, 0);
8075 	if (rc != 0)
8076 		return (rc);
8077 
8078 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8079 	if (sb == NULL)
8080 		return (ENOMEM);
8081 
8082 	mtx_lock(&sc->reg_lock);
8083 	t4_tp_get_cpl_stats(sc, &stats, 0);
8084 	mtx_unlock(&sc->reg_lock);
8085 
8086 	if (sc->chip_params->nchan > 2) {
8087 		sbuf_printf(sb, "                 channel 0  channel 1"
8088 		    "  channel 2  channel 3");
8089 		sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
8090 		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
8091 		sbuf_printf(sb, "\nCPL responses:   %10u %10u %10u %10u",
8092 		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
8093 	} else {
8094 		sbuf_printf(sb, "                 channel 0  channel 1");
8095 		sbuf_printf(sb, "\nCPL requests:   %10u %10u",
8096 		    stats.req[0], stats.req[1]);
8097 		sbuf_printf(sb, "\nCPL responses:   %10u %10u",
8098 		    stats.rsp[0], stats.rsp[1]);
8099 	}
8100 
8101 	rc = sbuf_finish(sb);
8102 	sbuf_delete(sb);
8103 
8104 	return (rc);
8105 }
8106 
8107 static int
8108 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
8109 {
8110 	struct adapter *sc = arg1;
8111 	struct sbuf *sb;
8112 	int rc;
8113 	struct tp_usm_stats stats;
8114 
8115 	rc = sysctl_wire_old_buffer(req, 0);
8116 	if (rc != 0)
8117 		return (rc);
8118 
8119 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8120 	if (sb == NULL)
8121 		return (ENOMEM);
8122 
8123 	t4_get_usm_stats(sc, &stats, 1);
8124 
8125 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
8126 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
8127 	sbuf_printf(sb, "Drops:  %u", stats.drops);
8128 
8129 	rc = sbuf_finish(sb);
8130 	sbuf_delete(sb);
8131 
8132 	return (rc);
8133 }
8134 
8135 static const char * const devlog_level_strings[] = {
8136 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
8137 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
8138 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
8139 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
8140 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
8141 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
8142 };
8143 
8144 static const char * const devlog_facility_strings[] = {
8145 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
8146 	[FW_DEVLOG_FACILITY_CF]		= "CF",
8147 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
8148 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
8149 	[FW_DEVLOG_FACILITY_RES]	= "RES",
8150 	[FW_DEVLOG_FACILITY_HW]		= "HW",
8151 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
8152 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
8153 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
8154 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
8155 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
8156 	[FW_DEVLOG_FACILITY_VI]		= "VI",
8157 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
8158 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
8159 	[FW_DEVLOG_FACILITY_TM]		= "TM",
8160 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
8161 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
8162 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
8163 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
8164 	[FW_DEVLOG_FACILITY_RI]		= "RI",
8165 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
8166 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
8167 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
8168 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
8169 	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
8170 };
8171 
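/*
 * Format the firmware's device log into sb.  The log is a circular buffer
 * of fw_devlog_e entries; find the oldest entry (smallest timestamp) and
 * display forward from there.
 */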
8172 static int
8173 sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
8174 {
8175 	int i, j, rc, nentries, first = 0;
8176 	struct devlog_params *dparams = &sc->params.devlog;
8177 	struct fw_devlog_e *buf, *e;
8178 	uint64_t ftstamp = UINT64_MAX;
8179 
8180 	if (dparams->addr == 0)
8181 		return (ENXIO);
8182 
8183 	MPASS(flags == M_WAITOK || flags == M_NOWAIT);
8184 	buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
8185 	if (buf == NULL)
8186 		return (ENOMEM);
8187 
8188 	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
8189 	if (rc != 0)
8190 		goto done;
8191 
8192 	nentries = dparams->size / sizeof(struct fw_devlog_e);
8193 	for (i = 0; i < nentries; i++) {
8194 		e = &buf[i];
8195 
8196 		if (e->timestamp == 0)
8197 			break;	/* end */
8198 
8199 		e->timestamp = be64toh(e->timestamp);
8200 		e->seqno = be32toh(e->seqno);
8201 		for (j = 0; j < 8; j++)
8202 			e->params[j] = be32toh(e->params[j]);
8203 
8204 		if (e->timestamp < ftstamp) {
8205 			ftstamp = e->timestamp;
8206 			first = i;
8207 		}
8208 	}
8209 
8210 	if (buf[first].timestamp == 0)
8211 		goto done;	/* nothing in the log */
8212 
8213 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
8214 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
8215 
8216 	i = first;
8217 	do {
8218 		e = &buf[i];
8219 		if (e->timestamp == 0)
8220 			break;	/* end */
8221 
8222 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
8223 		    e->seqno, e->timestamp,
8224 		    (e->level < nitems(devlog_level_strings) ?
8225 			devlog_level_strings[e->level] : "UNKNOWN"),
8226 		    (e->facility < nitems(devlog_facility_strings) ?
8227 			devlog_facility_strings[e->facility] : "UNKNOWN"));
8228 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
8229 		    e->params[2], e->params[3], e->params[4],
8230 		    e->params[5], e->params[6], e->params[7]);
8231 
8232 		if (++i == nentries)
8233 			i = 0;
8234 	} while (i != first);
8235 done:
8236 	free(buf, M_CXGBE);
8237 	return (rc);
8238 }
8239 
8240 static int
8241 sysctl_devlog(SYSCTL_HANDLER_ARGS)
8242 {
8243 	struct adapter *sc = arg1;
8244 	int rc;
8245 	struct sbuf *sb;
8246 
8247 	rc = sysctl_wire_old_buffer(req, 0);
8248 	if (rc != 0)
8249 		return (rc);
8250 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8251 	if (sb == NULL)
8252 		return (ENOMEM);
8253 
8254 	rc = sbuf_devlog(sc, sb, M_WAITOK);
8255 	if (rc == 0)
8256 		rc = sbuf_finish(sb);
8257 	sbuf_delete(sb);
8258 	return (rc);
8259 }
8260 
8261 void
8262 t4_os_dump_devlog(struct adapter *sc)
8263 {
8264 	int rc;
8265 	struct sbuf sb;
8266 
8267 	if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
8268 		return;
8269 	rc = sbuf_devlog(sc, &sb, M_NOWAIT);
8270 	if (rc == 0) {
8271 		rc = sbuf_finish(&sb);
8272 		if (rc == 0) {
8273 			log(LOG_DEBUG, "%s: device log follows.\n%s",
8274 			    device_get_nameunit(sc->dev), sbuf_data(&sb));
8275 		}
8276 	}
8277 	sbuf_delete(&sb);
8278 }
8279 
8280 static int
8281 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
8282 {
8283 	struct adapter *sc = arg1;
8284 	struct sbuf *sb;
8285 	int rc;
8286 	struct tp_fcoe_stats stats[MAX_NCHAN];
8287 	int i, nchan = sc->chip_params->nchan;
8288 
8289 	rc = sysctl_wire_old_buffer(req, 0);
8290 	if (rc != 0)
8291 		return (rc);
8292 
8293 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8294 	if (sb == NULL)
8295 		return (ENOMEM);
8296 
8297 	for (i = 0; i < nchan; i++)
8298 		t4_get_fcoe_stats(sc, i, &stats[i], 1);
8299 
8300 	if (nchan > 2) {
8301 		sbuf_printf(sb, "                   channel 0        channel 1"
8302 		    "        channel 2        channel 3");
8303 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
8304 		    stats[0].octets_ddp, stats[1].octets_ddp,
8305 		    stats[2].octets_ddp, stats[3].octets_ddp);
8306 		sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
8307 		    stats[0].frames_ddp, stats[1].frames_ddp,
8308 		    stats[2].frames_ddp, stats[3].frames_ddp);
8309 		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
8310 		    stats[0].frames_drop, stats[1].frames_drop,
8311 		    stats[2].frames_drop, stats[3].frames_drop);
8312 	} else {
8313 		sbuf_printf(sb, "                   channel 0        channel 1");
8314 		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
8315 		    stats[0].octets_ddp, stats[1].octets_ddp);
8316 		sbuf_printf(sb, "\nframesDDP:  %16u %16u",
8317 		    stats[0].frames_ddp, stats[1].frames_ddp);
8318 		sbuf_printf(sb, "\nframesDrop: %16u %16u",
8319 		    stats[0].frames_drop, stats[1].frames_drop);
8320 	}
8321 
8322 	rc = sbuf_finish(sb);
8323 	sbuf_delete(sb);
8324 
8325 	return (rc);
8326 }
8327 
8328 static int
8329 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
8330 {
8331 	struct adapter *sc = arg1;
8332 	struct sbuf *sb;
8333 	int rc, i;
8334 	unsigned int map, kbps, ipg, mode;
8335 	unsigned int pace_tab[NTX_SCHED];
8336 
8337 	rc = sysctl_wire_old_buffer(req, 0);
8338 	if (rc != 0)
8339 		return (rc);
8340 
8341 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8342 	if (sb == NULL)
8343 		return (ENOMEM);
8344 
8345 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
8346 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
8347 	t4_read_pace_tbl(sc, pace_tab);
8348 
8349 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
8350 	    "Class IPG (0.1 ns)   Flow IPG (us)");
8351 
8352 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
8353 		t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
8354 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
8355 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
8356 		if (kbps)
8357 			sbuf_printf(sb, "%9u     ", kbps);
8358 		else
8359 			sbuf_printf(sb, " disabled     ");
8360 
8361 		if (ipg)
8362 			sbuf_printf(sb, "%13u        ", ipg);
8363 		else
8364 			sbuf_printf(sb, "     disabled        ");
8365 
8366 		if (pace_tab[i])
8367 			sbuf_printf(sb, "%10u", pace_tab[i]);
8368 		else
8369 			sbuf_printf(sb, "  disabled");
8370 	}
8371 
8372 	rc = sbuf_finish(sb);
8373 	sbuf_delete(sb);
8374 
8375 	return (rc);
8376 }
8377 
8378 static int
8379 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
8380 {
8381 	struct adapter *sc = arg1;
8382 	struct sbuf *sb;
8383 	int rc, i, j;
8384 	uint64_t *p0, *p1;
8385 	struct lb_port_stats s[2];
8386 	static const char *stat_name[] = {
8387 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
8388 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
8389 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
8390 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
8391 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
8392 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
8393 		"BG2FramesTrunc:", "BG3FramesTrunc:"
8394 	};
8395 
8396 	rc = sysctl_wire_old_buffer(req, 0);
8397 	if (rc != 0)
8398 		return (rc);
8399 
8400 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8401 	if (sb == NULL)
8402 		return (ENOMEM);
8403 
8404 	memset(s, 0, sizeof(s));
8405 
8406 	for (i = 0; i < sc->chip_params->nchan; i += 2) {
8407 		t4_get_lb_stats(sc, i, &s[0]);
8408 		t4_get_lb_stats(sc, i + 1, &s[1]);
8409 
8410 		p0 = &s[0].octets;
8411 		p1 = &s[1].octets;
8412 		sbuf_printf(sb, "%s                       Loopback %u"
8413 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
8414 
8415 		for (j = 0; j < nitems(stat_name); j++)
8416 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
8417 				   *p0++, *p1++);
8418 	}
8419 
8420 	rc = sbuf_finish(sb);
8421 	sbuf_delete(sb);
8422 
8423 	return (rc);
8424 }
8425 
8426 static int
8427 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
8428 {
8429 	int rc = 0;
8430 	struct port_info *pi = arg1;
8431 	struct link_config *lc = &pi->link_cfg;
8432 	struct sbuf *sb;
8433 
8434 	rc = sysctl_wire_old_buffer(req, 0);
8435 	if (rc != 0)
8436 		return (rc);
8437 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
8438 	if (sb == NULL)
8439 		return (ENOMEM);
8440 
8441 	if (lc->link_ok || lc->link_down_rc == 255)
8442 		sbuf_printf(sb, "n/a");
8443 	else
8444 		sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
8445 
8446 	rc = sbuf_finish(sb);
8447 	sbuf_delete(sb);
8448 
8449 	return (rc);
8450 }
8451 
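/*
 * One region of adapter memory, as used by sysctl_meminfo below.  idx
 * indexes the region[]/memory[] name tables there; an out-of-range idx
 * hides the entry.
 */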
8452 struct mem_desc {
8453 	unsigned int base;
8454 	unsigned int limit;
8455 	unsigned int idx;
8456 };
8457 
8458 static int
8459 mem_desc_cmp(const void *a, const void *b)
8460 {
8461 	return ((const struct mem_desc *)a)->base -
8462 	       ((const struct mem_desc *)b)->base;
8463 }
8464 
8465 static void
8466 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
8467     unsigned int to)
8468 {
8469 	unsigned int size;
8470 
8471 	if (from == to)
8472 		return;
8473 
8474 	size = to - from + 1;
8475 	if (size == 0)
8476 		return;
8477 
8478 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
8479 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
8480 }
8481 
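/*
 * Show how the adapter's memories (EDC0, EDC1, MC/MC0, MC1) are carved up
 * into hardware regions.  Region bases come from chip registers; they are
 * sorted by address and each region's limit is inferred from the next base
 * when the hardware doesn't provide one.
 */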
8482 static int
8483 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
8484 {
8485 	struct adapter *sc = arg1;
8486 	struct sbuf *sb;
8487 	int rc, i, n;
8488 	uint32_t lo, hi, used, alloc;
8489 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
8490 	static const char *region[] = {
8491 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
8492 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
8493 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
8494 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
8495 		"RQUDP region:", "PBL region:", "TXPBL region:",
8496 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
8497 		"On-chip queues:", "TLS keys:",
8498 	};
8499 	struct mem_desc avail[4];
8500 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
8501 	struct mem_desc *md = mem;
8502 
8503 	rc = sysctl_wire_old_buffer(req, 0);
8504 	if (rc != 0)
8505 		return (rc);
8506 
8507 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8508 	if (sb == NULL)
8509 		return (ENOMEM);
8510 
8511 	for (i = 0; i < nitems(mem); i++) {
8512 		mem[i].limit = 0;
8513 		mem[i].idx = i;
8514 	}
8515 
8516 	/* Find and sort the populated memory ranges */
8517 	i = 0;
8518 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
8519 	if (lo & F_EDRAM0_ENABLE) {
8520 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
8521 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
8522 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
8523 		avail[i].idx = 0;
8524 		i++;
8525 	}
8526 	if (lo & F_EDRAM1_ENABLE) {
8527 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
8528 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
8529 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
8530 		avail[i].idx = 1;
8531 		i++;
8532 	}
8533 	if (lo & F_EXT_MEM_ENABLE) {
8534 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
8535 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
8536 		avail[i].limit = avail[i].base +
8537 		    (G_EXT_MEM_SIZE(hi) << 20);
8538 		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
8539 		i++;
8540 	}
8541 	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
8542 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
8543 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
8544 		avail[i].limit = avail[i].base +
8545 		    (G_EXT_MEM1_SIZE(hi) << 20);
8546 		avail[i].idx = 4;
8547 		i++;
8548 	}
8549 	if (i == 0) {				/* no memory available */
		sbuf_delete(sb);	/* don't leak the sbuf */
8550 		return (0);
	}
8551 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
8552 
8553 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
8554 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
8555 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
8556 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
8557 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
8558 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
8559 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
8560 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
8561 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
8562 
8563 	/* the next few have explicit upper bounds */
8564 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
8565 	md->limit = md->base - 1 +
8566 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
8567 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
8568 	md++;
8569 
8570 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
8571 	md->limit = md->base - 1 +
8572 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
8573 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
8574 	md++;
8575 
8576 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8577 		if (chip_id(sc) <= CHELSIO_T5)
8578 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
8579 		else
8580 			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
8581 		md->limit = 0;
8582 	} else {
8583 		md->base = 0;
8584 		md->idx = nitems(region);  /* hide it */
8585 	}
8586 	md++;
8587 
8588 #define ulp_region(reg) \
8589 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
8590 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
8591 
8592 	ulp_region(RX_ISCSI);
8593 	ulp_region(RX_TDDP);
8594 	ulp_region(TX_TPT);
8595 	ulp_region(RX_STAG);
8596 	ulp_region(RX_RQ);
8597 	ulp_region(RX_RQUDP);
8598 	ulp_region(RX_PBL);
8599 	ulp_region(TX_PBL);
8600 #undef ulp_region
8601 
8602 	md->base = 0;
8603 	md->idx = nitems(region);
8604 	if (!is_t4(sc)) {
8605 		uint32_t size = 0;
8606 		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
8607 		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
8608 
8609 		if (is_t5(sc)) {
8610 			if (sge_ctrl & F_VFIFO_ENABLE)
8611 				size = G_DBVFIFO_SIZE(fifo_size);
8612 		} else
8613 			size = G_T6_DBVFIFO_SIZE(fifo_size);
8614 
8615 		if (size) {
8616 			md->base = G_BASEADDR(t4_read_reg(sc,
8617 			    A_SGE_DBVFIFO_BADDR));
8618 			md->limit = md->base + (size << 2) - 1;
8619 		}
8620 	}
8621 	md++;
8622 
8623 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
8624 	md->limit = 0;
8625 	md++;
8626 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
8627 	md->limit = 0;
8628 	md++;
8629 
8630 	md->base = sc->vres.ocq.start;
8631 	if (sc->vres.ocq.size)
8632 		md->limit = md->base + sc->vres.ocq.size - 1;
8633 	else
8634 		md->idx = nitems(region);  /* hide it */
8635 	md++;
8636 
8637 	md->base = sc->vres.key.start;
8638 	if (sc->vres.key.size)
8639 		md->limit = md->base + sc->vres.key.size - 1;
8640 	else
8641 		md->idx = nitems(region);  /* hide it */
8642 	md++;
8643 
8644 	/* add any address-space holes, there can be up to 3 */
8645 	for (n = 0; n < i - 1; n++)
8646 		if (avail[n].limit < avail[n + 1].base)
8647 			(md++)->base = avail[n].limit;
8648 	if (avail[n].limit)
8649 		(md++)->base = avail[n].limit;
8650 
8651 	n = md - mem;
8652 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
8653 
8654 	for (lo = 0; lo < i; lo++)
8655 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
8656 				avail[lo].limit - 1);
8657 
8658 	sbuf_printf(sb, "\n");
8659 	for (i = 0; i < n; i++) {
8660 		if (mem[i].idx >= nitems(region))
8661 			continue;                        /* skip holes */
8662 		if (!mem[i].limit)
8663 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
8664 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
8665 				mem[i].limit);
8666 	}
8667 
8668 	sbuf_printf(sb, "\n");
8669 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
8670 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
8671 	mem_region_show(sb, "uP RAM:", lo, hi);
8672 
8673 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
8674 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
8675 	mem_region_show(sb, "uP Extmem2:", lo, hi);
8676 
8677 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
8678 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
8679 		   G_PMRXMAXPAGE(lo),
8680 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
8681 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
8682 
8683 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
8684 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
8685 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
8686 		   G_PMTXMAXPAGE(lo),
8687 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
8688 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
8689 	sbuf_printf(sb, "%u p-structs\n",
8690 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
8691 
8692 	for (i = 0; i < 4; i++) {
8693 		if (chip_id(sc) > CHELSIO_T5)
8694 			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
8695 		else
8696 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
8697 		if (is_t5(sc)) {
8698 			used = G_T5_USED(lo);
8699 			alloc = G_T5_ALLOC(lo);
8700 		} else {
8701 			used = G_USED(lo);
8702 			alloc = G_ALLOC(lo);
8703 		}
8704 		/* For T6 these are MAC buffer groups */
8705 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
8706 		    i, used, alloc);
8707 	}
8708 	for (i = 0; i < sc->chip_params->nchan; i++) {
8709 		if (chip_id(sc) > CHELSIO_T5)
8710 			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
8711 		else
8712 			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
8713 		if (is_t5(sc)) {
8714 			used = G_T5_USED(lo);
8715 			alloc = G_T5_ALLOC(lo);
8716 		} else {
8717 			used = G_USED(lo);
8718 			alloc = G_ALLOC(lo);
8719 		}
8720 		/* For T6 these are MAC buffer groups */
8721 		sbuf_printf(sb,
8722 		    "\nLoopback %d using %u pages out of %u allocated",
8723 		    i, used, alloc);
8724 	}
8725 
8726 	rc = sbuf_finish(sb);
8727 	sbuf_delete(sb);
8728 
8729 	return (rc);
8730 }
8731 
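/*
 * Convert the X/Y pair that encodes an MPS TCAM entry into an Ethernet
 * address (taken from Y) and a mask of the valid bits (X | Y).
 */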
8732 static inline void
8733 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
8734 {
8735 	*mask = x | y;
8736 	y = htobe64(y);
8737 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
8738 }
8739 
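/*
 * MPS TCAM dump for T4 and T5.  T6 has a wider TCAM that is read
 * differently; see sysctl_mps_tcam_t6 below.
 */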
8740 static int
8741 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
8742 {
8743 	struct adapter *sc = arg1;
8744 	struct sbuf *sb;
8745 	int rc, i;
8746 
8747 	MPASS(chip_id(sc) <= CHELSIO_T5);
8748 
8749 	rc = sysctl_wire_old_buffer(req, 0);
8750 	if (rc != 0)
8751 		return (rc);
8752 
8753 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8754 	if (sb == NULL)
8755 		return (ENOMEM);
8756 
8757 	sbuf_printf(sb,
8758 	    "Idx  Ethernet address     Mask     Vld Ports PF"
8759 	    "  VF              Replication             P0 P1 P2 P3  ML");
8760 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8761 		uint64_t tcamx, tcamy, mask;
8762 		uint32_t cls_lo, cls_hi;
8763 		uint8_t addr[ETHER_ADDR_LEN];
8764 
8765 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
8766 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
8767 		if (tcamx & tcamy)
8768 			continue;
8769 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
8770 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8771 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
8772 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
8773 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
8774 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
8775 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
8776 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
8777 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
8778 
8779 		if (cls_lo & F_REPLICATE) {
8780 			struct fw_ldst_cmd ldst_cmd;
8781 
8782 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
8783 			ldst_cmd.op_to_addrspace =
8784 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
8785 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
8786 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
8787 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
8788 			ldst_cmd.u.mps.rplc.fid_idx =
8789 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
8790 				V_FW_LDST_CMD_IDX(i));
8791 
8792 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8793 			    "t4mps");
8794 			if (rc)
8795 				break;
8796 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
8797 			    sizeof(ldst_cmd), &ldst_cmd);
8798 			end_synchronized_op(sc, 0);
8799 
8800 			if (rc != 0) {
8801 				sbuf_printf(sb, "%36d", rc);
8802 				rc = 0;
8803 			} else {
8804 				sbuf_printf(sb, " %08x %08x %08x %08x",
8805 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
8806 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
8807 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
8808 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
8809 			}
8810 		} else
8811 			sbuf_printf(sb, "%36s", "");
8812 
8813 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
8814 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
8815 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
8816 	}
8817 
8818 	if (rc)
8819 		(void) sbuf_finish(sb);
8820 	else
8821 		rc = sbuf_finish(sb);
8822 	sbuf_delete(sb);
8823 
8824 	return (rc);
8825 }
8826 
8827 static int
8828 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
8829 {
8830 	struct adapter *sc = arg1;
8831 	struct sbuf *sb;
8832 	int rc, i;
8833 
8834 	MPASS(chip_id(sc) > CHELSIO_T5);
8835 
8836 	rc = sysctl_wire_old_buffer(req, 0);
8837 	if (rc != 0)
8838 		return (rc);
8839 
8840 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8841 	if (sb == NULL)
8842 		return (ENOMEM);
8843 
8844 	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
8845 	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
8846 	    "                           Replication"
8847 	    "                                    P0 P1 P2 P3  ML");
8848 
8849 	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8850 		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
8851 		uint16_t ivlan;
8852 		uint64_t tcamx, tcamy, val, mask;
8853 		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
8854 		uint8_t addr[ETHER_ADDR_LEN];
8855 
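		/*
		 * Read the Y (value) half of entry i through the indirect
		 * TCAM data interface.  The entries are arranged as two banks
		 * of 256: CTLTCAMSEL selects the bank and CTLTCAMINDEX the
		 * entry within it.
		 */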
8856 		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
8857 		if (i < 256)
8858 			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
8859 		else
8860 			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
8861 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8862 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8863 		tcamy = G_DMACH(val) << 32;
8864 		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8865 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8866 		lookup_type = G_DATALKPTYPE(data2);
8867 		port_num = G_DATAPORTNUM(data2);
8868 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
8869 			/* Inner header VNI */
8870 			vniy = ((data2 & F_DATAVIDH2) << 23) |
8871 				       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8872 			dip_hit = data2 & F_DATADIPHIT;
8873 			vlan_vld = 0;
8874 		} else {
8875 			vniy = 0;
8876 			dip_hit = 0;
8877 			vlan_vld = data2 & F_DATAVIDH2;
8878 			ivlan = G_VIDL(val);
8879 		}
8880 
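		/* Repeat the read for the X (mask) half of the same entry. */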
8881 		ctl |= V_CTLXYBITSEL(1);
8882 		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8883 		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8884 		tcamx = G_DMACH(val) << 32;
8885 		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8886 		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8887 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
8888 			/* Inner header VNI mask */
8889 			vnix = ((data2 & F_DATAVIDH2) << 23) |
8890 			       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8891 		} else
8892 			vnix = 0;
8893 
8894 		if (tcamx & tcamy)
8895 			continue;
8896 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
8897 
8898 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8899 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
8900 
8901 		if (lookup_type && lookup_type != M_DATALKPTYPE) {
8902 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8903 			    "%012jx %06x %06x    -    -   %3c"
8904 			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
8905 			    addr[1], addr[2], addr[3], addr[4], addr[5],
8906 			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
8907 			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8908 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8909 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8910 		} else {
8911 			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8912 			    "%012jx    -       -   ", i, addr[0], addr[1],
8913 			    addr[2], addr[3], addr[4], addr[5],
8914 			    (uintmax_t)mask);
8915 
8916 			if (vlan_vld)
8917 				sbuf_printf(sb, "%4u   Y     ", ivlan);
8918 			else
8919 				sbuf_printf(sb, "  -    N     ");
8920 
8921 			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
8922 			    lookup_type ? 'I' : 'O', port_num,
8923 			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8924 			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8925 			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8926 		}
8927 
8929 		if (cls_lo & F_T6_REPLICATE) {
8930 			struct fw_ldst_cmd ldst_cmd;
8931 
8932 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
8933 			ldst_cmd.op_to_addrspace =
8934 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
8935 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
8936 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
8937 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
8938 			ldst_cmd.u.mps.rplc.fid_idx =
8939 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
8940 				V_FW_LDST_CMD_IDX(i));
8941 
8942 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8943 			    "t6mps");
8944 			if (rc)
8945 				break;
8946 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
8947 			    sizeof(ldst_cmd), &ldst_cmd);
8948 			end_synchronized_op(sc, 0);
8949 
8950 			if (rc != 0) {
8951 				sbuf_printf(sb, "%72d", rc);
8952 				rc = 0;
8953 			} else {
8954 				sbuf_printf(sb, " %08x %08x %08x %08x"
8955 				    " %08x %08x %08x %08x",
8956 				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
8957 				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
8958 				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
8959 				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
8960 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
8961 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
8962 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
8963 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
8964 			}
8965 		} else
8966 			sbuf_printf(sb, "%72s", "");
8967 
8968 		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
8969 		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
8970 		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
8971 		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
8972 	}
8973 
8974 	if (rc)
8975 		(void) sbuf_finish(sb);
8976 	else
8977 		rc = sbuf_finish(sb);
8978 	sbuf_delete(sb);
8979 
8980 	return (rc);
8981 }
8982 
8983 static int
8984 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
8985 {
8986 	struct adapter *sc = arg1;
8987 	struct sbuf *sb;
8988 	int rc;
8989 	uint16_t mtus[NMTUS];
8990 
8991 	rc = sysctl_wire_old_buffer(req, 0);
8992 	if (rc != 0)
8993 		return (rc);
8994 
8995 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8996 	if (sb == NULL)
8997 		return (ENOMEM);
8998 
8999 	t4_read_mtu_tbl(sc, mtus, NULL);
9000 
9001 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
9002 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
9003 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
9004 	    mtus[14], mtus[15]);
9005 
9006 	rc = sbuf_finish(sb);
9007 	sbuf_delete(sb);
9008 
9009 	return (rc);
9010 }
9011 
9012 static int
9013 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
9014 {
9015 	struct adapter *sc = arg1;
9016 	struct sbuf *sb;
9017 	int rc, i;
9018 	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
9019 	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
9020 	static const char *tx_stats[MAX_PM_NSTATS] = {
9021 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
9022 		"Tx FIFO wait", NULL, "Tx latency"
9023 	};
9024 	static const char *rx_stats[MAX_PM_NSTATS] = {
9025 		"Read:", "Write bypass:", "Write mem:", "Flush:",
9026 		"Rx FIFO wait", NULL, "Rx latency"
9027 	};
9028 
9029 	rc = sysctl_wire_old_buffer(req, 0);
9030 	if (rc != 0)
9031 		return (rc);
9032 
9033 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9034 	if (sb == NULL)
9035 		return (ENOMEM);
9036 
9037 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
9038 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
9039 
9040 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
9041 	for (i = 0; i < 4; i++) {
9042 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9043 		    tx_cyc[i]);
9044 	}
9045 
9046 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
9047 	for (i = 0; i < 4; i++) {
9048 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9049 		    rx_cyc[i]);
9050 	}
9051 
9052 	if (chip_id(sc) > CHELSIO_T5) {
9053 		sbuf_printf(sb,
9054 		    "\n              Total wait      Total occupancy");
9055 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9056 		    tx_cyc[i]);
9057 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9058 		    rx_cyc[i]);
9059 
9060 		i += 2;
9061 		MPASS(i < nitems(tx_stats));
9062 
9063 		sbuf_printf(sb,
9064 		    "\n                   Reads           Total wait");
9065 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9066 		    tx_cyc[i]);
9067 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9068 		    rx_cyc[i]);
9069 	}
9070 
9071 	rc = sbuf_finish(sb);
9072 	sbuf_delete(sb);
9073 
9074 	return (rc);
9075 }
9076 
9077 static int
9078 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
9079 {
9080 	struct adapter *sc = arg1;
9081 	struct sbuf *sb;
9082 	int rc;
9083 	struct tp_rdma_stats stats;
9084 
9085 	rc = sysctl_wire_old_buffer(req, 0);
9086 	if (rc != 0)
9087 		return (rc);
9088 
9089 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9090 	if (sb == NULL)
9091 		return (ENOMEM);
9092 
9093 	mtx_lock(&sc->reg_lock);
9094 	t4_tp_get_rdma_stats(sc, &stats, 0);
9095 	mtx_unlock(&sc->reg_lock);
9096 
9097 	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
9098 	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
9099 
9100 	rc = sbuf_finish(sb);
9101 	sbuf_delete(sb);
9102 
9103 	return (rc);
9104 }
9105 
9106 static int
9107 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
9108 {
9109 	struct adapter *sc = arg1;
9110 	struct sbuf *sb;
9111 	int rc;
9112 	struct tp_tcp_stats v4, v6;
9113 
9114 	rc = sysctl_wire_old_buffer(req, 0);
9115 	if (rc != 0)
9116 		return (rc);
9117 
9118 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9119 	if (sb == NULL)
9120 		return (ENOMEM);
9121 
9122 	mtx_lock(&sc->reg_lock);
9123 	t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
9124 	mtx_unlock(&sc->reg_lock);
9125 
9126 	sbuf_printf(sb,
9127 	    "                                IP                 IPv6\n");
9128 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
9129 	    v4.tcp_out_rsts, v6.tcp_out_rsts);
9130 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
9131 	    v4.tcp_in_segs, v6.tcp_in_segs);
9132 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
9133 	    v4.tcp_out_segs, v6.tcp_out_segs);
9134 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
9135 	    v4.tcp_retrans_segs, v6.tcp_retrans_segs);
9136 
9137 	rc = sbuf_finish(sb);
9138 	sbuf_delete(sb);
9139 
9140 	return (rc);
9141 }
9142 
9143 static int
9144 sysctl_tids(SYSCTL_HANDLER_ARGS)
9145 {
9146 	struct adapter *sc = arg1;
9147 	struct sbuf *sb;
9148 	int rc;
9149 	struct tid_info *t = &sc->tids;
9150 
9151 	rc = sysctl_wire_old_buffer(req, 0);
9152 	if (rc != 0)
9153 		return (rc);
9154 
9155 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9156 	if (sb == NULL)
9157 		return (ENOMEM);
9158 
9159 	if (t->natids) {
9160 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
9161 		    t->atids_in_use);
9162 	}
9163 
9164 	if (t->nhpftids) {
9165 		sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
9166 		    t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
9167 	}
9168 
9169 	if (t->ntids) {
9170 		sbuf_printf(sb, "TID range: ");
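		/*
		 * With the LE hash enabled the TID space is split in two;
		 * report the range below the server start index and the range
		 * at and above the hash TID base separately.
		 */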
9171 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
9172 			uint32_t b, hb;
9173 
9174 			if (chip_id(sc) <= CHELSIO_T5) {
9175 				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
9176 				hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
9177 			} else {
9178 				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
9179 				hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
9180 			}
9181 
9182 			if (b)
9183 				sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
9184 			sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
9185 		} else
9186 			sbuf_printf(sb, "%u-%u", t->tid_base, t->ntids - 1);
9187 		sbuf_printf(sb, ", in use: %u\n",
9188 		    atomic_load_acq_int(&t->tids_in_use));
9189 	}
9190 
9191 	if (t->nstids) {
9192 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
9193 		    t->stid_base + t->nstids - 1, t->stids_in_use);
9194 	}
9195 
9196 	if (t->nftids) {
9197 		sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
9198 		    t->ftid_end, t->ftids_in_use);
9199 	}
9200 
9201 	if (t->netids) {
9202 		sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
9203 		    t->etid_base + t->netids - 1, t->etids_in_use);
9204 	}
9205 
9206 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
9207 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
9208 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
9209 
9210 	rc = sbuf_finish(sb);
9211 	sbuf_delete(sb);
9212 
9213 	return (rc);
9214 }
9215 
9216 static int
9217 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
9218 {
9219 	struct adapter *sc = arg1;
9220 	struct sbuf *sb;
9221 	int rc;
9222 	struct tp_err_stats stats;
9223 
9224 	rc = sysctl_wire_old_buffer(req, 0);
9225 	if (rc != 0)
9226 		return (rc);
9227 
9228 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9229 	if (sb == NULL)
9230 		return (ENOMEM);
9231 
9232 	mtx_lock(&sc->reg_lock);
9233 	t4_tp_get_err_stats(sc, &stats, 0);
9234 	mtx_unlock(&sc->reg_lock);
9235 
9236 	if (sc->chip_params->nchan > 2) {
9237 		sbuf_printf(sb, "                 channel 0  channel 1"
9238 		    "  channel 2  channel 3\n");
9239 		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
9240 		    stats.mac_in_errs[0], stats.mac_in_errs[1],
9241 		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
9242 		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
9243 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
9244 		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
9245 		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
9246 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
9247 		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
9248 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
9249 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
9250 		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
9251 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
9252 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
9253 		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
9254 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
9255 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
9256 		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
9257 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
9258 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
9259 		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
9260 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
9261 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
9262 		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
9263 	} else {
9264 		sbuf_printf(sb, "                 channel 0  channel 1\n");
9265 		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
9266 		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
9267 		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
9268 		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
9269 		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
9270 		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
9271 		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
9272 		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
9273 		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
9274 		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
9275 		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
9276 		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
9277 		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
9278 		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
9279 		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
9280 		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
9281 	}
9282 
9283 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
9284 	    stats.ofld_no_neigh, stats.ofld_cong_defer);
9285 
9286 	rc = sbuf_finish(sb);
9287 	sbuf_delete(sb);
9288 
9289 	return (rc);
9290 }
9291 
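/*
 * The high 16 bits of TP_DBG_LA_CONFIG hold the LA filter mask.  The sysctl
 * reads and writes just those bits, shifted down to a 16b value.
 */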
9292 static int
9293 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
9294 {
9295 	struct adapter *sc = arg1;
9296 	struct tp_params *tpp = &sc->params.tp;
9297 	u_int mask;
9298 	int rc;
9299 
9300 	mask = tpp->la_mask >> 16;
9301 	rc = sysctl_handle_int(oidp, &mask, 0, req);
9302 	if (rc != 0 || req->newptr == NULL)
9303 		return (rc);
9304 	if (mask > 0xffff)
9305 		return (EINVAL);
9306 	tpp->la_mask = mask << 16;
9307 	t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
9308 
9309 	return (0);
9310 }
9311 
9312 struct field_desc {
9313 	const char *name;
9314 	u_int start;
9315 	u_int width;
9316 };
9317 
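/*
 * Print a 64b value as a list of named bitfields.  The field_desc array is
 * NULL-terminated and the output is wrapped at 79 columns, with continuation
 * lines indented by 8 spaces.
 */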
9318 static void
9319 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
9320 {
9321 	char buf[32];
9322 	int line_size = 0;
9323 
9324 	while (f->name) {
9325 		uint64_t mask = (1ULL << f->width) - 1;
9326 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
9327 		    ((uintmax_t)v >> f->start) & mask);
9328 
9329 		if (line_size + len >= 79) {
9330 			line_size = 8;
9331 			sbuf_printf(sb, "\n        ");
9332 		}
9333 		sbuf_printf(sb, "%s ", buf);
9334 		line_size += len + 1;
9335 		f++;
9336 	}
9337 	sbuf_printf(sb, "\n");
9338 }
9339 
9340 static const struct field_desc tp_la0[] = {
9341 	{ "RcfOpCodeOut", 60, 4 },
9342 	{ "State", 56, 4 },
9343 	{ "WcfState", 52, 4 },
9344 	{ "RcfOpcSrcOut", 50, 2 },
9345 	{ "CRxError", 49, 1 },
9346 	{ "ERxError", 48, 1 },
9347 	{ "SanityFailed", 47, 1 },
9348 	{ "SpuriousMsg", 46, 1 },
9349 	{ "FlushInputMsg", 45, 1 },
9350 	{ "FlushInputCpl", 44, 1 },
9351 	{ "RssUpBit", 43, 1 },
9352 	{ "RssFilterHit", 42, 1 },
9353 	{ "Tid", 32, 10 },
9354 	{ "InitTcb", 31, 1 },
9355 	{ "LineNumber", 24, 7 },
9356 	{ "Emsg", 23, 1 },
9357 	{ "EdataOut", 22, 1 },
9358 	{ "Cmsg", 21, 1 },
9359 	{ "CdataOut", 20, 1 },
9360 	{ "EreadPdu", 19, 1 },
9361 	{ "CreadPdu", 18, 1 },
9362 	{ "TunnelPkt", 17, 1 },
9363 	{ "RcfPeerFin", 16, 1 },
9364 	{ "RcfReasonOut", 12, 4 },
9365 	{ "TxCchannel", 10, 2 },
9366 	{ "RcfTxChannel", 8, 2 },
9367 	{ "RxEchannel", 6, 2 },
9368 	{ "RcfRxChannel", 5, 1 },
9369 	{ "RcfDataOutSrdy", 4, 1 },
9370 	{ "RxDvld", 3, 1 },
9371 	{ "RxOoDvld", 2, 1 },
9372 	{ "RxCongestion", 1, 1 },
9373 	{ "TxCongestion", 0, 1 },
9374 	{ NULL }
9375 };
9376 
9377 static const struct field_desc tp_la1[] = {
9378 	{ "CplCmdIn", 56, 8 },
9379 	{ "CplCmdOut", 48, 8 },
9380 	{ "ESynOut", 47, 1 },
9381 	{ "EAckOut", 46, 1 },
9382 	{ "EFinOut", 45, 1 },
9383 	{ "ERstOut", 44, 1 },
9384 	{ "SynIn", 43, 1 },
9385 	{ "AckIn", 42, 1 },
9386 	{ "FinIn", 41, 1 },
9387 	{ "RstIn", 40, 1 },
9388 	{ "DataIn", 39, 1 },
9389 	{ "DataInVld", 38, 1 },
9390 	{ "PadIn", 37, 1 },
9391 	{ "RxBufEmpty", 36, 1 },
9392 	{ "RxDdp", 35, 1 },
9393 	{ "RxFbCongestion", 34, 1 },
9394 	{ "TxFbCongestion", 33, 1 },
9395 	{ "TxPktSumSrdy", 32, 1 },
9396 	{ "RcfUlpType", 28, 4 },
9397 	{ "Eread", 27, 1 },
9398 	{ "Ebypass", 26, 1 },
9399 	{ "Esave", 25, 1 },
9400 	{ "Static0", 24, 1 },
9401 	{ "Cread", 23, 1 },
9402 	{ "Cbypass", 22, 1 },
9403 	{ "Csave", 21, 1 },
9404 	{ "CPktOut", 20, 1 },
9405 	{ "RxPagePoolFull", 18, 2 },
9406 	{ "RxLpbkPkt", 17, 1 },
9407 	{ "TxLpbkPkt", 16, 1 },
9408 	{ "RxVfValid", 15, 1 },
9409 	{ "SynLearned", 14, 1 },
9410 	{ "SetDelEntry", 13, 1 },
9411 	{ "SetInvEntry", 12, 1 },
9412 	{ "CpcmdDvld", 11, 1 },
9413 	{ "CpcmdSave", 10, 1 },
9414 	{ "RxPstructsFull", 8, 2 },
9415 	{ "EpcmdDvld", 7, 1 },
9416 	{ "EpcmdFlush", 6, 1 },
9417 	{ "EpcmdTrimPrefix", 5, 1 },
9418 	{ "EpcmdTrimPostfix", 4, 1 },
9419 	{ "ERssIp4Pkt", 3, 1 },
9420 	{ "ERssIp6Pkt", 2, 1 },
9421 	{ "ERssTcpUdpPkt", 1, 1 },
9422 	{ "ERssFceFipPkt", 0, 1 },
9423 	{ NULL }
9424 };
9425 
9426 static const struct field_desc tp_la2[] = {
9427 	{ "CplCmdIn", 56, 8 },
9428 	{ "MpsVfVld", 55, 1 },
9429 	{ "MpsPf", 52, 3 },
9430 	{ "MpsVf", 44, 8 },
9431 	{ "SynIn", 43, 1 },
9432 	{ "AckIn", 42, 1 },
9433 	{ "FinIn", 41, 1 },
9434 	{ "RstIn", 40, 1 },
9435 	{ "DataIn", 39, 1 },
9436 	{ "DataInVld", 38, 1 },
9437 	{ "PadIn", 37, 1 },
9438 	{ "RxBufEmpty", 36, 1 },
9439 	{ "RxDdp", 35, 1 },
9440 	{ "RxFbCongestion", 34, 1 },
9441 	{ "TxFbCongestion", 33, 1 },
9442 	{ "TxPktSumSrdy", 32, 1 },
9443 	{ "RcfUlpType", 28, 4 },
9444 	{ "Eread", 27, 1 },
9445 	{ "Ebypass", 26, 1 },
9446 	{ "Esave", 25, 1 },
9447 	{ "Static0", 24, 1 },
9448 	{ "Cread", 23, 1 },
9449 	{ "Cbypass", 22, 1 },
9450 	{ "Csave", 21, 1 },
9451 	{ "CPktOut", 20, 1 },
9452 	{ "RxPagePoolFull", 18, 2 },
9453 	{ "RxLpbkPkt", 17, 1 },
9454 	{ "TxLpbkPkt", 16, 1 },
9455 	{ "RxVfValid", 15, 1 },
9456 	{ "SynLearned", 14, 1 },
9457 	{ "SetDelEntry", 13, 1 },
9458 	{ "SetInvEntry", 12, 1 },
9459 	{ "CpcmdDvld", 11, 1 },
9460 	{ "CpcmdSave", 10, 1 },
9461 	{ "RxPstructsFull", 8, 2 },
9462 	{ "EpcmdDvld", 7, 1 },
9463 	{ "EpcmdFlush", 6, 1 },
9464 	{ "EpcmdTrimPrefix", 5, 1 },
9465 	{ "EpcmdTrimPostfix", 4, 1 },
9466 	{ "ERssIp4Pkt", 3, 1 },
9467 	{ "ERssIp6Pkt", 2, 1 },
9468 	{ "ERssTcpUdpPkt", 1, 1 },
9469 	{ "ERssFceFipPkt", 0, 1 },
9470 	{ NULL }
9471 };
9472 
9473 static void
9474 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
9475 {
9476 
9477 	field_desc_show(sb, *p, tp_la0);
9478 }
9479 
9480 static void
9481 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
9482 {
9483 
9484 	if (idx)
9485 		sbuf_printf(sb, "\n");
9486 	field_desc_show(sb, p[0], tp_la0);
9487 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
9488 		field_desc_show(sb, p[1], tp_la0);
9489 }
9490 
9491 static void
9492 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
9493 {
9494 
9495 	if (idx)
9496 		sbuf_printf(sb, "\n");
9497 	field_desc_show(sb, p[0], tp_la0);
9498 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
9499 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
9500 }
9501 
9502 static int
9503 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
9504 {
9505 	struct adapter *sc = arg1;
9506 	struct sbuf *sb;
9507 	uint64_t *buf, *p;
9508 	int rc;
9509 	u_int i, inc;
9510 	void (*show_func)(struct sbuf *, uint64_t *, int);
9511 
9512 	rc = sysctl_wire_old_buffer(req, 0);
9513 	if (rc != 0)
9514 		return (rc);
9515 
9516 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9517 	if (sb == NULL)
9518 		return (ENOMEM);
9519 
9520 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
9521 
9522 	t4_tp_read_la(sc, buf, NULL);
9523 	p = buf;
9524 
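	/*
	 * The decode depends on the LA capture mode: modes 2 and 3 store two
	 * 64b words per entry (the second word of the final entry is skipped
	 * if it reads back as all 1s); everything else is one word per entry.
	 */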
9525 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
9526 	case 2:
9527 		inc = 2;
9528 		show_func = tp_la_show2;
9529 		break;
9530 	case 3:
9531 		inc = 2;
9532 		show_func = tp_la_show3;
9533 		break;
9534 	default:
9535 		inc = 1;
9536 		show_func = tp_la_show;
9537 	}
9538 
9539 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
9540 		(*show_func)(sb, p, i);
9541 
9542 	rc = sbuf_finish(sb);
9543 	sbuf_delete(sb);
9544 	free(buf, M_CXGBE);
9545 	return (rc);
9546 }
9547 
9548 static int
9549 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
9550 {
9551 	struct adapter *sc = arg1;
9552 	struct sbuf *sb;
9553 	int rc;
9554 	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
9555 
9556 	rc = sysctl_wire_old_buffer(req, 0);
9557 	if (rc != 0)
9558 		return (rc);
9559 
9560 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9561 	if (sb == NULL)
9562 		return (ENOMEM);
9563 
9564 	t4_get_chan_txrate(sc, nrate, orate);
9565 
9566 	if (sc->chip_params->nchan > 2) {
9567 		sbuf_printf(sb, "              channel 0   channel 1"
9568 		    "   channel 2   channel 3\n");
9569 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
9570 		    nrate[0], nrate[1], nrate[2], nrate[3]);
9571 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
9572 		    orate[0], orate[1], orate[2], orate[3]);
9573 	} else {
9574 		sbuf_printf(sb, "              channel 0   channel 1\n");
9575 		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
9576 		    nrate[0], nrate[1]);
9577 		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
9578 		    orate[0], orate[1]);
9579 	}
9580 
9581 	rc = sbuf_finish(sb);
9582 	sbuf_delete(sb);
9583 
9584 	return (rc);
9585 }
9586 
9587 static int
9588 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
9589 {
9590 	struct adapter *sc = arg1;
9591 	struct sbuf *sb;
9592 	uint32_t *buf, *p;
9593 	int rc, i;
9594 
9595 	rc = sysctl_wire_old_buffer(req, 0);
9596 	if (rc != 0)
9597 		return (rc);
9598 
9599 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9600 	if (sb == NULL)
9601 		return (ENOMEM);
9602 
9603 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
9604 	    M_ZERO | M_WAITOK);
9605 
9606 	t4_ulprx_read_la(sc, buf);
9607 	p = buf;
9608 
9609 	sbuf_printf(sb, "      Pcmd        Type   Message"
9610 	    "                Data");
9611 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
9612 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
9613 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
9614 	}
9615 
9616 	rc = sbuf_finish(sb);
9617 	sbuf_delete(sb);
9618 	free(buf, M_CXGBE);
9619 	return (rc);
9620 }
9621 
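/*
 * SGE write-combined work request statistics.  These are meaningful only
 * when the SGE statistics engine is sampling source 7; the mode determines
 * what the TOTAL and MATCH counters hold.
 */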
9622 static int
9623 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
9624 {
9625 	struct adapter *sc = arg1;
9626 	struct sbuf *sb;
9627 	int rc, v;
9628 
9629 	MPASS(chip_id(sc) >= CHELSIO_T5);
9630 
9631 	rc = sysctl_wire_old_buffer(req, 0);
9632 	if (rc != 0)
9633 		return (rc);
9634 
9635 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9636 	if (sb == NULL)
9637 		return (ENOMEM);
9638 
9639 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
9640 	if (G_STATSOURCE_T5(v) == 7) {
9641 		int mode;
9642 
9643 		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
9644 		if (mode == 0) {
9645 			sbuf_printf(sb, "total %d, incomplete %d",
9646 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
9647 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
9648 		} else if (mode == 1) {
9649 			sbuf_printf(sb, "total %d, data overflow %d",
9650 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
9651 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
9652 		} else {
9653 			sbuf_printf(sb, "unknown mode %d", mode);
9654 		}
9655 	}
9656 	rc = sbuf_finish(sb);
9657 	sbuf_delete(sb);
9658 
9659 	return (rc);
9660 }
9661 
9662 static int
9663 sysctl_cpus(SYSCTL_HANDLER_ARGS)
9664 {
9665 	struct adapter *sc = arg1;
9666 	enum cpu_sets op = arg2;
9667 	cpuset_t cpuset;
9668 	struct sbuf *sb;
9669 	int i, rc;
9670 
9671 	MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
9672 
9673 	CPU_ZERO(&cpuset);
9674 	rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
9675 	if (rc != 0)
9676 		return (rc);
9677 
9678 	rc = sysctl_wire_old_buffer(req, 0);
9679 	if (rc != 0)
9680 		return (rc);
9681 
9682 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9683 	if (sb == NULL)
9684 		return (ENOMEM);
9685 
9686 	CPU_FOREACH(i) {
9687 		if (CPU_ISSET(i, &cpuset))
			sbuf_printf(sb, "%d ", i);
	}
9688 	rc = sbuf_finish(sb);
9689 	sbuf_delete(sb);
9690 
9691 	return (rc);
9692 }
9693 
9694 #ifdef TCP_OFFLOAD
9695 static int
9696 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
9697 {
9698 	struct adapter *sc = arg1;
9699 	int *old_ports, *new_ports;
9700 	int i, new_count, rc;
9701 
9702 	if (req->newptr == NULL && req->oldptr == NULL)
9703 		return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
9704 		    sizeof(sc->tt.tls_rx_ports[0])));
9705 
9706 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
9707 	if (rc)
9708 		return (rc);
9709 
9710 	if (sc->tt.num_tls_rx_ports == 0) {
9711 		i = -1;
9712 		rc = SYSCTL_OUT(req, &i, sizeof(i));
9713 	} else
9714 		rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
9715 		    sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
9716 	if (rc == 0 && req->newptr != NULL) {
9717 		new_count = req->newlen / sizeof(new_ports[0]);
9718 		new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
9719 		    M_WAITOK);
9720 		rc = SYSCTL_IN(req, new_ports, new_count *
9721 		    sizeof(new_ports[0]));
9722 		if (rc)
9723 			goto err;
9724 
9725 		/* Allow setting to a single '-1' to clear the list. */
9726 		if (new_count == 1 && new_ports[0] == -1) {
9727 			ADAPTER_LOCK(sc);
9728 			old_ports = sc->tt.tls_rx_ports;
9729 			sc->tt.tls_rx_ports = NULL;
9730 			sc->tt.num_tls_rx_ports = 0;
9731 			ADAPTER_UNLOCK(sc);
9732 			free(old_ports, M_CXGBE);
9733 		} else {
9734 			for (i = 0; i < new_count; i++) {
9735 				if (new_ports[i] < 1 ||
9736 				    new_ports[i] > IPPORT_MAX) {
9737 					rc = EINVAL;
9738 					goto err;
9739 				}
9740 			}
9741 
9742 			ADAPTER_LOCK(sc);
9743 			old_ports = sc->tt.tls_rx_ports;
9744 			sc->tt.tls_rx_ports = new_ports;
9745 			sc->tt.num_tls_rx_ports = new_count;
9746 			ADAPTER_UNLOCK(sc);
9747 			free(old_ports, M_CXGBE);
9748 			new_ports = NULL;
9749 		}
9750 	err:
9751 		free(new_ports, M_CXGBE);
9752 	}
9753 	end_synchronized_op(sc, 0);
9754 	return (rc);
9755 }
9756 
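/*
 * Format val / factor as a decimal string, dropping insignificant trailing
 * zeroes from the fractional part (e.g. 1500 / 1000 is formatted as "1.5").
 */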
9757 static void
9758 unit_conv(char *buf, size_t len, u_int val, u_int factor)
9759 {
9760 	u_int rem = val % factor;
9761 
9762 	if (rem == 0)
9763 		snprintf(buf, len, "%u", val / factor);
9764 	else {
9765 		while (rem % 10 == 0)
9766 			rem /= 10;
9767 		snprintf(buf, len, "%u.%u", val / factor, rem);
9768 	}
9769 }
9770 
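/*
 * The core clock frequency is in kHz, so 10^9 / cclk is the clock period in
 * picoseconds.  A tick is that period scaled by 2^resolution; unit_conv
 * reports it in microseconds.
 */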
9771 static int
9772 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
9773 {
9774 	struct adapter *sc = arg1;
9775 	char buf[16];
9776 	u_int res, re;
9777 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9778 
9779 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
9780 	switch (arg2) {
9781 	case 0:
9782 		/* timer_tick */
9783 		re = G_TIMERRESOLUTION(res);
9784 		break;
9785 	case 1:
9786 		/* TCP timestamp tick */
9787 		re = G_TIMESTAMPRESOLUTION(res);
9788 		break;
9789 	case 2:
9790 		/* DACK tick */
9791 		re = G_DELAYEDACKRESOLUTION(res);
9792 		break;
9793 	default:
9794 		return (EDOOFUS);
9795 	}
9796 
9797 	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
9798 
9799 	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
9800 }
9801 
9802 static int
9803 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
9804 {
9805 	struct adapter *sc = arg1;
9806 	u_int res, dack_re, v;
9807 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9808 
9809 	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
9810 	dack_re = G_DELAYEDACKRESOLUTION(res);
9811 	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
9812 
9813 	return (sysctl_handle_int(oidp, &v, 0, req));
9814 }
9815 
9816 static int
9817 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
9818 {
9819 	struct adapter *sc = arg1;
9820 	int reg = arg2;
9821 	u_int tre;
9822 	u_long tp_tick_us, v;
9823 	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9824 
9825 	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
9826 	    reg == A_TP_PERS_MIN  || reg == A_TP_PERS_MAX ||
9827 	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
9828 	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
9829 
9830 	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
9831 	tp_tick_us = (cclk_ps << tre) / 1000000;
9832 
9833 	if (reg == A_TP_INIT_SRTT)
9834 		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
9835 	else
9836 		v = tp_tick_us * t4_read_reg(sc, reg);
9837 
9838 	return (sysctl_handle_long(oidp, &v, 0, req));
9839 }
9840 
9841 /*
9842  * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
9843  * passed to this function.
9844  */
9845 static int
9846 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
9847 {
9848 	struct adapter *sc = arg1;
9849 	int idx = arg2;
9850 	u_int v;
9851 
9852 	MPASS(idx >= 0 && idx <= 24);
9853 
9854 	v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
9855 
9856 	return (sysctl_handle_int(oidp, &v, 0, req));
9857 }
9858 
9859 static int
9860 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
9861 {
9862 	struct adapter *sc = arg1;
9863 	int idx = arg2;
9864 	u_int shift, v, r;
9865 
9866 	MPASS(idx >= 0 && idx < 16);
9867 
9868 	r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
9869 	shift = (idx & 3) << 3;
9870 	v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
9871 
9872 	return (sysctl_handle_int(oidp, &v, 0, req));
9873 }
9874 
9875 static int
9876 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
9877 {
9878 	struct vi_info *vi = arg1;
9879 	struct adapter *sc = vi->adapter;
9880 	int idx, rc, i;
9881 	struct sge_ofld_rxq *ofld_rxq;
9882 	uint8_t v;
9883 
9884 	idx = vi->ofld_tmr_idx;
9885 
9886 	rc = sysctl_handle_int(oidp, &idx, 0, req);
9887 	if (rc != 0 || req->newptr == NULL)
9888 		return (rc);
9889 
9890 	if (idx < 0 || idx >= SGE_NTIMERS)
9891 		return (EINVAL);
9892 
9893 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9894 	    "t4otmr");
9895 	if (rc)
9896 		return (rc);
9897 
9898 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
9899 	for_each_ofld_rxq(vi, i, ofld_rxq) {
9900 #ifdef atomic_store_rel_8
9901 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
9902 #else
9903 		ofld_rxq->iq.intr_params = v;
9904 #endif
9905 	}
9906 	vi->ofld_tmr_idx = idx;
9907 
9908 	end_synchronized_op(sc, LOCK_HELD);
9909 	return (0);
9910 }
9911 
9912 static int
9913 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
9914 {
9915 	struct vi_info *vi = arg1;
9916 	struct adapter *sc = vi->adapter;
9917 	int idx, rc;
9918 
9919 	idx = vi->ofld_pktc_idx;
9920 
9921 	rc = sysctl_handle_int(oidp, &idx, 0, req);
9922 	if (rc != 0 || req->newptr == NULL)
9923 		return (rc);
9924 
9925 	if (idx < -1 || idx >= SGE_NCOUNTERS)
9926 		return (EINVAL);
9927 
9928 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9929 	    "t4opktc");
9930 	if (rc)
9931 		return (rc);
9932 
9933 	if (vi->flags & VI_INIT_DONE)
9934 		rc = EBUSY; /* cannot be changed once the queues are created */
9935 	else
9936 		vi->ofld_pktc_idx = idx;
9937 
9938 	end_synchronized_op(sc, LOCK_HELD);
9939 	return (rc);
9940 }
9941 #endif
9942 
9943 static int
9944 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
9945 {
9946 	int rc;
9947 
9948 	if (cntxt->cid > M_CTXTQID)
9949 		return (EINVAL);
9950 
9951 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
9952 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
9953 		return (EINVAL);
9954 
9955 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
9956 	if (rc)
9957 		return (rc);
9958 
9959 	if (sc->flags & FW_OK) {
9960 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
9961 		    &cntxt->data[0]);
9962 		if (rc == 0)
9963 			goto done;
9964 	}
9965 
9966 	/*
9967 	 * Read via firmware failed or wasn't even attempted.  Read directly via
9968 	 * the backdoor.
9969 	 */
9970 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
9971 done:
9972 	end_synchronized_op(sc, 0);
9973 	return (rc);
9974 }
9975 
9976 static int
9977 load_fw(struct adapter *sc, struct t4_data *fw)
9978 {
9979 	int rc;
9980 	uint8_t *fw_data;
9981 
9982 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
9983 	if (rc)
9984 		return (rc);
9985 
9986 	/*
9987 	 * The firmware, with the sole exception of the memory parity error
9988 	 * handler, runs from memory and not flash.  It is almost always safe to
9989 	 * install a new firmware on a running system.  Just set bit 1 in
9990 	 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
9991 	 */
9992 	if (sc->flags & FULL_INIT_DONE &&
9993 	    (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
9994 		rc = EBUSY;
9995 		goto done;
9996 	}
9997 
9998 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
9999 	if (fw_data == NULL) {
10000 		rc = ENOMEM;
10001 		goto done;
10002 	}
10003 
10004 	rc = copyin(fw->data, fw_data, fw->len);
10005 	if (rc == 0)
10006 		rc = -t4_load_fw(sc, fw_data, fw->len);
10007 
10008 	free(fw_data, M_CXGBE);
10009 done:
10010 	end_synchronized_op(sc, 0);
10011 	return (rc);
10012 }
10013 
10014 static int
10015 load_cfg(struct adapter *sc, struct t4_data *cfg)
10016 {
10017 	int rc;
10018 	uint8_t *cfg_data = NULL;
10019 
10020 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
10021 	if (rc)
10022 		return (rc);
10023 
10024 	if (cfg->len == 0) {
10025 		/* clear */
10026 		rc = -t4_load_cfg(sc, NULL, 0);
10027 		goto done;
10028 	}
10029 
10030 	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
10031 	if (cfg_data == NULL) {
10032 		rc = ENOMEM;
10033 		goto done;
10034 	}
10035 
10036 	rc = copyin(cfg->data, cfg_data, cfg->len);
10037 	if (rc == 0)
10038 		rc = -t4_load_cfg(sc, cfg_data, cfg->len);
10039 
10040 	free(cfg_data, M_CXGBE);
10041 done:
10042 	end_synchronized_op(sc, 0);
10043 	return (rc);
10044 }
10045 
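/*
 * br->pf_offset selects how the flash offset is obtained: 0 means
 * br->pfidx_addr is a PF number whose expansion-ROM offset register supplies
 * the offset, 1 means br->pfidx_addr holds the offset itself.
 */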
10046 static int
10047 load_boot(struct adapter *sc, struct t4_bootrom *br)
10048 {
10049 	int rc;
10050 	uint8_t *br_data = NULL;
10051 	u_int offset;
10052 
10053 	if (br->len > 1024 * 1024)
10054 		return (EFBIG);
10055 
10056 	if (br->pf_offset == 0) {
10057 		/* pfidx */
10058 		if (br->pfidx_addr > 7)
10059 			return (EINVAL);
10060 		offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
10061 		    A_PCIE_PF_EXPROM_OFST)));
10062 	} else if (br->pf_offset == 1) {
10063 		/* offset */
10064 		offset = G_OFFSET(br->pfidx_addr);
10065 	} else {
10066 		return (EINVAL);
10067 	}
10068 
10069 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
10070 	if (rc)
10071 		return (rc);
10072 
10073 	if (br->len == 0) {
10074 		/* clear */
10075 		rc = -t4_load_boot(sc, NULL, offset, 0);
10076 		goto done;
10077 	}
10078 
10079 	br_data = malloc(br->len, M_CXGBE, M_WAITOK);
10080 	if (br_data == NULL) {
10081 		rc = ENOMEM;
10082 		goto done;
10083 	}
10084 
10085 	rc = copyin(br->data, br_data, br->len);
10086 	if (rc == 0)
10087 		rc = -t4_load_boot(sc, br_data, offset, br->len);
10088 
10089 	free(br_data, M_CXGBE);
10090 done:
10091 	end_synchronized_op(sc, 0);
10092 	return (rc);
10093 }
10094 
10095 static int
10096 load_bootcfg(struct adapter *sc, struct t4_data *bc)
10097 {
10098 	int rc;
10099 	uint8_t *bc_data = NULL;
10100 
10101 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
10102 	if (rc)
10103 		return (rc);
10104 
10105 	if (bc->len == 0) {
10106 		/* clear */
10107 		rc = -t4_load_bootcfg(sc, NULL, 0);
10108 		goto done;
10109 	}
10110 
10111 	bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
10112 	if (bc_data == NULL) {
10113 		rc = ENOMEM;
10114 		goto done;
10115 	}
10116 
10117 	rc = copyin(bc->data, bc_data, bc->len);
10118 	if (rc == 0)
10119 		rc = -t4_load_bootcfg(sc, bc_data, bc->len);
10120 
10121 	free(bc_data, M_CXGBE);
10122 done:
10123 	end_synchronized_op(sc, 0);
10124 	return (rc);
10125 }
10126 
10127 static int
10128 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
10129 {
10130 	int rc;
10131 	struct cudbg_init *cudbg;
10132 	void *handle, *buf;
10133 
10134 	/* buf is large, don't block if no memory is available */
10135 	buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
10136 	if (buf == NULL)
10137 		return (ENOMEM);
10138 
10139 	handle = cudbg_alloc_handle();
10140 	if (handle == NULL) {
10141 		rc = ENOMEM;
10142 		goto done;
10143 	}
10144 
10145 	cudbg = cudbg_get_init(handle);
10146 	cudbg->adap = sc;
10147 	cudbg->print = (cudbg_print_cb)printf;
10148 
10149 #ifndef notyet
10150 	device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
10151 	    __func__, dump->wr_flash, dump->len, dump->data);
10152 #endif
10153 
10154 	if (dump->wr_flash)
10155 		cudbg->use_flash = 1;
10156 	MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
10157 	memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
10158 
10159 	rc = cudbg_collect(handle, buf, &dump->len);
10160 	if (rc != 0)
10161 		goto done;
10162 
10163 	rc = copyout(buf, dump->data, dump->len);
10164 done:
10165 	cudbg_free_handle(handle);
10166 	free(buf, M_CXGBE);
10167 	return (rc);
10168 }
10169 
10170 static void
10171 free_offload_policy(struct t4_offload_policy *op)
10172 {
10173 	struct offload_rule *r;
10174 	int i;
10175 
10176 	if (op == NULL)
10177 		return;
10178 
10179 	r = &op->rule[0];
10180 	for (i = 0; i < op->nrules; i++, r++) {
10181 		free(r->bpf_prog.bf_insns, M_CXGBE);
10182 	}
10183 	free(op->rule, M_CXGBE);
10184 	free(op, M_CXGBE);
10185 }
10186 
10187 static int
10188 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
10189 {
10190 	int i, rc, len;
10191 	struct t4_offload_policy *op, *old;
10192 	struct bpf_program *bf;
10193 	const struct offload_settings *s;
10194 	struct offload_rule *r;
10195 	void *u;
10196 
10197 	if (!is_offload(sc))
10198 		return (ENODEV);
10199 
10200 	if (uop->nrules == 0) {
10201 		/* Delete installed policies. */
10202 		op = NULL;
10203 		goto set_policy;
10204 	} else if (uop->nrules > 256) { /* arbitrary */
10205 		return (E2BIG);
10206 	}
10207 
10208 	/* Copy userspace offload policy to kernel */
10209 	op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
10210 	op->nrules = uop->nrules;
10211 	len = op->nrules * sizeof(struct offload_rule);
10212 	op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
10213 	rc = copyin(uop->rule, op->rule, len);
10214 	if (rc) {
10215 		free(op->rule, M_CXGBE);
10216 		free(op, M_CXGBE);
10217 		return (rc);
10218 	}
10219 
10220 	r = &op->rule[0];
10221 	for (i = 0; i < op->nrules; i++, r++) {
10222 
10223 		/* Validate open_type */
10224 		if (r->open_type != OPEN_TYPE_LISTEN &&
10225 		    r->open_type != OPEN_TYPE_ACTIVE &&
10226 		    r->open_type != OPEN_TYPE_PASSIVE &&
10227 		    r->open_type != OPEN_TYPE_DONTCARE) {
10228 error:
10229 			/*
10230 			 * Rules 0 to i - 1 have malloc'd filters that need to
10231 			 * be freed.  Rules i to nrules - 1 still hold their
10232 			 * userspace pointers and should be left alone.
10233 			 */
10234 			op->nrules = i;
10235 			free_offload_policy(op);
10236 			return (rc);
10237 		}
10238 
10239 		/* Validate settings */
10240 		s = &r->settings;
10241 		if ((s->offload != 0 && s->offload != 1) ||
10242 		    s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
10243 		    s->sched_class < -1 ||
10244 		    s->sched_class >= sc->chip_params->nsched_cls) {
10245 			rc = EINVAL;
10246 			goto error;
10247 		}
10248 
10249 		bf = &r->bpf_prog;
10250 		u = bf->bf_insns;	/* userspace ptr */
10251 		bf->bf_insns = NULL;
10252 		if (bf->bf_len == 0) {
10253 			/* legal, matches everything */
10254 			continue;
10255 		}
10256 		len = bf->bf_len * sizeof(*bf->bf_insns);
10257 		bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
10258 		rc = copyin(u, bf->bf_insns, len);
10259 		if (rc != 0) {
10260 			free(bf->bf_insns, M_CXGBE);
			goto error;
		}
10261 
10262 		if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
10263 			rc = EINVAL;
10264 			free(bf->bf_insns, M_CXGBE);
			goto error;
10265 		}
10266 	}
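	/* Install the new policy; the old one is freed outside the lock. */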
10267 set_policy:
10268 	rw_wlock(&sc->policy_lock);
10269 	old = sc->policy;
10270 	sc->policy = op;
10271 	rw_wunlock(&sc->policy_lock);
10272 	free_offload_policy(old);
10273 
10274 	return (0);
10275 }
10276 
10277 #define MAX_READ_BUF_SIZE (128 * 1024)
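/*
 * Validate the requested range of card memory and copy it out to userspace
 * through memory window 2, one bounce buffer's worth at a time.
 */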
10278 static int
10279 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
10280 {
10281 	uint32_t addr, remaining, n;
10282 	uint32_t *buf;
10283 	int rc;
10284 	uint8_t *dst;
10285 
10286 	rc = validate_mem_range(sc, mr->addr, mr->len);
10287 	if (rc != 0)
10288 		return (rc);
10289 
10290 	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
10291 	addr = mr->addr;
10292 	remaining = mr->len;
10293 	dst = (void *)mr->data;
10294 
10295 	while (remaining) {
10296 		n = min(remaining, MAX_READ_BUF_SIZE);
10297 		read_via_memwin(sc, 2, addr, buf, n);
10298 
10299 		rc = copyout(buf, dst, n);
10300 		if (rc != 0)
10301 			break;
10302 
10303 		dst += n;
10304 		remaining -= n;
10305 		addr += n;
10306 	}
10307 
10308 	free(buf, M_CXGBE);
10309 	return (rc);
10310 }
10311 #undef MAX_READ_BUF_SIZE
10312 
10313 static int
10314 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
10315 {
10316 	int rc;
10317 
10318 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
10319 		return (EINVAL);
10320 
10321 	if (i2cd->len > sizeof(i2cd->data))
10322 		return (EFBIG);
10323 
10324 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
10325 	if (rc)
10326 		return (rc);
10327 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
10328 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
10329 	end_synchronized_op(sc, 0);
10330 
10331 	return (rc);
10332 }
10333 
10334 static int
10335 clear_stats(struct adapter *sc, u_int port_id)
10336 {
10337 	int i, v, chan_map;
10338 	struct port_info *pi;
10339 	struct vi_info *vi;
10340 	struct sge_rxq *rxq;
10341 	struct sge_txq *txq;
10342 	struct sge_wrq *wrq;
10343 #ifdef TCP_OFFLOAD
10344 	struct sge_ofld_rxq *ofld_rxq;
10345 #endif
10346 
10347 	if (port_id >= sc->params.nports)
10348 		return (EINVAL);
10349 	pi = sc->port[port_id];
10350 	if (pi == NULL)
10351 		return (EIO);
10352 
10353 	/* MAC stats */
10354 	t4_clr_port_stats(sc, pi->tx_chan);
10355 	pi->tx_parse_error = 0;
10356 	pi->tnl_cong_drops = 0;
10357 	mtx_lock(&sc->reg_lock);
10358 	for_each_vi(pi, v, vi) {
10359 		if (vi->flags & VI_INIT_DONE)
10360 			t4_clr_vi_stats(sc, vi->vin);
10361 	}
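	/* Clear the TP MIB tunnel congestion drop count of each rx channel. */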
10362 	chan_map = pi->rx_e_chan_map;
10363 	v = 0;	/* reuse */
10364 	while (chan_map) {
10365 		i = ffs(chan_map) - 1;
10366 		t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
10367 		    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
10368 		chan_map &= ~(1 << i);
10369 	}
10370 	mtx_unlock(&sc->reg_lock);
10371 
10372 	/*
10373 	 * Since this command accepts a port, clear stats for
10374 	 * all VIs on this port.
10375 	 */
10376 	for_each_vi(pi, v, vi) {
10377 		if (vi->flags & VI_INIT_DONE) {
10378 
10379 			for_each_rxq(vi, i, rxq) {
10380 #if defined(INET) || defined(INET6)
10381 				rxq->lro.lro_queued = 0;
10382 				rxq->lro.lro_flushed = 0;
10383 #endif
10384 				rxq->rxcsum = 0;
10385 				rxq->vlan_extraction = 0;
10386 
10387 				rxq->fl.cl_allocated = 0;
10388 				rxq->fl.cl_recycled = 0;
10389 				rxq->fl.cl_fast_recycled = 0;
10390 			}
10391 
10392 			for_each_txq(vi, i, txq) {
10393 				txq->txcsum = 0;
10394 				txq->tso_wrs = 0;
10395 				txq->vlan_insertion = 0;
10396 				txq->imm_wrs = 0;
10397 				txq->sgl_wrs = 0;
10398 				txq->txpkt_wrs = 0;
10399 				txq->txpkts0_wrs = 0;
10400 				txq->txpkts1_wrs = 0;
10401 				txq->txpkts0_pkts = 0;
10402 				txq->txpkts1_pkts = 0;
10403 				txq->raw_wrs = 0;
10404 				txq->kern_tls_records = 0;
10405 				txq->kern_tls_short = 0;
10406 				txq->kern_tls_partial = 0;
10407 				txq->kern_tls_full = 0;
10408 				txq->kern_tls_octets = 0;
10409 				txq->kern_tls_waste = 0;
10410 				txq->kern_tls_options = 0;
10411 				txq->kern_tls_header = 0;
10412 				txq->kern_tls_fin = 0;
10413 				txq->kern_tls_fin_short = 0;
10414 				txq->kern_tls_cbc = 0;
10415 				txq->kern_tls_gcm = 0;
10416 				mp_ring_reset_stats(txq->r);
10417 			}
10418 
10419 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
10420 			for_each_ofld_txq(vi, i, wrq) {
10421 				wrq->tx_wrs_direct = 0;
10422 				wrq->tx_wrs_copied = 0;
10423 			}
10424 #endif
10425 #ifdef TCP_OFFLOAD
10426 			for_each_ofld_rxq(vi, i, ofld_rxq) {
10427 				ofld_rxq->fl.cl_allocated = 0;
10428 				ofld_rxq->fl.cl_recycled = 0;
10429 				ofld_rxq->fl.cl_fast_recycled = 0;
10430 			}
10431 #endif
10432 
10433 			if (IS_MAIN_VI(vi)) {
10434 				wrq = &sc->sge.ctrlq[pi->port_id];
10435 				wrq->tx_wrs_direct = 0;
10436 				wrq->tx_wrs_copied = 0;
10437 			}
10438 		}
10439 	}
10440 
10441 	return (0);
10442 }
10443 
10444 int
10445 t4_os_find_pci_capability(struct adapter *sc, int cap)
10446 {
10447 	int i;
10448 
10449 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
10450 }
10451 
10452 int
10453 t4_os_pci_save_state(struct adapter *sc)
10454 {
10455 	device_t dev;
10456 	struct pci_devinfo *dinfo;
10457 
10458 	dev = sc->dev;
10459 	dinfo = device_get_ivars(dev);
10460 
10461 	pci_cfg_save(dev, dinfo, 0);
10462 	return (0);
10463 }
10464 
10465 int
10466 t4_os_pci_restore_state(struct adapter *sc)
10467 {
10468 	device_t dev;
10469 	struct pci_devinfo *dinfo;
10470 
10471 	dev = sc->dev;
10472 	dinfo = device_get_ivars(dev);
10473 
10474 	pci_cfg_restore(dev, dinfo);
10475 	return (0);
10476 }
10477 
10478 void
10479 t4_os_portmod_changed(struct port_info *pi)
10480 {
10481 	struct adapter *sc = pi->adapter;
10482 	struct vi_info *vi;
10483 	struct ifnet *ifp;
10484 	static const char *mod_str[] = {
10485 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
10486 	};
10487 
10488 	KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
10489 	    ("%s: port_type %u", __func__, pi->port_type));
10490 
10491 	vi = &pi->vi[0];
10492 	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
10493 		PORT_LOCK(pi);
10494 		build_medialist(pi);
10495 		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
10496 			fixup_link_config(pi);
10497 			apply_link_config(pi);
10498 		}
10499 		PORT_UNLOCK(pi);
10500 		end_synchronized_op(sc, LOCK_HELD);
10501 	}
10502 
10503 	ifp = vi->ifp;
10504 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
10505 		if_printf(ifp, "transceiver unplugged.\n");
10506 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
10507 		if_printf(ifp, "unknown transceiver inserted.\n");
10508 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
10509 		if_printf(ifp, "unsupported transceiver inserted.\n");
10510 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
10511 		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
10512 		    port_top_speed(pi), mod_str[pi->mod_type]);
10513 	} else {
10514 		if_printf(ifp, "transceiver (type %d) inserted.\n",
10515 		    pi->mod_type);
10516 	}
10517 }
10518 
10519 void
10520 t4_os_link_changed(struct port_info *pi)
10521 {
10522 	struct vi_info *vi;
10523 	struct ifnet *ifp;
10524 	struct link_config *lc;
10525 	int v;
10526 
10527 	PORT_LOCK_ASSERT_OWNED(pi);
10528 
10529 	for_each_vi(pi, v, vi) {
10530 		ifp = vi->ifp;
10531 		if (ifp == NULL)
10532 			continue;
10533 
10534 		lc = &pi->link_cfg;
10535 		if (lc->link_ok) {
10536 			ifp->if_baudrate = IF_Mbps(lc->speed);
10537 			if_link_state_change(ifp, LINK_STATE_UP);
10538 		} else {
10539 			if_link_state_change(ifp, LINK_STATE_DOWN);
10540 		}
10541 	}
10542 }
10543 
10544 void
10545 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
10546 {
10547 	struct adapter *sc;
10548 
10549 	sx_slock(&t4_list_lock);
10550 	SLIST_FOREACH(sc, &t4_list, link) {
10551 		/*
10552 		 * func should not make any assumptions about what state sc is
10553 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
10554 		 */
10555 		func(sc, arg);
10556 	}
10557 	sx_sunlock(&t4_list_lock);
10558 }
10559 
10560 static int
10561 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
10562     struct thread *td)
10563 {
10564 	int rc;
10565 	struct adapter *sc = dev->si_drv1;
10566 
10567 	rc = priv_check(td, PRIV_DRIVER);
10568 	if (rc != 0)
10569 		return (rc);
10570 
10571 	switch (cmd) {
10572 	case CHELSIO_T4_GETREG: {
10573 		struct t4_reg *edata = (struct t4_reg *)data;
10574 
10575 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
10576 			return (EFAULT);
10577 
10578 		if (edata->size == 4)
10579 			edata->val = t4_read_reg(sc, edata->addr);
10580 		else if (edata->size == 8)
10581 			edata->val = t4_read_reg64(sc, edata->addr);
10582 		else
10583 			return (EINVAL);
10584 
10585 		break;
10586 	}
10587 	case CHELSIO_T4_SETREG: {
10588 		struct t4_reg *edata = (struct t4_reg *)data;
10589 
10590 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
10591 			return (EFAULT);
10592 
10593 		if (edata->size == 4) {
10594 			if (edata->val & 0xffffffff00000000)
10595 				return (EINVAL);
10596 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
10597 		} else if (edata->size == 8)
10598 			t4_write_reg64(sc, edata->addr, edata->val);
10599 		else
10600 			return (EINVAL);
10601 		break;
10602 	}
10603 	case CHELSIO_T4_REGDUMP: {
10604 		struct t4_regdump *regs = (struct t4_regdump *)data;
10605 		int reglen = t4_get_regs_len(sc);
10606 		uint8_t *buf;
10607 
10608 		if (regs->len < reglen) {
10609 			regs->len = reglen; /* hint to the caller */
10610 			return (ENOBUFS);
10611 		}
10612 
10613 		regs->len = reglen;
10614 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
10615 		get_regs(sc, regs, buf);
10616 		rc = copyout(buf, regs->data, reglen);
10617 		free(buf, M_CXGBE);
10618 		break;
10619 	}
10620 	case CHELSIO_T4_GET_FILTER_MODE:
10621 		rc = get_filter_mode(sc, (uint32_t *)data);
10622 		break;
10623 	case CHELSIO_T4_SET_FILTER_MODE:
10624 		rc = set_filter_mode(sc, *(uint32_t *)data);
10625 		break;
10626 	case CHELSIO_T4_GET_FILTER:
10627 		rc = get_filter(sc, (struct t4_filter *)data);
10628 		break;
10629 	case CHELSIO_T4_SET_FILTER:
10630 		rc = set_filter(sc, (struct t4_filter *)data);
10631 		break;
10632 	case CHELSIO_T4_DEL_FILTER:
10633 		rc = del_filter(sc, (struct t4_filter *)data);
10634 		break;
10635 	case CHELSIO_T4_GET_SGE_CONTEXT:
10636 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
10637 		break;
10638 	case CHELSIO_T4_LOAD_FW:
10639 		rc = load_fw(sc, (struct t4_data *)data);
10640 		break;
10641 	case CHELSIO_T4_GET_MEM:
10642 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
10643 		break;
10644 	case CHELSIO_T4_GET_I2C:
10645 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
10646 		break;
10647 	case CHELSIO_T4_CLEAR_STATS:
10648 		rc = clear_stats(sc, *(uint32_t *)data);
10649 		break;
10650 	case CHELSIO_T4_SCHED_CLASS:
10651 		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
10652 		break;
10653 	case CHELSIO_T4_SCHED_QUEUE:
10654 		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
10655 		break;
10656 	case CHELSIO_T4_GET_TRACER:
10657 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
10658 		break;
10659 	case CHELSIO_T4_SET_TRACER:
10660 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
10661 		break;
10662 	case CHELSIO_T4_LOAD_CFG:
10663 		rc = load_cfg(sc, (struct t4_data *)data);
10664 		break;
10665 	case CHELSIO_T4_LOAD_BOOT:
10666 		rc = load_boot(sc, (struct t4_bootrom *)data);
10667 		break;
10668 	case CHELSIO_T4_LOAD_BOOTCFG:
10669 		rc = load_bootcfg(sc, (struct t4_data *)data);
10670 		break;
10671 	case CHELSIO_T4_CUDBG_DUMP:
10672 		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
10673 		break;
10674 	case CHELSIO_T4_SET_OFLD_POLICY:
10675 		rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
10676 		break;
10677 	default:
10678 		rc = ENOTTY;
10679 	}
10680 
10681 	return (rc);
10682 }
10683 
10684 #ifdef TCP_OFFLOAD
10685 static int
10686 toe_capability(struct vi_info *vi, int enable)
10687 {
10688 	int rc;
10689 	struct port_info *pi = vi->pi;
10690 	struct adapter *sc = pi->adapter;
10691 
10692 	ASSERT_SYNCHRONIZED_OP(sc);
10693 
10694 	if (!is_offload(sc))
10695 		return (ENODEV);
10696 
10697 	if (enable) {
10698 		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
10699 			/* TOE is already enabled. */
10700 			return (0);
10701 		}
10702 
10703 		/*
10704 		 * We need the port's queues around so that we're able to send
10705 		 * and receive CPLs to/from the TOE even if the ifnet for this
10706 		 * port has never been UP'd administratively.
10707 		 */
10708 		if (!(vi->flags & VI_INIT_DONE)) {
10709 			rc = vi_full_init(vi);
10710 			if (rc)
10711 				return (rc);
10712 		}
10713 		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
10714 			rc = vi_full_init(&pi->vi[0]);
10715 			if (rc)
10716 				return (rc);
10717 		}
10718 
10719 		if (isset(&sc->offload_map, pi->port_id)) {
10720 			/* TOE is enabled on another VI of this port. */
10721 			pi->uld_vis++;
10722 			return (0);
10723 		}
10724 
10725 		if (!uld_active(sc, ULD_TOM)) {
10726 			rc = t4_activate_uld(sc, ULD_TOM);
10727 			if (rc == EAGAIN) {
10728 				log(LOG_WARNING,
10729 				    "You must kldload t4_tom.ko before trying "
10730 				    "to enable TOE on a cxgbe interface.\n");
10731 			}
10732 			if (rc != 0)
10733 				return (rc);
10734 			KASSERT(sc->tom_softc != NULL,
10735 			    ("%s: TOM activated but softc NULL", __func__));
10736 			KASSERT(uld_active(sc, ULD_TOM),
10737 			    ("%s: TOM activated but flag not set", __func__));
10738 		}
10739 
10740 		/* Activate iWARP and iSCSI too, if the modules are loaded. */
10741 		if (!uld_active(sc, ULD_IWARP))
10742 			(void) t4_activate_uld(sc, ULD_IWARP);
10743 		if (!uld_active(sc, ULD_ISCSI))
10744 			(void) t4_activate_uld(sc, ULD_ISCSI);
10745 
10746 		pi->uld_vis++;
10747 		setbit(&sc->offload_map, pi->port_id);
10748 	} else {
10749 		pi->uld_vis--;
10750 
10751 		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
10752 			return (0);
10753 
10754 		KASSERT(uld_active(sc, ULD_TOM),
10755 		    ("%s: TOM never initialized?", __func__));
10756 		clrbit(&sc->offload_map, pi->port_id);
10757 	}
10758 
10759 	return (0);
10760 }
10761 
10762 /*
10763  * Add an upper layer driver to the global list.
10764  */
10765 int
10766 t4_register_uld(struct uld_info *ui)
10767 {
10768 	int rc = 0;
10769 	struct uld_info *u;
10770 
10771 	sx_xlock(&t4_uld_list_lock);
10772 	SLIST_FOREACH(u, &t4_uld_list, link) {
10773 		if (u->uld_id == ui->uld_id) {
10774 			rc = EEXIST;
10775 			goto done;
10776 		}
10777 	}
10778 
10779 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
10780 	ui->refcount = 0;
10781 done:
10782 	sx_xunlock(&t4_uld_list_lock);
10783 	return (rc);
10784 }
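
/*
 * Example (sketch): an upper layer driver registers itself from its own
 * module load path.  The handler names below are illustrative; the fields
 * match the uses of struct uld_info in this file.
 *
 *	static struct uld_info example_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = example_activate,
 *		.deactivate = example_deactivate,
 *	};
 *
 *	rc = t4_register_uld(&example_uld_info);
 *
 * Registration fails with EEXIST if a ULD with the same uld_id is already
 * listed, and t4_unregister_uld() below fails with EBUSY while the ULD's
 * refcount is nonzero.
 */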
10785 
10786 int
10787 t4_unregister_uld(struct uld_info *ui)
10788 {
10789 	int rc = EINVAL;
10790 	struct uld_info *u;
10791 
10792 	sx_xlock(&t4_uld_list_lock);
10793 
10794 	SLIST_FOREACH(u, &t4_uld_list, link) {
10795 		if (u == ui) {
10796 			if (ui->refcount > 0) {
10797 				rc = EBUSY;
10798 				goto done;
10799 			}
10800 
10801 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
10802 			rc = 0;
10803 			goto done;
10804 		}
10805 	}
10806 done:
10807 	sx_xunlock(&t4_uld_list_lock);
10808 	return (rc);
10809 }
10810 
10811 int
10812 t4_activate_uld(struct adapter *sc, int id)
10813 {
10814 	int rc;
10815 	struct uld_info *ui;
10816 
10817 	ASSERT_SYNCHRONIZED_OP(sc);
10818 
10819 	if (id < 0 || id > ULD_MAX)
10820 		return (EINVAL);
10821 	rc = EAGAIN;	/* kldoad the module with this ULD and try again. */
10822 	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
10823 	sx_slock(&t4_uld_list_lock);
10824 
10825 	SLIST_FOREACH(ui, &t4_uld_list, link) {
10826 		if (ui->uld_id == id) {
10827 			if (!(sc->flags & FULL_INIT_DONE)) {
10828 				rc = adapter_full_init(sc);
10829 				if (rc != 0)
10830 					break;
10831 			}
10832 
10833 			rc = ui->activate(sc);
10834 			if (rc == 0) {
10835 				setbit(&sc->active_ulds, id);
10836 				ui->refcount++;
10837 			}
10838 			break;
10839 		}
10840 	}
10841 
10842 	sx_sunlock(&t4_uld_list_lock);
10843 
10844 	return (rc);
10845 }
10846 
10847 int
10848 t4_deactivate_uld(struct adapter *sc, int id)
10849 {
10850 	int rc;
10851 	struct uld_info *ui;
10852 
10853 	ASSERT_SYNCHRONIZED_OP(sc);
10854 
10855 	if (id < 0 || id > ULD_MAX)
10856 		return (EINVAL);
10857 	rc = ENXIO;
10858 
10859 	sx_slock(&t4_uld_list_lock);
10860 
10861 	SLIST_FOREACH(ui, &t4_uld_list, link) {
10862 		if (ui->uld_id == id) {
10863 			rc = ui->deactivate(sc);
10864 			if (rc == 0) {
10865 				clrbit(&sc->active_ulds, id);
10866 				ui->refcount--;
10867 			}
10868 			break;
10869 		}
10870 	}
10871 
10872 	sx_sunlock(&t4_uld_list_lock);
10873 
10874 	return (rc);
10875 }
10876 
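/*
 * Deferred handler (taskqueue callback signature) for async adapter events.
 * Only the iWARP ULD consumes these today, so the event is forwarded to
 * ULD_IWARP alone.
 */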
10877 static void
10878 t4_async_event(void *arg, int n)
10879 {
10880 	struct uld_info *ui;
10881 	struct adapter *sc = (struct adapter *)arg;
10882 
10883 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
10884 		return;
10885 	sx_slock(&t4_uld_list_lock);
10886 	SLIST_FOREACH(ui, &t4_uld_list, link) {
10887 		if (ui->uld_id == ULD_IWARP) {
10888 			ui->async_event(sc);
10889 			break;
10890 		}
10891 	}
10892 	sx_sunlock(&t4_uld_list_lock);
10893 	end_synchronized_op(sc, 0);
10894 }
10895 
10896 int
10897 uld_active(struct adapter *sc, int uld_id)
10898 {
10899 
10900 	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
10901 
10902 	return (isset(&sc->active_ulds, uld_id));
10903 }
10904 #endif
10905 
10906 /*
10907  * t  = ptr to tunable (> 0: user-set, left as is; 0: use c; < 0: use -t;
10908  *      the last two are capped at nc).
10909  * nc = number of CPUs.  c = compiled-in default for that tunable.
10910  */
10911 static void
10912 calculate_nqueues(int *t, int nc, const int c)
10913 {
10914 	int nq;
10915 
10916 	if (*t > 0)
10917 		return;
10918 	nq = *t < 0 ? -*t : c;
10919 	*t = min(nc, nq);
10920 }
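
/*
 * Worked example: with nc = 8 and c = 16, a tunable left at 0 becomes
 * min(8, 16) = 8, one preset to -4 becomes min(8, 4) = 4, and any positive
 * preset is kept as is.
 */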
10921 
10922 /*
10923  * Come up with reasonable defaults for some of the tunables, provided they're
10924  * not set by the user (in which case we'll use the values as is).
10925  */
10926 static void
10927 tweak_tunables(void)
10928 {
10929 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
10930 
10931 	if (t4_ntxq < 1) {
10932 #ifdef RSS
10933 		t4_ntxq = rss_getnumbuckets();
10934 #else
10935 		calculate_nqueues(&t4_ntxq, nc, NTXQ);
10936 #endif
10937 	}
10938 
10939 	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
10940 
10941 	if (t4_nrxq < 1) {
10942 #ifdef RSS
10943 		t4_nrxq = rss_getnumbuckets();
10944 #else
10945 		calculate_nqueues(&t4_nrxq, nc, NRXQ);
10946 #endif
10947 	}
10948 
10949 	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
10950 
10951 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
10952 	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
10953 	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
10954 #endif
10955 #ifdef TCP_OFFLOAD
10956 	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
10957 	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
10958 #endif
10959 
10960 #if defined(TCP_OFFLOAD) || defined(KERN_TLS)
10961 	if (t4_toecaps_allowed == -1)
10962 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
10963 #else
10964 	if (t4_toecaps_allowed == -1)
10965 		t4_toecaps_allowed = 0;
10966 #endif
10967 
10968 #ifdef TCP_OFFLOAD
10969 	if (t4_rdmacaps_allowed == -1) {
10970 		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
10971 		    FW_CAPS_CONFIG_RDMA_RDMAC;
10972 	}
10973 
10974 	if (t4_iscsicaps_allowed == -1) {
10975 		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
10976 		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
10977 		    FW_CAPS_CONFIG_ISCSI_T10DIF;
10978 	}
10979 
10980 	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
10981 		t4_tmr_idx_ofld = TMR_IDX_OFLD;
10982 
10983 	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
10984 		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
10985 #else
10986 	if (t4_rdmacaps_allowed == -1)
10987 		t4_rdmacaps_allowed = 0;
10988 
10989 	if (t4_iscsicaps_allowed == -1)
10990 		t4_iscsicaps_allowed = 0;
10991 #endif
10992 
10993 #ifdef DEV_NETMAP
10994 	calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
10995 	calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
10996 	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
10997 	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
10998 #endif
10999 
11000 	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
11001 		t4_tmr_idx = TMR_IDX;
11002 
11003 	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
11004 		t4_pktc_idx = PKTC_IDX;
11005 
11006 	if (t4_qsize_txq < 128)
11007 		t4_qsize_txq = 128;
11008 
11009 	if (t4_qsize_rxq < 128)
11010 		t4_qsize_rxq = 128;
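	/* Round the rx queue size up to a multiple of 8. */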
11011 	while (t4_qsize_rxq & 7)
11012 		t4_qsize_rxq++;
11013 
11014 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
11015 
11016 	/*
11017 	 * Number of VIs to create per-port.  The first VI is the "main" regular
11018 	 * VI for the port.  The rest are additional virtual interfaces on the
11019 	 * same physical port.  Note that the main VI does not have native
11020 	 * netmap support but the extra VIs do.
11021 	 *
11022 	 * Limit the number of VIs per port to the number of available
11023 	 * MAC addresses per port.
11024 	 */
11025 	if (t4_num_vis < 1)
11026 		t4_num_vis = 1;
11027 	if (t4_num_vis > nitems(vi_mac_funcs)) {
11028 		t4_num_vis = nitems(vi_mac_funcs);
11029 		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
11030 	}
11031 
11032 	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
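	/*
	 * Out-of-range values mean "auto": default to relaxed ordering
	 * enabled, except on Intel CPUs, whose root complexes are assumed
	 * to handle it poorly.
	 */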
11033 		pcie_relaxed_ordering = 1;
11034 #if defined(__i386__) || defined(__amd64__)
11035 		if (cpu_vendor_id == CPU_VENDOR_INTEL)
11036 			pcie_relaxed_ordering = 0;
11037 #endif
11038 	}
11039 }
11040 
11041 #ifdef DDB
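/*
 * Dump a tid's TCB (128 bytes, printed as 4 rows of 8 words) through PCIe
 * memory window 2, saving and restoring the window's offset register around
 * the reads.
 */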
11042 static void
11043 t4_dump_tcb(struct adapter *sc, int tid)
11044 {
11045 	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
11046 
11047 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
11048 	save = t4_read_reg(sc, reg);
11049 	base = sc->memwin[2].mw_base;
11050 
11051 	/* Dump TCB for the tid */
11052 	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
11053 	tcb_addr += tid * TCB_SIZE;
11054 
11055 	if (is_t4(sc)) {
11056 		pf = 0;
11057 		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
11058 	} else {
11059 		pf = V_PFNUM(sc->pf);
11060 		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
11061 	}
11062 	t4_write_reg(sc, reg, win_pos | pf);
11063 	t4_read_reg(sc, reg);
11064 
11065 	off = tcb_addr - win_pos;
11066 	for (i = 0; i < 4; i++) {
11067 		uint32_t buf[8];
11068 		for (j = 0; j < 8; j++, off += 4)
11069 			buf[j] = htonl(t4_read_reg(sc, base + off));
11070 
11071 		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
11072 		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
11073 		    buf[7]);
11074 	}
11075 
11076 	t4_write_reg(sc, reg, save);
11077 	t4_read_reg(sc, reg);
11078 }
11079 
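/*
 * The firmware devlog is a ring buffer: find the oldest entry (smallest
 * timestamp), then print forward from it, wrapping around until the scan is
 * back at the start or the pager is quit.
 */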
11080 static void
11081 t4_dump_devlog(struct adapter *sc)
11082 {
11083 	struct devlog_params *dparams = &sc->params.devlog;
11084 	struct fw_devlog_e e;
11085 	int i, first, j, m, nentries, rc;
11086 	uint64_t ftstamp = UINT64_MAX;
11087 
11088 	if (dparams->start == 0) {
11089 		db_printf("devlog params not valid\n");
11090 		return;
11091 	}
11092 
11093 	nentries = dparams->size / sizeof(struct fw_devlog_e);
11094 	m = fwmtype_to_hwmtype(dparams->memtype);
11095 
11096 	/* Find the first entry. */
11097 	first = -1;
11098 	for (i = 0; i < nentries && !db_pager_quit; i++) {
11099 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
11100 		    sizeof(e), (void *)&e);
11101 		if (rc != 0)
11102 			break;
11103 
11104 		if (e.timestamp == 0)
11105 			break;
11106 
11107 		e.timestamp = be64toh(e.timestamp);
11108 		if (e.timestamp < ftstamp) {
11109 			ftstamp = e.timestamp;
11110 			first = i;
11111 		}
11112 	}
11113 
11114 	if (first == -1)
11115 		return;
11116 
11117 	i = first;
11118 	do {
11119 		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
11120 		    sizeof(e), (void *)&e);
11121 		if (rc != 0)
11122 			return;
11123 
11124 		if (e.timestamp == 0)
11125 			return;
11126 
11127 		e.timestamp = be64toh(e.timestamp);
11128 		e.seqno = be32toh(e.seqno);
11129 		for (j = 0; j < 8; j++)
11130 			e.params[j] = be32toh(e.params[j]);
11131 
11132 		db_printf("%10d  %15ju  %8s  %8s  ",
11133 		    e.seqno, e.timestamp,
11134 		    (e.level < nitems(devlog_level_strings) ?
11135 			devlog_level_strings[e.level] : "UNKNOWN"),
11136 		    (e.facility < nitems(devlog_facility_strings) ?
11137 			devlog_facility_strings[e.facility] : "UNKNOWN"));
11138 		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
11139 		    e.params[3], e.params[4], e.params[5], e.params[6],
11140 		    e.params[7]);
11141 
11142 		if (++i == nentries)
11143 			i = 0;
11144 	} while (i != first && !db_pager_quit);
11145 }
11146 
11147 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
11148 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
11149 
11150 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
11151 {
11152 	device_t dev;
11153 	int t;
11154 	bool valid;
11155 
11156 	valid = false;
11157 	t = db_read_token();
11158 	if (t == tIDENT) {
11159 		dev = device_lookup_by_name(db_tok_string);
11160 		valid = true;
11161 	}
11162 	db_skip_to_eol();
11163 	if (!valid) {
11164 		db_printf("usage: show t4 devlog <nexus>\n");
11165 		return;
11166 	}
11167 
11168 	if (dev == NULL) {
11169 		db_printf("device not found\n");
11170 		return;
11171 	}
11172 
11173 	t4_dump_devlog(device_get_softc(dev));
11174 }
11175 
11176 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
11177 {
11178 	device_t dev;
11179 	int radix, tid, t;
11180 	bool valid;
11181 
11182 	valid = false;
11183 	radix = db_radix;
11184 	db_radix = 10;
11185 	t = db_read_token();
11186 	if (t == tIDENT) {
11187 		dev = device_lookup_by_name(db_tok_string);
11188 		t = db_read_token();
11189 		if (t == tNUMBER) {
11190 			tid = db_tok_number;
11191 			valid = true;
11192 		}
11193 	}
11194 	db_radix = radix;
11195 	db_skip_to_eol();
11196 	if (!valid) {
11197 		db_printf("usage: show t4 tcb <nexus> <tid>\n");
11198 		return;
11199 	}
11200 
11201 	if (dev == NULL) {
11202 		db_printf("device not found\n");
11203 		return;
11204 	}
11205 	if (tid < 0) {
11206 		db_printf("invalid tid\n");
11207 		return;
11208 	}
11209 
11210 	t4_dump_tcb(device_get_softc(dev), tid);
11211 }
11212 #endif
11213 
11214 static struct sx mlu;	/* mod load unload */
11215 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
11216 
11217 static int
11218 mod_event(module_t mod, int cmd, void *arg)
11219 {
11220 	int rc = 0;
11221 	static int loaded = 0;
11222 
11223 	switch (cmd) {
11224 	case MOD_LOAD:
11225 		sx_xlock(&mlu);
11226 		if (loaded++ == 0) {
11227 			t4_sge_modload();
11228 			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
11229 			    t4_filter_rpl, CPL_COOKIE_FILTER);
11230 			t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
11231 			    do_l2t_write_rpl, CPL_COOKIE_FILTER);
11232 			t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
11233 			    t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
11234 			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
11235 			    t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
11236 			t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
11237 			    t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
11238 			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
11239 			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
11240 			t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
11241 			    do_smt_write_rpl);
11242 			sx_init(&t4_list_lock, "T4/T5 adapters");
11243 			SLIST_INIT(&t4_list);
11244 			callout_init(&fatal_callout, 1);
11245 #ifdef TCP_OFFLOAD
11246 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
11247 			SLIST_INIT(&t4_uld_list);
11248 #endif
11249 #ifdef INET6
11250 			t4_clip_modload();
11251 #endif
11252 #ifdef KERN_TLS
11253 			t6_ktls_modload();
11254 #endif
11255 			t4_tracer_modload();
11256 			tweak_tunables();
11257 		}
11258 		sx_xunlock(&mlu);
11259 		break;
11260 
11261 	case MOD_UNLOAD:
11262 		sx_xlock(&mlu);
11263 		if (--loaded == 0) {
11264 			int tries;
11265 
11266 			sx_slock(&t4_list_lock);
11267 			if (!SLIST_EMPTY(&t4_list)) {
11268 				rc = EBUSY;
11269 				sx_sunlock(&t4_list_lock);
11270 				goto done_unload;
11271 			}
11272 #ifdef TCP_OFFLOAD
11273 			sx_slock(&t4_uld_list_lock);
11274 			if (!SLIST_EMPTY(&t4_uld_list)) {
11275 				rc = EBUSY;
11276 				sx_sunlock(&t4_uld_list_lock);
11277 				sx_sunlock(&t4_list_lock);
11278 				goto done_unload;
11279 			}
11280 #endif
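			/*
			 * Give the stack up to ~10s (5 tries, 2s apart) to
			 * return clusters that use our custom free routine.
			 */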
11281 			tries = 0;
11282 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
11283 				uprintf("%ju clusters with custom free routine "
11284 				    "still in use.\n", t4_sge_extfree_refs());
11285 				pause("t4unload", 2 * hz);
11286 			}
11287 #ifdef TCP_OFFLOAD
11288 			sx_sunlock(&t4_uld_list_lock);
11289 #endif
11290 			sx_sunlock(&t4_list_lock);
11291 
11292 			if (t4_sge_extfree_refs() == 0) {
11293 				t4_tracer_modunload();
11294 #ifdef KERN_TLS
11295 				t6_ktls_modunload();
11296 #endif
11297 #ifdef INET6
11298 				t4_clip_modunload();
11299 #endif
11300 #ifdef TCP_OFFLOAD
11301 				sx_destroy(&t4_uld_list_lock);
11302 #endif
11303 				sx_destroy(&t4_list_lock);
11304 				t4_sge_modunload();
11305 				loaded = 0;
11306 			} else {
11307 				rc = EBUSY;
11308 				loaded++;	/* undo earlier decrement */
11309 			}
11310 		}
11311 done_unload:
11312 		sx_xunlock(&mlu);
11313 		break;
11314 	}
11315 
11316 	return (rc);
11317 }
11318 
11319 static devclass_t t4_devclass, t5_devclass, t6_devclass;
11320 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
11321 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
11322 
11323 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
11324 MODULE_VERSION(t4nex, 1);
11325 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
11326 #ifdef DEV_NETMAP
11327 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
11328 #endif /* DEV_NETMAP */
11329 
11330 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
11331 MODULE_VERSION(t5nex, 1);
11332 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
11333 #ifdef DEV_NETMAP
11334 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
11335 #endif /* DEV_NETMAP */
11336 
11337 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
11338 MODULE_VERSION(t6nex, 1);
11339 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
11340 #ifdef DEV_NETMAP
11341 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
11342 #endif /* DEV_NETMAP */
11343 
11344 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
11345 MODULE_VERSION(cxgbe, 1);
11346 
11347 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
11348 MODULE_VERSION(cxl, 1);
11349 
11350 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
11351 MODULE_VERSION(cc, 1);
11352 
11353 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
11354 MODULE_VERSION(vcxgbe, 1);
11355 
11356 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
11357 MODULE_VERSION(vcxl, 1);
11358 
11359 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
11360 MODULE_VERSION(vcc, 1);
11361