xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision a4e4b355f45538a9b9550df744ca43787fd43c93)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62 
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69 
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)	/* softc is the entire adapter state */
};
86 
87 
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)	/* one softc per port */
};

/* Control character device (/dev/t4nexN) entry points. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116 
/* T5 bus driver interface */
static int t5_probe(device_t);
/* T5 shares attach/detach with T4; only the probe routine differs. */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface; reuses the cxgbe device methods. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* Same entry points as the T4 cdev, only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148 
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;		/* protects t4_list */
SLIST_HEAD(, adapter) t4_list;		/* all adapters driven by this module */
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;	/* protects t4_uld_list */
SLIST_HEAD(, uld_info) t4_uld_list;	/* registered upper-layer drivers */
#endif
170 
171 /*
172  * Tunables.  See tweak_tunables() too.
173  *
174  * Each tunable is set to a default value here if it's known at compile-time.
175  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
176  * provide a reasonable default when the driver is loaded.
177  *
178  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
179  * T5 are under hw.cxl.
180  */
181 
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;	/* -1: pick a default in tweak_tunables() */
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Non-zero: keep txq 0 free of flowid-hashed traffic (see sysctl_noflowq). */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only: use write-combined mappings of the doorbell BAR. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
324 
/*
 * Interrupt vector and queue counts computed by cfg_itype_and_nqueues()
 * during attach, based on the tunables above and the port mix (10G vs 1G).
 */
struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};
348 
/* One hardware filter (TCAM) entry and its driver-side bookkeeping. */
struct filter_entry {
        uint32_t valid:1;	/* filter allocated and valid */
        uint32_t locked:1;	/* filter is administratively locked */
        uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;	/* user-supplied filter spec */
};
358 
/* Forward declarations for the routines private to this file. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);
460 
/* PCI device ID -> marketing name tables, consulted by t4_probe/t5_probe. */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};
504 
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

/* Cluster metadata must fit in the space reserved for it in an mbuf cluster. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
519 
520 static int
521 t4_probe(device_t dev)
522 {
523 	int i;
524 	uint16_t v = pci_get_vendor(dev);
525 	uint16_t d = pci_get_device(dev);
526 	uint8_t f = pci_get_function(dev);
527 
528 	if (v != PCI_VENDOR_ID_CHELSIO)
529 		return (ENXIO);
530 
531 	/* Attach only to PF0 of the FPGA */
532 	if (d == 0xa000 && f != 0)
533 		return (ENXIO);
534 
535 	for (i = 0; i < nitems(t4_pciids); i++) {
536 		if (d == t4_pciids[i].device) {
537 			device_set_desc(dev, t4_pciids[i].desc);
538 			return (BUS_PROBE_DEFAULT);
539 		}
540 	}
541 
542 	return (ENXIO);
543 }
544 
545 static int
546 t5_probe(device_t dev)
547 {
548 	int i;
549 	uint16_t v = pci_get_vendor(dev);
550 	uint16_t d = pci_get_device(dev);
551 	uint8_t f = pci_get_function(dev);
552 
553 	if (v != PCI_VENDOR_ID_CHELSIO)
554 		return (ENXIO);
555 
556 	/* Attach only to PF0 of the FPGA */
557 	if (d == 0xb000 && f != 0)
558 		return (ENXIO);
559 
560 	for (i = 0; i < nitems(t5_pciids); i++) {
561 		if (d == t5_pciids[i].device) {
562 			device_set_desc(dev, t5_pciids[i].desc);
563 			return (BUS_PROBE_DEFAULT);
564 		}
565 	}
566 
567 	return (ENXIO);
568 }
569 
570 static int
571 t4_attach(device_t dev)
572 {
573 	struct adapter *sc;
574 	int rc = 0, i, n10g, n1g, rqidx, tqidx;
575 	struct intrs_and_queues iaq;
576 	struct sge *s;
577 #ifdef TCP_OFFLOAD
578 	int ofld_rqidx, ofld_tqidx;
579 #endif
580 #ifdef DEV_NETMAP
581 	int nm_rqidx, nm_tqidx;
582 #endif
583 	const char *pcie_ts;
584 
585 	sc = device_get_softc(dev);
586 	sc->dev = dev;
587 
588 	pci_enable_busmaster(dev);
589 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
590 		uint32_t v;
591 
592 		pci_set_max_read_req(dev, 4096);
593 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
594 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
595 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
596 	}
597 
598 	sc->traceq = -1;
599 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
600 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
601 	    device_get_nameunit(dev));
602 
603 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
604 	    device_get_nameunit(dev));
605 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
606 	sx_xlock(&t4_list_lock);
607 	SLIST_INSERT_HEAD(&t4_list, sc, link);
608 	sx_xunlock(&t4_list_lock);
609 
610 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
611 	TAILQ_INIT(&sc->sfl);
612 	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
613 
614 	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
615 
616 	rc = map_bars_0_and_4(sc);
617 	if (rc != 0)
618 		goto done; /* error message displayed already */
619 
620 	/*
621 	 * This is the real PF# to which we're attaching.  Works from within PCI
622 	 * passthrough environments too, where pci_get_function() could return a
623 	 * different PF# depending on the passthrough configuration.  We need to
624 	 * use the real PF# in all our communication with the firmware.
625 	 */
626 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
627 	sc->mbox = sc->pf;
628 
629 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
630 	sc->an_handler = an_not_handled;
631 	for (i = 0; i < nitems(sc->cpl_handler); i++)
632 		sc->cpl_handler[i] = cpl_not_handled;
633 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
634 		sc->fw_msg_handler[i] = fw_msg_not_handled;
635 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
636 	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
637 	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
638 	t4_init_sge_cpl_handlers(sc);
639 
640 	/* Prepare the adapter for operation */
641 	rc = -t4_prep_adapter(sc);
642 	if (rc != 0) {
643 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
644 		goto done;
645 	}
646 
647 	/*
648 	 * Do this really early, with the memory windows set up even before the
649 	 * character device.  The userland tool's register i/o and mem read
650 	 * will work even in "recovery mode".
651 	 */
652 	setup_memwin(sc);
653 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
654 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
655 	    device_get_nameunit(dev));
656 	if (sc->cdev == NULL)
657 		device_printf(dev, "failed to create nexus char device.\n");
658 	else
659 		sc->cdev->si_drv1 = sc;
660 
661 	/* Go no further if recovery mode has been requested. */
662 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
663 		device_printf(dev, "recovery mode.\n");
664 		goto done;
665 	}
666 
667 	/* Prepare the firmware for operation */
668 	rc = prep_firmware(sc);
669 	if (rc != 0)
670 		goto done; /* error message displayed already */
671 
672 	rc = get_params__post_init(sc);
673 	if (rc != 0)
674 		goto done; /* error message displayed already */
675 
676 	rc = set_params__post_init(sc);
677 	if (rc != 0)
678 		goto done; /* error message displayed already */
679 
680 	rc = map_bar_2(sc);
681 	if (rc != 0)
682 		goto done; /* error message displayed already */
683 
684 	rc = t4_create_dma_tag(sc);
685 	if (rc != 0)
686 		goto done; /* error message displayed already */
687 
688 	/*
689 	 * First pass over all the ports - allocate VIs and initialize some
690 	 * basic parameters like mac address, port type, etc.  We also figure
691 	 * out whether a port is 10G or 1G and use that information when
692 	 * calculating how many interrupts to attempt to allocate.
693 	 */
694 	n10g = n1g = 0;
695 	for_each_port(sc, i) {
696 		struct port_info *pi;
697 
698 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
699 		sc->port[i] = pi;
700 
701 		/* These must be set before t4_port_init */
702 		pi->adapter = sc;
703 		pi->port_id = i;
704 
705 		/* Allocate the vi and initialize parameters like mac addr */
706 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
707 		if (rc != 0) {
708 			device_printf(dev, "unable to initialize port %d: %d\n",
709 			    i, rc);
710 			free(pi, M_CXGBE);
711 			sc->port[i] = NULL;
712 			goto done;
713 		}
714 
715 		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
716 		pi->link_cfg.requested_fc |= t4_pause_settings;
717 		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
718 		pi->link_cfg.fc |= t4_pause_settings;
719 
720 		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
721 		if (rc != 0) {
722 			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
723 			free(pi, M_CXGBE);
724 			sc->port[i] = NULL;
725 			goto done;
726 		}
727 
728 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
729 		    device_get_nameunit(dev), i);
730 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
731 		sc->chan_map[pi->tx_chan] = i;
732 
733 		if (is_10G_port(pi) || is_40G_port(pi)) {
734 			n10g++;
735 			pi->tmr_idx = t4_tmr_idx_10g;
736 			pi->pktc_idx = t4_pktc_idx_10g;
737 		} else {
738 			n1g++;
739 			pi->tmr_idx = t4_tmr_idx_1g;
740 			pi->pktc_idx = t4_pktc_idx_1g;
741 		}
742 
743 		pi->xact_addr_filt = -1;
744 		pi->linkdnrc = -1;
745 
746 		pi->qsize_rxq = t4_qsize_rxq;
747 		pi->qsize_txq = t4_qsize_txq;
748 
749 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
750 		if (pi->dev == NULL) {
751 			device_printf(dev,
752 			    "failed to add device for port %d.\n", i);
753 			rc = ENXIO;
754 			goto done;
755 		}
756 		device_set_softc(pi->dev, pi);
757 	}
758 
759 	/*
760 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
761 	 */
762 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
763 	if (rc != 0)
764 		goto done; /* error message displayed already */
765 
766 	sc->intr_type = iaq.intr_type;
767 	sc->intr_count = iaq.nirq;
768 
769 	s = &sc->sge;
770 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
771 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
772 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
773 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
774 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
775 #ifdef TCP_OFFLOAD
776 	if (is_offload(sc)) {
777 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
778 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
779 		s->neq += s->nofldtxq + s->nofldrxq;
780 		s->niq += s->nofldrxq;
781 
782 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
783 		    M_CXGBE, M_ZERO | M_WAITOK);
784 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
785 		    M_CXGBE, M_ZERO | M_WAITOK);
786 	}
787 #endif
788 #ifdef DEV_NETMAP
789 	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
790 	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
791 	s->neq += s->nnmtxq + s->nnmrxq;
792 	s->niq += s->nnmrxq;
793 
794 	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
795 	    M_CXGBE, M_ZERO | M_WAITOK);
796 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
797 	    M_CXGBE, M_ZERO | M_WAITOK);
798 #endif
799 
800 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
801 	    M_ZERO | M_WAITOK);
802 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
803 	    M_ZERO | M_WAITOK);
804 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
805 	    M_ZERO | M_WAITOK);
806 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
807 	    M_ZERO | M_WAITOK);
808 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
809 	    M_ZERO | M_WAITOK);
810 
811 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
812 	    M_ZERO | M_WAITOK);
813 
814 	t4_init_l2t(sc, M_WAITOK);
815 
816 	/*
817 	 * Second pass over the ports.  This time we know the number of rx and
818 	 * tx queues that each port should get.
819 	 */
820 	rqidx = tqidx = 0;
821 #ifdef TCP_OFFLOAD
822 	ofld_rqidx = ofld_tqidx = 0;
823 #endif
824 #ifdef DEV_NETMAP
825 	nm_rqidx = nm_tqidx = 0;
826 #endif
827 	for_each_port(sc, i) {
828 		struct port_info *pi = sc->port[i];
829 
830 		if (pi == NULL)
831 			continue;
832 
833 		pi->first_rxq = rqidx;
834 		pi->first_txq = tqidx;
835 		if (is_10G_port(pi) || is_40G_port(pi)) {
836 			pi->flags |= iaq.intr_flags_10g;
837 			pi->nrxq = iaq.nrxq10g;
838 			pi->ntxq = iaq.ntxq10g;
839 		} else {
840 			pi->flags |= iaq.intr_flags_1g;
841 			pi->nrxq = iaq.nrxq1g;
842 			pi->ntxq = iaq.ntxq1g;
843 		}
844 
845 		if (pi->ntxq > 1)
846 			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
847 		else
848 			pi->rsrv_noflowq = 0;
849 
850 		rqidx += pi->nrxq;
851 		tqidx += pi->ntxq;
852 #ifdef TCP_OFFLOAD
853 		if (is_offload(sc)) {
854 			pi->first_ofld_rxq = ofld_rqidx;
855 			pi->first_ofld_txq = ofld_tqidx;
856 			if (is_10G_port(pi) || is_40G_port(pi)) {
857 				pi->nofldrxq = iaq.nofldrxq10g;
858 				pi->nofldtxq = iaq.nofldtxq10g;
859 			} else {
860 				pi->nofldrxq = iaq.nofldrxq1g;
861 				pi->nofldtxq = iaq.nofldtxq1g;
862 			}
863 			ofld_rqidx += pi->nofldrxq;
864 			ofld_tqidx += pi->nofldtxq;
865 		}
866 #endif
867 #ifdef DEV_NETMAP
868 		pi->first_nm_rxq = nm_rqidx;
869 		pi->first_nm_txq = nm_tqidx;
870 		if (is_10G_port(pi) || is_40G_port(pi)) {
871 			pi->nnmrxq = iaq.nnmrxq10g;
872 			pi->nnmtxq = iaq.nnmtxq10g;
873 		} else {
874 			pi->nnmrxq = iaq.nnmrxq1g;
875 			pi->nnmtxq = iaq.nnmtxq1g;
876 		}
877 		nm_rqidx += pi->nnmrxq;
878 		nm_tqidx += pi->nnmtxq;
879 #endif
880 	}
881 
882 	rc = setup_intr_handlers(sc);
883 	if (rc != 0) {
884 		device_printf(dev,
885 		    "failed to setup interrupt handlers: %d\n", rc);
886 		goto done;
887 	}
888 
889 	rc = bus_generic_attach(dev);
890 	if (rc != 0) {
891 		device_printf(dev,
892 		    "failed to attach all child ports: %d\n", rc);
893 		goto done;
894 	}
895 
896 	switch (sc->params.pci.speed) {
897 		case 0x1:
898 			pcie_ts = "2.5";
899 			break;
900 		case 0x2:
901 			pcie_ts = "5.0";
902 			break;
903 		case 0x3:
904 			pcie_ts = "8.0";
905 			break;
906 		default:
907 			pcie_ts = "??";
908 			break;
909 	}
910 	device_printf(dev,
911 	    "PCIe x%d (%s GTS/s) (%d), %d ports, %d %s interrupt%s, %d eq, %d iq\n",
912 	    sc->params.pci.width, pcie_ts, sc->params.pci.speed,
913 	    sc->params.nports, sc->intr_count,
914 	    sc->intr_type == INTR_MSIX ? "MSI-X" :
915 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
916 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
917 
918 	t4_set_desc(sc);
919 
920 done:
921 	if (rc != 0 && sc->cdev) {
922 		/* cdev was created and so cxgbetool works; recover that way. */
923 		device_printf(dev,
924 		    "error during attach, adapter is now in recovery mode.\n");
925 		rc = 0;
926 	}
927 
928 	if (rc != 0)
929 		t4_detach(dev);
930 	else
931 		t4_sysctls(sc);
932 
933 	return (rc);
934 }
935 
936 /*
937  * Idempotent
938  */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	/* Quiesce the hardware's interrupts first if fully initialized. */
	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	/* Remove the control device so no new ioctls can arrive. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	/* Detach the child port devices before tearing down shared state. */
	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* Free each port's virtual interface, child device, and softc. */
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	/* Tell the firmware we are going away. */
	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	/* Release the BARs mapped during attach (registers, doorbells, MSI-X). */
	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* An initialized sc_lock means attach got far enough to list sc. */
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->regwin_lock))
		mtx_destroy(&sc->regwin_lock);

	/* Zero the softc so that a repeated detach is harmless (idempotent). */
	bzero(sc, sizeof(*sc));

	return (0);
}
1038 
1039 static int
1040 cxgbe_probe(device_t dev)
1041 {
1042 	char buf[128];
1043 	struct port_info *pi = device_get_softc(dev);
1044 
1045 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1046 	device_set_desc_copy(dev, buf);
1047 
1048 	return (BUS_PROBE_DEFAULT);
1049 }
1050 
/* Interface capabilities this driver advertises to the network stack. */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
/* Capabilities enabled by default at attach time (currently all of them). */
#define T4_CAP_ENABLE (T4_CAP)
1055 
/*
 * Attach routine for a cxgbe port: allocates and configures the ifnet,
 * registers media and VLAN handlers, attaches to the ethernet layer, and
 * reports the queue configuration.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;
	char *s;
	int n, o;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	/* Wire up the ifnet method table to this file's handlers. */
	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* TSO limits advertised to the stack. */
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->media);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	/* Build and log a one-line summary of this port's queue counts. */
	n = 128;
	s = malloc(n, M_CXGBE, M_WAITOK);
	o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
	MPASS(n > o);
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
		    pi->nofldtxq, pi->nofldrxq);
		MPASS(n > o);
	}
#endif
#ifdef DEV_NETMAP
	o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
	    pi->nnmrxq);
	MPASS(n > o);
#endif
	device_printf(dev, "%s\n", s);
	free(s, M_CXGBE);

#ifdef DEV_NETMAP
	/* nm_media handled here to keep implementation private to this file */
	ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->nm_media);
	create_netmap_ifnet(pi);	/* logs errors if something fails */
#endif
	cxgbe_sysctls(pi);

	return (0);
}
1137 
/*
 * Detach routine for a cxgbe port.  Marks the port DOOMED and takes the
 * adapter BUSY (blocking out concurrent synchronized operations) before
 * tearing down the tracer, callouts, queues, and the ifnet itself.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait for any in-progress synchronized operation to finish. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout under the port lock, then drain it. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

#ifdef DEV_NETMAP
	/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
	destroy_netmap_ifnet(pi);
#endif

	/* Release BUSY and wake up anyone waiting in the loop above. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1192 
1193 static void
1194 cxgbe_init(void *arg)
1195 {
1196 	struct port_info *pi = arg;
1197 	struct adapter *sc = pi->adapter;
1198 
1199 	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1200 		return;
1201 	cxgbe_init_synchronized(pi);
1202 	end_synchronized_op(sc, 0);
1203 }
1204 
/*
 * if_ioctl handler for a cxgbe port.  Most commands run inside a
 * synchronized operation (begin_synchronized_op/end_synchronized_op); the
 * SIOCSIFFLAGS case may restart itself via redo_sifflags to switch between
 * sleepable and lock-held modes depending on which operation is required.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		/*
		 * Start in lock-held (non-sleepable) mode; switch modes and
		 * retry via redo_sifflags if the required sub-operation needs
		 * the other mode (init/uninit must sleep, MAC updates must
		 * hold the lock).
		 */
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, pi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						/* Wrong mode; retry locked. */
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					/* Wrong mode; retry sleepable. */
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(pi);
			}
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				/* Wrong mode; retry sleepable. */
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(pi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		/* Toggle only the capabilities that actually changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO4 requires tx checksumming; drop it if disabled. */
			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			/* Same dependency for the IPv6 side. */
			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			/* Propagate the new LRO setting to every rx queue. */
			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		/* Only the standard SFP+/QSFP module addresses are allowed. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
1423 
/*
 * if_transmit handler.  Selects a tx queue (honoring the mbuf's flow id and
 * the reserved no-flowid queue), then either transmits directly if the queue
 * lock is available or enqueues on the buf_ring for later service.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/*
	 * Flow-id traffic is spread over the queues past the first
	 * rsrv_noflowq queues; non-flowid traffic stays on queue 0.
	 */
	if (m->m_flags & M_FLOWID)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
		    + pi->rsrv_noflowq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1491 
1492 static void
1493 cxgbe_qflush(struct ifnet *ifp)
1494 {
1495 	struct port_info *pi = ifp->if_softc;
1496 	struct sge_txq *txq;
1497 	int i;
1498 	struct mbuf *m;
1499 
1500 	/* queues do not exist if !PORT_INIT_DONE. */
1501 	if (pi->flags & PORT_INIT_DONE) {
1502 		for_each_txq(pi, i, txq) {
1503 			TXQ_LOCK(txq);
1504 			m_freem(txq->m);
1505 			txq->m = NULL;
1506 			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1507 				m_freem(m);
1508 			TXQ_UNLOCK(txq);
1509 		}
1510 	}
1511 	if_qflush(ifp);
1512 }
1513 
/*
 * if_get_counter handler: refresh the hardware port statistics and map them
 * onto the generic ifnet counters.  Pause frames are excluded from the
 * frame/byte counts (each pause frame is accounted as 64 octets).
 */
static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames - s->rx_pause);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames - s->tx_pause);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets - s->rx_pause * 64);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets - s->tx_pause * 64);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames - s->rx_pause);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames - s->tx_pause);

	case IFCOUNTER_IQDROPS:
		/* Hardware overflow/truncation drops plus tunnel congestion. */
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		/* Add software buf_ring drops if the tx queues exist. */
		drops = s->tx_drop;
		if (pi->flags & PORT_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(pi, i, txq)
				drops += txq->br->br_drops;
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
1574 
1575 static int
1576 cxgbe_media_change(struct ifnet *ifp)
1577 {
1578 	struct port_info *pi = ifp->if_softc;
1579 
1580 	device_printf(pi->dev, "%s unimplemented.\n", __func__);
1581 
1582 	return (EOPNOTSUPP);
1583 }
1584 
/*
 * ifmedia status callback, shared by the regular and (with DEV_NETMAP) the
 * netmap ifnet.  Rebuilds the media list if the port/module type changed,
 * then reports link state and the active media derived from the link speed.
 */
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia *media = NULL;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;
	/* ifm_data encodes port type and module type; used to detect changes. */
	int data = (pi->port_type << 8) | pi->mod_type;

	if (ifp == pi->ifp)
		media = &pi->media;
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp)
		media = &pi->nm_media;
#endif
	MPASS(media != NULL);

	/* Rebuild the media list if the transceiver/module has changed. */
	cur = media->ifm_cur;
	if (cur->ifm_data != data) {
		build_medialist(pi, media);
		cur = media->ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
1631 
/*
 * Fatal-error handler: stop the SGE (clear F_GLOBALENABLE), mask all
 * interrupts, and log at emergency priority.  The adapter is left stopped.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1640 
/*
 * Map BAR0 (the register window, also the kernel doorbell) and BAR4 (the
 * MSI-X table).  Resources allocated here are released in t4_detach().
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* The kernel doorbell lives in BAR0 and is always available. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1666 
/*
 * Map BAR2, the userspace doorbell BAR.  Optional on T4 (only needed for
 * RDMA); on T5 it additionally enables write-combined doorbells on x86.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/*
				 * WC succeeded: replace plain UDB with the
				 * write-combined variants.  NOTE(review): both
				 * DOORBELL_WCWR and DOORBELL_UDBWC are set
				 * here — confirm both flags are intended.
				 */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1722 
/* Memory window base/aperture pairs, indexed by window number (T4 chips). */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

/* Same for T5 chips; only window 2's base/aperture differs from T4. */
static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1734 
/*
 * Program the PCIe memory window decoders from the chip-appropriate table.
 * Each window register takes the window's bus address and its aperture size
 * encoded as log2(aperture) - 10.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1773 
1774 /*
1775  * Verify that the memory range specified by the addr/len pair is valid and lies
1776  * entirely within a single region (EDCx or MCx).
1777  */
1778 static int
1779 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1780 {
1781 	uint32_t em, addr_len, maddr, mlen;
1782 
1783 	/* Memory can only be accessed in naturally aligned 4 byte units */
1784 	if (addr & 3 || len & 3 || len == 0)
1785 		return (EINVAL);
1786 
1787 	/* Enabled memories */
1788 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1789 	if (em & F_EDRAM0_ENABLE) {
1790 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1791 		maddr = G_EDRAM0_BASE(addr_len) << 20;
1792 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
1793 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1794 		    addr + len <= maddr + mlen)
1795 			return (0);
1796 	}
1797 	if (em & F_EDRAM1_ENABLE) {
1798 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1799 		maddr = G_EDRAM1_BASE(addr_len) << 20;
1800 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
1801 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1802 		    addr + len <= maddr + mlen)
1803 			return (0);
1804 	}
1805 	if (em & F_EXT_MEM_ENABLE) {
1806 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1807 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
1808 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1809 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1810 		    addr + len <= maddr + mlen)
1811 			return (0);
1812 	}
1813 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1814 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1815 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1816 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1817 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1818 		    addr + len <= maddr + mlen)
1819 			return (0);
1820 	}
1821 
1822 	return (EFAULT);
1823 }
1824 
1825 static int
1826 fwmtype_to_hwmtype(int mtype)
1827 {
1828 
1829 	switch (mtype) {
1830 	case FW_MEMTYPE_EDC0:
1831 		return (MEM_EDC0);
1832 	case FW_MEMTYPE_EDC1:
1833 		return (MEM_EDC1);
1834 	case FW_MEMTYPE_EXTMEM:
1835 		return (MEM_MC0);
1836 	case FW_MEMTYPE_EXTMEM1:
1837 		return (MEM_MC1);
1838 	default:
1839 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1840 	}
1841 }
1842 
1843 /*
1844  * Verify that the memory range specified by the memtype/offset/len pair is
1845  * valid and lies entirely within the memtype specified.  The global address of
1846  * the start of the range is returned in addr.
1847  */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Check the region is enabled, then decode its base/size BAR. */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		/*
		 * NOTE(review): fwmtype_to_hwmtype() returns MEM_MC0 for
		 * FW_MEMTYPE_EXTMEM — this case presumably relies on
		 * MEM_MC == MEM_MC0; confirm against the enum definition.
		 */
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		/* MC1 does not exist on T4. */
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}
1899 
1900 static void
1901 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1902 {
1903 	const struct memwin *mw;
1904 
1905 	if (is_t4(sc)) {
1906 		KASSERT(win >= 0 && win < nitems(t4_memwin),
1907 		    ("%s: incorrect memwin# (%d)", __func__, win));
1908 		mw = &t4_memwin[win];
1909 	} else {
1910 		KASSERT(win >= 0 && win < nitems(t5_memwin),
1911 		    ("%s: incorrect memwin# (%d)", __func__, win));
1912 		mw = &t5_memwin[win];
1913 	}
1914 
1915 	if (base != NULL)
1916 		*base = mw->base;
1917 	if (aperture != NULL)
1918 		*aperture = mw->aperture;
1919 }
1920 
1921 /*
1922  * Positions the memory window such that it can be used to access the specified
1923  * address in the chip's address space.  The return value is the offset of addr
1924  * from the start of the window.
1925  */
1926 static uint32_t
1927 position_memwin(struct adapter *sc, int n, uint32_t addr)
1928 {
1929 	uint32_t start, pf;
1930 	uint32_t reg;
1931 
1932 	KASSERT(n >= 0 && n <= 3,
1933 	    ("%s: invalid window %d.", __func__, n));
1934 	KASSERT((addr & 3) == 0,
1935 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1936 
1937 	if (is_t4(sc)) {
1938 		pf = 0;
1939 		start = addr & ~0xf;	/* start must be 16B aligned */
1940 	} else {
1941 		pf = V_PFNUM(sc->pf);
1942 		start = addr & ~0x7f;	/* start must be 128B aligned */
1943 	}
1944 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1945 
1946 	t4_write_reg(sc, reg, start | pf);
1947 	t4_read_reg(sc, reg);
1948 
1949 	return (addr - start);
1950 }
1951 
1952 static int
1953 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1954     struct intrs_and_queues *iaq)
1955 {
1956 	int rc, itype, navail, nrxq10g, nrxq1g, n;
1957 	int nofldrxq10g = 0, nofldrxq1g = 0;
1958 	int nnmrxq10g = 0, nnmrxq1g = 0;
1959 
1960 	bzero(iaq, sizeof(*iaq));
1961 
1962 	iaq->ntxq10g = t4_ntxq10g;
1963 	iaq->ntxq1g = t4_ntxq1g;
1964 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1965 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1966 	iaq->rsrv_noflowq = t4_rsrv_noflowq;
1967 #ifdef TCP_OFFLOAD
1968 	if (is_offload(sc)) {
1969 		iaq->nofldtxq10g = t4_nofldtxq10g;
1970 		iaq->nofldtxq1g = t4_nofldtxq1g;
1971 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1972 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1973 	}
1974 #endif
1975 #ifdef DEV_NETMAP
1976 	iaq->nnmtxq10g = t4_nnmtxq10g;
1977 	iaq->nnmtxq1g = t4_nnmtxq1g;
1978 	iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
1979 	iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
1980 #endif
1981 
1982 	for (itype = INTR_MSIX; itype; itype >>= 1) {
1983 
1984 		if ((itype & t4_intr_types) == 0)
1985 			continue;	/* not allowed */
1986 
1987 		if (itype == INTR_MSIX)
1988 			navail = pci_msix_count(sc->dev);
1989 		else if (itype == INTR_MSI)
1990 			navail = pci_msi_count(sc->dev);
1991 		else
1992 			navail = 1;
1993 restart:
1994 		if (navail == 0)
1995 			continue;
1996 
1997 		iaq->intr_type = itype;
1998 		iaq->intr_flags_10g = 0;
1999 		iaq->intr_flags_1g = 0;
2000 
2001 		/*
2002 		 * Best option: an interrupt vector for errors, one for the
2003 		 * firmware event queue, and one for every rxq (NIC, TOE, and
2004 		 * netmap).
2005 		 */
2006 		iaq->nirq = T4_EXTRA_INTR;
2007 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
2008 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
2009 		if (iaq->nirq <= navail &&
2010 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
2011 			iaq->intr_flags_10g = INTR_ALL;
2012 			iaq->intr_flags_1g = INTR_ALL;
2013 			goto allocate;
2014 		}
2015 
2016 		/*
2017 		 * Second best option: a vector for errors, one for the firmware
2018 		 * event queue, and vectors for either all the NIC rx queues or
2019 		 * all the TOE rx queues.  The queues that don't get vectors
2020 		 * will forward their interrupts to those that do.
2021 		 *
2022 		 * Note: netmap rx queues cannot be created early and so they
2023 		 * can't be setup to receive forwarded interrupts for others.
2024 		 */
2025 		iaq->nirq = T4_EXTRA_INTR;
2026 		if (nrxq10g >= nofldrxq10g) {
2027 			iaq->intr_flags_10g = INTR_RXQ;
2028 			iaq->nirq += n10g * nrxq10g;
2029 #ifdef DEV_NETMAP
2030 			iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
2031 #endif
2032 		} else {
2033 			iaq->intr_flags_10g = INTR_OFLD_RXQ;
2034 			iaq->nirq += n10g * nofldrxq10g;
2035 #ifdef DEV_NETMAP
2036 			iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
2037 #endif
2038 		}
2039 		if (nrxq1g >= nofldrxq1g) {
2040 			iaq->intr_flags_1g = INTR_RXQ;
2041 			iaq->nirq += n1g * nrxq1g;
2042 #ifdef DEV_NETMAP
2043 			iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
2044 #endif
2045 		} else {
2046 			iaq->intr_flags_1g = INTR_OFLD_RXQ;
2047 			iaq->nirq += n1g * nofldrxq1g;
2048 #ifdef DEV_NETMAP
2049 			iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
2050 #endif
2051 		}
2052 		if (iaq->nirq <= navail &&
2053 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
2054 			goto allocate;
2055 
2056 		/*
2057 		 * Next best option: an interrupt vector for errors, one for the
2058 		 * firmware event queue, and at least one per port.  At this
2059 		 * point we know we'll have to downsize nrxq and/or nofldrxq
2060 		 * and/or nnmrxq to fit what's available to us.
2061 		 */
2062 		iaq->nirq = T4_EXTRA_INTR;
2063 		iaq->nirq += n10g + n1g;
2064 		if (iaq->nirq <= navail) {
2065 			int leftover = navail - iaq->nirq;
2066 
2067 			if (n10g > 0) {
2068 				int target = max(nrxq10g, nofldrxq10g);
2069 
2070 				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2071 				    INTR_RXQ : INTR_OFLD_RXQ;
2072 
2073 				n = 1;
2074 				while (n < target && leftover >= n10g) {
2075 					leftover -= n10g;
2076 					iaq->nirq += n10g;
2077 					n++;
2078 				}
2079 				iaq->nrxq10g = min(n, nrxq10g);
2080 #ifdef TCP_OFFLOAD
2081 				iaq->nofldrxq10g = min(n, nofldrxq10g);
2082 #endif
2083 #ifdef DEV_NETMAP
2084 				iaq->nnmrxq10g = min(n, nnmrxq10g);
2085 #endif
2086 			}
2087 
2088 			if (n1g > 0) {
2089 				int target = max(nrxq1g, nofldrxq1g);
2090 
2091 				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2092 				    INTR_RXQ : INTR_OFLD_RXQ;
2093 
2094 				n = 1;
2095 				while (n < target && leftover >= n1g) {
2096 					leftover -= n1g;
2097 					iaq->nirq += n1g;
2098 					n++;
2099 				}
2100 				iaq->nrxq1g = min(n, nrxq1g);
2101 #ifdef TCP_OFFLOAD
2102 				iaq->nofldrxq1g = min(n, nofldrxq1g);
2103 #endif
2104 #ifdef DEV_NETMAP
2105 				iaq->nnmrxq1g = min(n, nnmrxq1g);
2106 #endif
2107 			}
2108 
2109 			if (itype != INTR_MSI || powerof2(iaq->nirq))
2110 				goto allocate;
2111 		}
2112 
2113 		/*
2114 		 * Least desirable option: one interrupt vector for everything.
2115 		 */
2116 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2117 		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2118 #ifdef TCP_OFFLOAD
2119 		if (is_offload(sc))
2120 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2121 #endif
2122 #ifdef DEV_NETMAP
2123 		iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
2124 #endif
2125 
2126 allocate:
2127 		navail = iaq->nirq;
2128 		rc = 0;
2129 		if (itype == INTR_MSIX)
2130 			rc = pci_alloc_msix(sc->dev, &navail);
2131 		else if (itype == INTR_MSI)
2132 			rc = pci_alloc_msi(sc->dev, &navail);
2133 
2134 		if (rc == 0) {
2135 			if (navail == iaq->nirq)
2136 				return (0);
2137 
2138 			/*
2139 			 * Didn't get the number requested.  Use whatever number
2140 			 * the kernel is willing to allocate (it's in navail).
2141 			 */
2142 			device_printf(sc->dev, "fewer vectors than requested, "
2143 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2144 			    itype, iaq->nirq, navail);
2145 			pci_release_msi(sc->dev);
2146 			goto restart;
2147 		}
2148 
2149 		device_printf(sc->dev,
2150 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2151 		    itype, rc, iaq->nirq, navail);
2152 	}
2153 
2154 	device_printf(sc->dev,
2155 	    "failed to find a usable interrupt type.  "
2156 	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2157 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2158 
2159 	return (ENXIO);
2160 }
2161 
/*
 * FW_VERSION(T4)/FW_VERSION(T5) token-pastes the chip prefix onto the
 * compiled-in firmware version components and packs them into a single
 * 32-bit word.  FW_INTFVER expands to the compiled-in interface version
 * for the given chip and interface name.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2168 
/*
 * Per-chip firmware information: the name of the KLD carrying the config
 * files, the name of the firmware module within it, and the firmware header
 * the driver was compiled against (used for compatibility checks in
 * fw_compatible()).
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
2209 
2210 static struct fw_info *
2211 find_fw_info(int chip)
2212 {
2213 	int i;
2214 
2215 	for (i = 0; i < nitems(fw_info); i++) {
2216 		if (fw_info[i].chip == chip)
2217 			return (&fw_info[i]);
2218 	}
2219 	return (NULL);
2220 }
2221 
2222 /*
2223  * Is the given firmware API compatible with the one the driver was compiled
2224  * with?
2225  */
2226 static int
2227 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2228 {
2229 
2230 	/* short circuit if it's the exact same firmware version */
2231 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2232 		return (1);
2233 
2234 	/*
2235 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
2236 	 * features that are supported in the driver.
2237 	 */
2238 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2239 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2240 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2241 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2242 		return (1);
2243 #undef SAME_INTF
2244 
2245 	return (0);
2246 }
2247 
2248 /*
2249  * The firmware in the KLD is usable, but should it be installed?  This routine
2250  * explains itself in detail if it indicates the KLD firmware should be
2251  * installed.
2252  */
2253 static int
2254 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2255 {
2256 	const char *reason;
2257 
2258 	if (!card_fw_usable) {
2259 		reason = "incompatible or unusable";
2260 		goto install;
2261 	}
2262 
2263 	if (k > c) {
2264 		reason = "older than the version bundled with this driver";
2265 		goto install;
2266 	}
2267 
2268 	if (t4_fw_install == 2 && k != c) {
2269 		reason = "different than the version bundled with this driver";
2270 		goto install;
2271 	}
2272 
2273 	return (0);
2274 
2275 install:
2276 	if (t4_fw_install == 0) {
2277 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2278 		    "but the driver is prohibited from installing a different "
2279 		    "firmware on the card.\n",
2280 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2281 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2282 
2283 		return (0);
2284 	}
2285 
2286 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2287 	    "installing firmware %u.%u.%u.%u on card.\n",
2288 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2289 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2290 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2291 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2292 
2293 	return (1);
2294 }
2295 /*
2296  * Establish contact with the firmware and determine if we are the master driver
2297  * or not, and whether we are responsible for chip initialization.
2298  */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;	/* t4_* helpers return negated errno values */
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	pf = rc;	/* non-negative hello result identifies the master PF */
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* No usable firmware anywhere; report all three versions. */
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; just record that fact. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	/* Single exit: release the firmware images and the cached header. */
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2475 
/*
 * Convenience builders for firmware parameter identifiers: device-wide
 * (FW_PARAM_DEV) and per-PF/VF (FW_PARAM_PFVF) parameters, used with
 * t4_query_params/t4_set_params below.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2482 
2483 /*
2484  * Partition chip resources for use between various PFs, VFs, etc.
2485  */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is e.g. "<kld_name>_<profile>". */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			/* Fall back to the default config, then the flash. */
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		/* Memory type, and the offset within it, for the upload. */
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/*
		 * Copy the config file into adapter memory through memory
		 * window 2, 4 bytes at a time, repositioning the window as
		 * the aperture is exhausted.
		 */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Tell the firmware where the config file is and pre-process it. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	/* Checksum mismatch is reported but is not fatal. */
	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2668 
2669 /*
2670  * Retrieve parameters that are needed (or nice to have) very early.
2671  */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/* Port vector and core clock are needed before anything else. */
	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = bitcount32(val[0]);	/* one set bit per port */
	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		/* Unpack memory type, start (16B units), and size. */
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}
2713 
2714 /*
2715  * Retrieve various parameters that are of interest to the driver.  The device
2716  * has been initialized by the firmware at this point.
2717  */
2718 static int
2719 get_params__post_init(struct adapter *sc)
2720 {
2721 	int rc;
2722 	uint32_t param[7], val[7];
2723 	struct fw_caps_config_cmd caps;
2724 
2725 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2726 	param[1] = FW_PARAM_PFVF(EQ_START);
2727 	param[2] = FW_PARAM_PFVF(FILTER_START);
2728 	param[3] = FW_PARAM_PFVF(FILTER_END);
2729 	param[4] = FW_PARAM_PFVF(L2T_START);
2730 	param[5] = FW_PARAM_PFVF(L2T_END);
2731 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2732 	if (rc != 0) {
2733 		device_printf(sc->dev,
2734 		    "failed to query parameters (post_init): %d.\n", rc);
2735 		return (rc);
2736 	}
2737 
2738 	sc->sge.iq_start = val[0];
2739 	sc->sge.eq_start = val[1];
2740 	sc->tids.ftid_base = val[2];
2741 	sc->tids.nftids = val[3] - val[2] + 1;
2742 	sc->params.ftid_min = val[2];
2743 	sc->params.ftid_max = val[3];
2744 	sc->vres.l2t.start = val[4];
2745 	sc->vres.l2t.size = val[5] - val[4] + 1;
2746 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2747 	    ("%s: L2 table size (%u) larger than expected (%u)",
2748 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2749 
2750 	/* get capabilites */
2751 	bzero(&caps, sizeof(caps));
2752 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2753 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2754 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2755 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2756 	if (rc != 0) {
2757 		device_printf(sc->dev,
2758 		    "failed to get card capabilities: %d.\n", rc);
2759 		return (rc);
2760 	}
2761 
2762 #define READ_CAPS(x) do { \
2763 	sc->x = htobe16(caps.x); \
2764 } while (0)
2765 	READ_CAPS(linkcaps);
2766 	READ_CAPS(niccaps);
2767 	READ_CAPS(toecaps);
2768 	READ_CAPS(rdmacaps);
2769 	READ_CAPS(iscsicaps);
2770 	READ_CAPS(fcoecaps);
2771 
2772 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2773 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2774 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2775 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2776 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2777 		if (rc != 0) {
2778 			device_printf(sc->dev,
2779 			    "failed to query NIC parameters: %d.\n", rc);
2780 			return (rc);
2781 		}
2782 		sc->tids.etid_base = val[0];
2783 		sc->params.etid_min = val[0];
2784 		sc->tids.netids = val[1] - val[0] + 1;
2785 		sc->params.netids = sc->tids.netids;
2786 		sc->params.eo_wr_cred = val[2];
2787 		sc->params.ethoffload = 1;
2788 	}
2789 
2790 	if (sc->toecaps) {
2791 		/* query offload-related parameters */
2792 		param[0] = FW_PARAM_DEV(NTID);
2793 		param[1] = FW_PARAM_PFVF(SERVER_START);
2794 		param[2] = FW_PARAM_PFVF(SERVER_END);
2795 		param[3] = FW_PARAM_PFVF(TDDP_START);
2796 		param[4] = FW_PARAM_PFVF(TDDP_END);
2797 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2798 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2799 		if (rc != 0) {
2800 			device_printf(sc->dev,
2801 			    "failed to query TOE parameters: %d.\n", rc);
2802 			return (rc);
2803 		}
2804 		sc->tids.ntids = val[0];
2805 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2806 		sc->tids.stid_base = val[1];
2807 		sc->tids.nstids = val[2] - val[1] + 1;
2808 		sc->vres.ddp.start = val[3];
2809 		sc->vres.ddp.size = val[4] - val[3] + 1;
2810 		sc->params.ofldq_wr_cred = val[5];
2811 		sc->params.offload = 1;
2812 	}
2813 	if (sc->rdmacaps) {
2814 		param[0] = FW_PARAM_PFVF(STAG_START);
2815 		param[1] = FW_PARAM_PFVF(STAG_END);
2816 		param[2] = FW_PARAM_PFVF(RQ_START);
2817 		param[3] = FW_PARAM_PFVF(RQ_END);
2818 		param[4] = FW_PARAM_PFVF(PBL_START);
2819 		param[5] = FW_PARAM_PFVF(PBL_END);
2820 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2821 		if (rc != 0) {
2822 			device_printf(sc->dev,
2823 			    "failed to query RDMA parameters(1): %d.\n", rc);
2824 			return (rc);
2825 		}
2826 		sc->vres.stag.start = val[0];
2827 		sc->vres.stag.size = val[1] - val[0] + 1;
2828 		sc->vres.rq.start = val[2];
2829 		sc->vres.rq.size = val[3] - val[2] + 1;
2830 		sc->vres.pbl.start = val[4];
2831 		sc->vres.pbl.size = val[5] - val[4] + 1;
2832 
2833 		param[0] = FW_PARAM_PFVF(SQRQ_START);
2834 		param[1] = FW_PARAM_PFVF(SQRQ_END);
2835 		param[2] = FW_PARAM_PFVF(CQ_START);
2836 		param[3] = FW_PARAM_PFVF(CQ_END);
2837 		param[4] = FW_PARAM_PFVF(OCQ_START);
2838 		param[5] = FW_PARAM_PFVF(OCQ_END);
2839 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2840 		if (rc != 0) {
2841 			device_printf(sc->dev,
2842 			    "failed to query RDMA parameters(2): %d.\n", rc);
2843 			return (rc);
2844 		}
2845 		sc->vres.qp.start = val[0];
2846 		sc->vres.qp.size = val[1] - val[0] + 1;
2847 		sc->vres.cq.start = val[2];
2848 		sc->vres.cq.size = val[3] - val[2] + 1;
2849 		sc->vres.ocq.start = val[4];
2850 		sc->vres.ocq.size = val[5] - val[4] + 1;
2851 	}
2852 	if (sc->iscsicaps) {
2853 		param[0] = FW_PARAM_PFVF(ISCSI_START);
2854 		param[1] = FW_PARAM_PFVF(ISCSI_END);
2855 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2856 		if (rc != 0) {
2857 			device_printf(sc->dev,
2858 			    "failed to query iSCSI parameters: %d.\n", rc);
2859 			return (rc);
2860 		}
2861 		sc->vres.iscsi.start = val[0];
2862 		sc->vres.iscsi.size = val[1] - val[0] + 1;
2863 	}
2864 
2865 	/*
2866 	 * We've got the params we wanted to query via the firmware.  Now grab
2867 	 * some others directly from the chip.
2868 	 */
2869 	rc = t4_read_chip_settings(sc);
2870 
2871 	return (rc);
2872 }
2873 
/*
 * Set firmware parameters after device initialization.  Currently only asks
 * the firmware to deliver encapsulated CPL messages; that request is best
 * effort and failure is ignored, so this always returns 0.
 */
static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}
2886 
2887 #undef FW_PARAM_PFVF
2888 #undef FW_PARAM_DEV
2889 
2890 static void
2891 t4_set_desc(struct adapter *sc)
2892 {
2893 	char buf[128];
2894 	struct adapter_params *p = &sc->params;
2895 
2896 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2897 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2898 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2899 
2900 	device_set_desc_copy(sc->dev, buf);
2901 }
2902 
/*
 * (Re)build the ifmedia list for the port from its port type and the
 * transceiver module type reported by the firmware, and select the active
 * media.  The list is cleared first, so this can be called again after a
 * module swap.
 */
static void
build_medialist(struct port_info *pi, struct ifmedia *media)
{
	int data, m;

	PORT_LOCK(pi);

	/* Start from scratch; the module may have changed. */
	ifmedia_removeall(media);

	/* Every entry added below is ethernet, full duplex. */
	m = IFM_ETHER | IFM_FDX;
	/* Stash both port and module type in the media entry's driver data. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	/* 10G pluggable ports: media depends on the module that's in. */
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module plugged in: report IFM_NONE, no FDX. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	/* 40G QSFP port: media depends on the module that's in. */
	case FW_PORT_TYPE_QSFP:
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module plugged in: report IFM_NONE, no FDX. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
3027 
3028 #define FW_MAC_EXACT_CHUNK	7
3029 
3030 /*
3031  * Program the port's XGMAC based on parameters in ifnet.  The caller also
3032  * indicates which parameters should be programmed (the rest are left alone).
3033  */
int
update_mac_settings(struct ifnet *ifp, int flags)
{
	int rc = 0;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	/* -1 means "leave this setting unchanged" to t4_set_rxmode. */
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
	uint16_t viid = 0xffff;
	int16_t *xact_addr_filt = NULL;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	/*
	 * Pick the virtual interface matching this ifnet: the port's regular
	 * VI or (with netmap) its netmap VI.  NOTE(review): if ifp matches
	 * neither, viid stays 0xffff and xact_addr_filt stays NULL (and would
	 * be dereferenced below for XGMAC_UCADDR) -- callers are expected to
	 * pass one of the port's own ifnets.
	 */
	if (ifp == pi->ifp) {
		viid = pi->viid;
		xact_addr_filt = &pi->xact_addr_filt;
	}
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp) {
		viid = pi->nm_viid;
		xact_addr_filt = &pi->nm_xact_addr_filt;
	}
#endif
	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	/* One firmware call covers all four rx-mode settings. */
	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
		rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
		    1, vlanex, false);
		if (rc) {
			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
			    rc);
			return (rc);
		}
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		/*
		 * A non-negative return is the index of the exact-match
		 * filter that was used; remember it for the next change.
		 */
		rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
		    true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			*xact_addr_filt = rc;
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
		int del = 1;
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;
		int i = 0, j;

		/*
		 * Program the multicast addresses FW_MAC_EXACT_CHUNK at a
		 * time.  del is 1 only for the first chunk so existing
		 * filters are replaced rather than accumulated.  hash
		 * collects the hash-filter bits returned by
		 * t4_alloc_mac_filt (presumably for addresses that did not
		 * get an exact-match filter) and is programmed at the end.
		 */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr[i] =
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
			i++;

			if (i == FW_MAC_EXACT_CHUNK) {
				rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
				    i, mcaddr, NULL, &hash, 0);
				if (rc < 0) {
					rc = -rc;
					for (j = 0; j < i; j++) {
						if_printf(ifp,
						    "failed to add mc address"
						    " %02x:%02x:%02x:"
						    "%02x:%02x:%02x rc=%d\n",
						    mcaddr[j][0], mcaddr[j][1],
						    mcaddr[j][2], mcaddr[j][3],
						    mcaddr[j][4], mcaddr[j][5],
						    rc);
					}
					goto mcfail;
				}
				del = 0;
				i = 0;
			}
		}
		/* The final, partial chunk (if any). */
		if (i > 0) {
			rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
			    mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < i; j++) {
					if_printf(ifp,
					    "failed to add mc address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    mcaddr[j][0], mcaddr[j][1],
					    mcaddr[j][2], mcaddr[j][3],
					    mcaddr[j][4], mcaddr[j][5],
					    rc);
				}
				goto mcfail;
			}
		}

		rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
		if_maddr_runlock(ifp);
	}

	return (rc);
}
3160 
3161 int
3162 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
3163     char *wmesg)
3164 {
3165 	int rc, pri;
3166 
3167 #ifdef WITNESS
3168 	/* the caller thinks it's ok to sleep, but is it really? */
3169 	if (flags & SLEEP_OK)
3170 		pause("t4slptst", 1);
3171 #endif
3172 
3173 	if (INTR_OK)
3174 		pri = PCATCH;
3175 	else
3176 		pri = 0;
3177 
3178 	ADAPTER_LOCK(sc);
3179 	for (;;) {
3180 
3181 		if (pi && IS_DOOMED(pi)) {
3182 			rc = ENXIO;
3183 			goto done;
3184 		}
3185 
3186 		if (!IS_BUSY(sc)) {
3187 			rc = 0;
3188 			break;
3189 		}
3190 
3191 		if (!(flags & SLEEP_OK)) {
3192 			rc = EBUSY;
3193 			goto done;
3194 		}
3195 
3196 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3197 			rc = EINTR;
3198 			goto done;
3199 		}
3200 	}
3201 
3202 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3203 	SET_BUSY(sc);
3204 #ifdef INVARIANTS
3205 	sc->last_op = wmesg;
3206 	sc->last_op_thr = curthread;
3207 #endif
3208 
3209 done:
3210 	if (!(flags & HOLD_LOCK) || rc)
3211 		ADAPTER_UNLOCK(sc);
3212 
3213 	return (rc);
3214 }
3215 
3216 void
3217 end_synchronized_op(struct adapter *sc, int flags)
3218 {
3219 
3220 	if (flags & LOCK_HELD)
3221 		ADAPTER_LOCK_ASSERT_OWNED(sc);
3222 	else
3223 		ADAPTER_LOCK(sc);
3224 
3225 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3226 	CLR_BUSY(sc);
3227 	wakeup(&sc->flags);
3228 	ADAPTER_UNLOCK(sc);
3229 }
3230 
/*
 * Bring the port up: lazily initialize the adapter and port resources,
 * program the MAC, and enable the virtual interface.  Any failure rolls
 * back via cxgbe_uninit_synchronized.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* First port up initializes the adapter-wide state. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	/* Enable the VI so traffic starts flowing. */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz tick for this port. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);	/* undo partial bring-up */

	return (rc);
}
3288 
3289 /*
3290  * Idempotent.
3291  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	/* Mark the port closed. */
	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Report link down (speed 0, no link-down reason). */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
3326 
3327 /*
3328  * It is ok for this function to fail midway and return right away.  t4_detach
3329  * will walk the entire sc->irq list and clean up whatever is valid.
3330  */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses rid 0; MSI/MSI-X rids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1)
		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));

	/* Multiple interrupts. */
	KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
	    ("%s: too few intr.", __func__));

	/* The first one is always error intr */
	rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
	if (rc != 0)
		return (rc);
	irq++;
	rid++;

	/* The second one is always the firmware event queue */
	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
	if (rc != 0)
		return (rc);
	irq++;
	rid++;

	/*
	 * One interrupt per rx queue that was configured for a dedicated
	 * vector.  The describe-string separator distinguishes the queue
	 * class: '.' NIC rxq, ',' offload rxq, '-' netmap rxq.
	 */
	for_each_port(sc, p) {
		pi = sc->port[p];

		if (pi->flags & INTR_RXQ) {
			for_each_rxq(pi, q, rxq) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#ifdef TCP_OFFLOAD
		if (pi->flags & INTR_OFLD_RXQ) {
			for_each_ofld_rxq(pi, q, ofld_rxq) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#endif
#ifdef DEV_NETMAP
		if (pi->flags & INTR_NM_RXQ) {
			for_each_nm_rxq(pi, q, nm_rxq) {
				snprintf(s, sizeof(s), "%d-%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
				    nm_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#endif
	}
	/* Every allocated vector must have been consumed, no more no less. */
	MPASS(irq == &sc->irq[sc->intr_count]);

	return (0);
}
3417 
/*
 * One-time initialization of adapter-wide resources: the adapter queues and
 * the per-channel taskqueues.  Any failure is rolled back here via
 * adapter_full_uninit.
 */
int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	/* One taskqueue, with one thread, per slot in sc->tq. */
	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);	/* undo partial init */

	return (rc);
}
3455 
3456 int
3457 adapter_full_uninit(struct adapter *sc)
3458 {
3459 	int i;
3460 
3461 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3462 
3463 	t4_teardown_adapter_queues(sc);
3464 
3465 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3466 		taskqueue_free(sc->tq[i]);
3467 		sc->tq[i] = NULL;
3468 	}
3469 
3470 	sc->flags &= ~FULL_INIT_DONE;
3471 
3472 	return (0);
3473 }
3474 
/*
 * One-time initialization of per-port resources: sysctl context, the port's
 * queues, and the RSS indirection table.  Failures roll back via
 * port_full_uninit.
 */
int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i, j;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
	 * The table is filled by cycling through the rxq abs_ids round-robin
	 * until all rss_size slots are populated.
	 */
	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	for (i = 0; i < pi->rss_size;) {
		for_each_rxq(pi, j, rxq) {
			rss[i++] = rxq->iq.abs_id;
			if (i == pi->rss_size)
				break;
		}
	}

	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
	    pi->rss_size);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->rss = rss;
	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);	/* undo partial init */

	return (rc);
}
3525 
3526 /*
3527  * Idempotent.
3528  */
int
port_full_uninit(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif

	/* Quiesce only if the queues were actually brought up. */
	if (pi->flags & PORT_INIT_DONE) {

		/* Need to quiesce queues.  XXX: ctrl queues? */

		/* Tx queues first, then rx queues and their free lists. */
		for_each_txq(pi, i, txq) {
			quiesce_eq(sc, &txq->eq);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_txq(pi, i, ofld_txq) {
			quiesce_eq(sc, &ofld_txq->eq);
		}
#endif

		for_each_rxq(pi, i, rxq) {
			quiesce_iq(sc, &rxq->iq);
			quiesce_fl(sc, &rxq->fl);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_rxq(pi, i, ofld_rxq) {
			quiesce_iq(sc, &ofld_rxq->iq);
			quiesce_fl(sc, &ofld_rxq->fl);
		}
#endif
		/* Release the saved RSS table from port_full_init. */
		free(pi->rss, M_CXGBE);
	}

	t4_teardown_port_queues(pi);
	pi->flags &= ~PORT_INIT_DONE;

	return (0);
}
3574 
/*
 * Quiesce an egress queue: mark it doomed so no new work is queued, wait for
 * any outstanding credit flush, then drain its callout and task.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	/* Wait for any queued tx task on this channel's taskqueue. */
	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3594 
3595 static void
3596 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3597 {
3598 	(void) sc;	/* unused */
3599 
3600 	/* Synchronize with the interrupt handler */
3601 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3602 		pause("iqfree", 1);
3603 }
3604 
/*
 * Quiesce a freelist: mark it doomed (sfl_lock then FL_LOCK, matching the
 * starvation handler's lock order) and drain the starvation callout.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3618 
3619 static int
3620 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3621     driver_intr_t *handler, void *arg, char *name)
3622 {
3623 	int rc;
3624 
3625 	irq->rid = rid;
3626 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3627 	    RF_SHAREABLE | RF_ACTIVE);
3628 	if (irq->res == NULL) {
3629 		device_printf(sc->dev,
3630 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3631 		return (ENOMEM);
3632 	}
3633 
3634 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3635 	    NULL, handler, arg, &irq->tag);
3636 	if (rc != 0) {
3637 		device_printf(sc->dev,
3638 		    "failed to setup interrupt for rid %d, name %s: %d\n",
3639 		    rid, name, rc);
3640 	} else if (name)
3641 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3642 
3643 	return (rc);
3644 }
3645 
3646 static int
3647 t4_free_irq(struct adapter *sc, struct irq *irq)
3648 {
3649 	if (irq->tag)
3650 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3651 	if (irq->res)
3652 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3653 
3654 	bzero(irq, sizeof(*irq));
3655 
3656 	return (0);
3657 }
3658 
3659 static void
3660 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3661     unsigned int end)
3662 {
3663 	uint32_t *p = (uint32_t *)(buf + start);
3664 
3665 	for ( ; start <= end; start += sizeof(uint32_t))
3666 		*p++ = t4_read_reg(sc, start);
3667 }
3668 
3669 static void
3670 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3671 {
3672 	int i, n;
3673 	const unsigned int *reg_ranges;
3674 	static const unsigned int t4_reg_ranges[] = {
3675 		0x1008, 0x1108,
3676 		0x1180, 0x11b4,
3677 		0x11fc, 0x123c,
3678 		0x1300, 0x173c,
3679 		0x1800, 0x18fc,
3680 		0x3000, 0x30d8,
3681 		0x30e0, 0x5924,
3682 		0x5960, 0x59d4,
3683 		0x5a00, 0x5af8,
3684 		0x6000, 0x6098,
3685 		0x6100, 0x6150,
3686 		0x6200, 0x6208,
3687 		0x6240, 0x6248,
3688 		0x6280, 0x6338,
3689 		0x6370, 0x638c,
3690 		0x6400, 0x643c,
3691 		0x6500, 0x6524,
3692 		0x6a00, 0x6a38,
3693 		0x6a60, 0x6a78,
3694 		0x6b00, 0x6b84,
3695 		0x6bf0, 0x6c84,
3696 		0x6cf0, 0x6d84,
3697 		0x6df0, 0x6e84,
3698 		0x6ef0, 0x6f84,
3699 		0x6ff0, 0x7084,
3700 		0x70f0, 0x7184,
3701 		0x71f0, 0x7284,
3702 		0x72f0, 0x7384,
3703 		0x73f0, 0x7450,
3704 		0x7500, 0x7530,
3705 		0x7600, 0x761c,
3706 		0x7680, 0x76cc,
3707 		0x7700, 0x7798,
3708 		0x77c0, 0x77fc,
3709 		0x7900, 0x79fc,
3710 		0x7b00, 0x7c38,
3711 		0x7d00, 0x7efc,
3712 		0x8dc0, 0x8e1c,
3713 		0x8e30, 0x8e78,
3714 		0x8ea0, 0x8f6c,
3715 		0x8fc0, 0x9074,
3716 		0x90fc, 0x90fc,
3717 		0x9400, 0x9458,
3718 		0x9600, 0x96bc,
3719 		0x9800, 0x9808,
3720 		0x9820, 0x983c,
3721 		0x9850, 0x9864,
3722 		0x9c00, 0x9c6c,
3723 		0x9c80, 0x9cec,
3724 		0x9d00, 0x9d6c,
3725 		0x9d80, 0x9dec,
3726 		0x9e00, 0x9e6c,
3727 		0x9e80, 0x9eec,
3728 		0x9f00, 0x9f6c,
3729 		0x9f80, 0x9fec,
3730 		0xd004, 0xd03c,
3731 		0xdfc0, 0xdfe0,
3732 		0xe000, 0xea7c,
3733 		0xf000, 0x11110,
3734 		0x11118, 0x11190,
3735 		0x19040, 0x1906c,
3736 		0x19078, 0x19080,
3737 		0x1908c, 0x19124,
3738 		0x19150, 0x191b0,
3739 		0x191d0, 0x191e8,
3740 		0x19238, 0x1924c,
3741 		0x193f8, 0x19474,
3742 		0x19490, 0x194f8,
3743 		0x19800, 0x19f30,
3744 		0x1a000, 0x1a06c,
3745 		0x1a0b0, 0x1a120,
3746 		0x1a128, 0x1a138,
3747 		0x1a190, 0x1a1c4,
3748 		0x1a1fc, 0x1a1fc,
3749 		0x1e040, 0x1e04c,
3750 		0x1e284, 0x1e28c,
3751 		0x1e2c0, 0x1e2c0,
3752 		0x1e2e0, 0x1e2e0,
3753 		0x1e300, 0x1e384,
3754 		0x1e3c0, 0x1e3c8,
3755 		0x1e440, 0x1e44c,
3756 		0x1e684, 0x1e68c,
3757 		0x1e6c0, 0x1e6c0,
3758 		0x1e6e0, 0x1e6e0,
3759 		0x1e700, 0x1e784,
3760 		0x1e7c0, 0x1e7c8,
3761 		0x1e840, 0x1e84c,
3762 		0x1ea84, 0x1ea8c,
3763 		0x1eac0, 0x1eac0,
3764 		0x1eae0, 0x1eae0,
3765 		0x1eb00, 0x1eb84,
3766 		0x1ebc0, 0x1ebc8,
3767 		0x1ec40, 0x1ec4c,
3768 		0x1ee84, 0x1ee8c,
3769 		0x1eec0, 0x1eec0,
3770 		0x1eee0, 0x1eee0,
3771 		0x1ef00, 0x1ef84,
3772 		0x1efc0, 0x1efc8,
3773 		0x1f040, 0x1f04c,
3774 		0x1f284, 0x1f28c,
3775 		0x1f2c0, 0x1f2c0,
3776 		0x1f2e0, 0x1f2e0,
3777 		0x1f300, 0x1f384,
3778 		0x1f3c0, 0x1f3c8,
3779 		0x1f440, 0x1f44c,
3780 		0x1f684, 0x1f68c,
3781 		0x1f6c0, 0x1f6c0,
3782 		0x1f6e0, 0x1f6e0,
3783 		0x1f700, 0x1f784,
3784 		0x1f7c0, 0x1f7c8,
3785 		0x1f840, 0x1f84c,
3786 		0x1fa84, 0x1fa8c,
3787 		0x1fac0, 0x1fac0,
3788 		0x1fae0, 0x1fae0,
3789 		0x1fb00, 0x1fb84,
3790 		0x1fbc0, 0x1fbc8,
3791 		0x1fc40, 0x1fc4c,
3792 		0x1fe84, 0x1fe8c,
3793 		0x1fec0, 0x1fec0,
3794 		0x1fee0, 0x1fee0,
3795 		0x1ff00, 0x1ff84,
3796 		0x1ffc0, 0x1ffc8,
3797 		0x20000, 0x2002c,
3798 		0x20100, 0x2013c,
3799 		0x20190, 0x201c8,
3800 		0x20200, 0x20318,
3801 		0x20400, 0x20528,
3802 		0x20540, 0x20614,
3803 		0x21000, 0x21040,
3804 		0x2104c, 0x21060,
3805 		0x210c0, 0x210ec,
3806 		0x21200, 0x21268,
3807 		0x21270, 0x21284,
3808 		0x212fc, 0x21388,
3809 		0x21400, 0x21404,
3810 		0x21500, 0x21518,
3811 		0x2152c, 0x2153c,
3812 		0x21550, 0x21554,
3813 		0x21600, 0x21600,
3814 		0x21608, 0x21628,
3815 		0x21630, 0x2163c,
3816 		0x21700, 0x2171c,
3817 		0x21780, 0x2178c,
3818 		0x21800, 0x21c38,
3819 		0x21c80, 0x21d7c,
3820 		0x21e00, 0x21e04,
3821 		0x22000, 0x2202c,
3822 		0x22100, 0x2213c,
3823 		0x22190, 0x221c8,
3824 		0x22200, 0x22318,
3825 		0x22400, 0x22528,
3826 		0x22540, 0x22614,
3827 		0x23000, 0x23040,
3828 		0x2304c, 0x23060,
3829 		0x230c0, 0x230ec,
3830 		0x23200, 0x23268,
3831 		0x23270, 0x23284,
3832 		0x232fc, 0x23388,
3833 		0x23400, 0x23404,
3834 		0x23500, 0x23518,
3835 		0x2352c, 0x2353c,
3836 		0x23550, 0x23554,
3837 		0x23600, 0x23600,
3838 		0x23608, 0x23628,
3839 		0x23630, 0x2363c,
3840 		0x23700, 0x2371c,
3841 		0x23780, 0x2378c,
3842 		0x23800, 0x23c38,
3843 		0x23c80, 0x23d7c,
3844 		0x23e00, 0x23e04,
3845 		0x24000, 0x2402c,
3846 		0x24100, 0x2413c,
3847 		0x24190, 0x241c8,
3848 		0x24200, 0x24318,
3849 		0x24400, 0x24528,
3850 		0x24540, 0x24614,
3851 		0x25000, 0x25040,
3852 		0x2504c, 0x25060,
3853 		0x250c0, 0x250ec,
3854 		0x25200, 0x25268,
3855 		0x25270, 0x25284,
3856 		0x252fc, 0x25388,
3857 		0x25400, 0x25404,
3858 		0x25500, 0x25518,
3859 		0x2552c, 0x2553c,
3860 		0x25550, 0x25554,
3861 		0x25600, 0x25600,
3862 		0x25608, 0x25628,
3863 		0x25630, 0x2563c,
3864 		0x25700, 0x2571c,
3865 		0x25780, 0x2578c,
3866 		0x25800, 0x25c38,
3867 		0x25c80, 0x25d7c,
3868 		0x25e00, 0x25e04,
3869 		0x26000, 0x2602c,
3870 		0x26100, 0x2613c,
3871 		0x26190, 0x261c8,
3872 		0x26200, 0x26318,
3873 		0x26400, 0x26528,
3874 		0x26540, 0x26614,
3875 		0x27000, 0x27040,
3876 		0x2704c, 0x27060,
3877 		0x270c0, 0x270ec,
3878 		0x27200, 0x27268,
3879 		0x27270, 0x27284,
3880 		0x272fc, 0x27388,
3881 		0x27400, 0x27404,
3882 		0x27500, 0x27518,
3883 		0x2752c, 0x2753c,
3884 		0x27550, 0x27554,
3885 		0x27600, 0x27600,
3886 		0x27608, 0x27628,
3887 		0x27630, 0x2763c,
3888 		0x27700, 0x2771c,
3889 		0x27780, 0x2778c,
3890 		0x27800, 0x27c38,
3891 		0x27c80, 0x27d7c,
3892 		0x27e00, 0x27e04
3893 	};
3894 	static const unsigned int t5_reg_ranges[] = {
3895 		0x1008, 0x1148,
3896 		0x1180, 0x11b4,
3897 		0x11fc, 0x123c,
3898 		0x1280, 0x173c,
3899 		0x1800, 0x18fc,
3900 		0x3000, 0x3028,
3901 		0x3060, 0x30d8,
3902 		0x30e0, 0x30fc,
3903 		0x3140, 0x357c,
3904 		0x35a8, 0x35cc,
3905 		0x35ec, 0x35ec,
3906 		0x3600, 0x5624,
3907 		0x56cc, 0x575c,
3908 		0x580c, 0x5814,
3909 		0x5890, 0x58bc,
3910 		0x5940, 0x59dc,
3911 		0x59fc, 0x5a18,
3912 		0x5a60, 0x5a9c,
3913 		0x5b94, 0x5bfc,
3914 		0x6000, 0x6040,
3915 		0x6058, 0x614c,
3916 		0x7700, 0x7798,
3917 		0x77c0, 0x78fc,
3918 		0x7b00, 0x7c54,
3919 		0x7d00, 0x7efc,
3920 		0x8dc0, 0x8de0,
3921 		0x8df8, 0x8e84,
3922 		0x8ea0, 0x8f84,
3923 		0x8fc0, 0x90f8,
3924 		0x9400, 0x9470,
3925 		0x9600, 0x96f4,
3926 		0x9800, 0x9808,
3927 		0x9820, 0x983c,
3928 		0x9850, 0x9864,
3929 		0x9c00, 0x9c6c,
3930 		0x9c80, 0x9cec,
3931 		0x9d00, 0x9d6c,
3932 		0x9d80, 0x9dec,
3933 		0x9e00, 0x9e6c,
3934 		0x9e80, 0x9eec,
3935 		0x9f00, 0x9f6c,
3936 		0x9f80, 0xa020,
3937 		0xd004, 0xd03c,
3938 		0xdfc0, 0xdfe0,
3939 		0xe000, 0x11088,
3940 		0x1109c, 0x11110,
3941 		0x11118, 0x1117c,
3942 		0x11190, 0x11204,
3943 		0x19040, 0x1906c,
3944 		0x19078, 0x19080,
3945 		0x1908c, 0x19124,
3946 		0x19150, 0x191b0,
3947 		0x191d0, 0x191e8,
3948 		0x19238, 0x19290,
3949 		0x193f8, 0x19474,
3950 		0x19490, 0x194cc,
3951 		0x194f0, 0x194f8,
3952 		0x19c00, 0x19c60,
3953 		0x19c94, 0x19e10,
3954 		0x19e50, 0x19f34,
3955 		0x19f40, 0x19f50,
3956 		0x19f90, 0x19fe4,
3957 		0x1a000, 0x1a06c,
3958 		0x1a0b0, 0x1a120,
3959 		0x1a128, 0x1a138,
3960 		0x1a190, 0x1a1c4,
3961 		0x1a1fc, 0x1a1fc,
3962 		0x1e008, 0x1e00c,
3963 		0x1e040, 0x1e04c,
3964 		0x1e284, 0x1e290,
3965 		0x1e2c0, 0x1e2c0,
3966 		0x1e2e0, 0x1e2e0,
3967 		0x1e300, 0x1e384,
3968 		0x1e3c0, 0x1e3c8,
3969 		0x1e408, 0x1e40c,
3970 		0x1e440, 0x1e44c,
3971 		0x1e684, 0x1e690,
3972 		0x1e6c0, 0x1e6c0,
3973 		0x1e6e0, 0x1e6e0,
3974 		0x1e700, 0x1e784,
3975 		0x1e7c0, 0x1e7c8,
3976 		0x1e808, 0x1e80c,
3977 		0x1e840, 0x1e84c,
3978 		0x1ea84, 0x1ea90,
3979 		0x1eac0, 0x1eac0,
3980 		0x1eae0, 0x1eae0,
3981 		0x1eb00, 0x1eb84,
3982 		0x1ebc0, 0x1ebc8,
3983 		0x1ec08, 0x1ec0c,
3984 		0x1ec40, 0x1ec4c,
3985 		0x1ee84, 0x1ee90,
3986 		0x1eec0, 0x1eec0,
3987 		0x1eee0, 0x1eee0,
3988 		0x1ef00, 0x1ef84,
3989 		0x1efc0, 0x1efc8,
3990 		0x1f008, 0x1f00c,
3991 		0x1f040, 0x1f04c,
3992 		0x1f284, 0x1f290,
3993 		0x1f2c0, 0x1f2c0,
3994 		0x1f2e0, 0x1f2e0,
3995 		0x1f300, 0x1f384,
3996 		0x1f3c0, 0x1f3c8,
3997 		0x1f408, 0x1f40c,
3998 		0x1f440, 0x1f44c,
3999 		0x1f684, 0x1f690,
4000 		0x1f6c0, 0x1f6c0,
4001 		0x1f6e0, 0x1f6e0,
4002 		0x1f700, 0x1f784,
4003 		0x1f7c0, 0x1f7c8,
4004 		0x1f808, 0x1f80c,
4005 		0x1f840, 0x1f84c,
4006 		0x1fa84, 0x1fa90,
4007 		0x1fac0, 0x1fac0,
4008 		0x1fae0, 0x1fae0,
4009 		0x1fb00, 0x1fb84,
4010 		0x1fbc0, 0x1fbc8,
4011 		0x1fc08, 0x1fc0c,
4012 		0x1fc40, 0x1fc4c,
4013 		0x1fe84, 0x1fe90,
4014 		0x1fec0, 0x1fec0,
4015 		0x1fee0, 0x1fee0,
4016 		0x1ff00, 0x1ff84,
4017 		0x1ffc0, 0x1ffc8,
4018 		0x30000, 0x30030,
4019 		0x30100, 0x30144,
4020 		0x30190, 0x301d0,
4021 		0x30200, 0x30318,
4022 		0x30400, 0x3052c,
4023 		0x30540, 0x3061c,
4024 		0x30800, 0x30834,
4025 		0x308c0, 0x30908,
4026 		0x30910, 0x309ac,
4027 		0x30a00, 0x30a2c,
4028 		0x30a44, 0x30a50,
4029 		0x30a74, 0x30c24,
4030 		0x30d00, 0x30d00,
4031 		0x30d08, 0x30d14,
4032 		0x30d1c, 0x30d20,
4033 		0x30d3c, 0x30d50,
4034 		0x31200, 0x3120c,
4035 		0x31220, 0x31220,
4036 		0x31240, 0x31240,
4037 		0x31600, 0x3160c,
4038 		0x31a00, 0x31a1c,
4039 		0x31e00, 0x31e20,
4040 		0x31e38, 0x31e3c,
4041 		0x31e80, 0x31e80,
4042 		0x31e88, 0x31ea8,
4043 		0x31eb0, 0x31eb4,
4044 		0x31ec8, 0x31ed4,
4045 		0x31fb8, 0x32004,
4046 		0x32200, 0x32200,
4047 		0x32208, 0x32240,
4048 		0x32248, 0x32280,
4049 		0x32288, 0x322c0,
4050 		0x322c8, 0x322fc,
4051 		0x32600, 0x32630,
4052 		0x32a00, 0x32abc,
4053 		0x32b00, 0x32b70,
4054 		0x33000, 0x33048,
4055 		0x33060, 0x3309c,
4056 		0x330f0, 0x33148,
4057 		0x33160, 0x3319c,
4058 		0x331f0, 0x332e4,
4059 		0x332f8, 0x333e4,
4060 		0x333f8, 0x33448,
4061 		0x33460, 0x3349c,
4062 		0x334f0, 0x33548,
4063 		0x33560, 0x3359c,
4064 		0x335f0, 0x336e4,
4065 		0x336f8, 0x337e4,
4066 		0x337f8, 0x337fc,
4067 		0x33814, 0x33814,
4068 		0x3382c, 0x3382c,
4069 		0x33880, 0x3388c,
4070 		0x338e8, 0x338ec,
4071 		0x33900, 0x33948,
4072 		0x33960, 0x3399c,
4073 		0x339f0, 0x33ae4,
4074 		0x33af8, 0x33b10,
4075 		0x33b28, 0x33b28,
4076 		0x33b3c, 0x33b50,
4077 		0x33bf0, 0x33c10,
4078 		0x33c28, 0x33c28,
4079 		0x33c3c, 0x33c50,
4080 		0x33cf0, 0x33cfc,
4081 		0x34000, 0x34030,
4082 		0x34100, 0x34144,
4083 		0x34190, 0x341d0,
4084 		0x34200, 0x34318,
4085 		0x34400, 0x3452c,
4086 		0x34540, 0x3461c,
4087 		0x34800, 0x34834,
4088 		0x348c0, 0x34908,
4089 		0x34910, 0x349ac,
4090 		0x34a00, 0x34a2c,
4091 		0x34a44, 0x34a50,
4092 		0x34a74, 0x34c24,
4093 		0x34d00, 0x34d00,
4094 		0x34d08, 0x34d14,
4095 		0x34d1c, 0x34d20,
4096 		0x34d3c, 0x34d50,
4097 		0x35200, 0x3520c,
4098 		0x35220, 0x35220,
4099 		0x35240, 0x35240,
4100 		0x35600, 0x3560c,
4101 		0x35a00, 0x35a1c,
4102 		0x35e00, 0x35e20,
4103 		0x35e38, 0x35e3c,
4104 		0x35e80, 0x35e80,
4105 		0x35e88, 0x35ea8,
4106 		0x35eb0, 0x35eb4,
4107 		0x35ec8, 0x35ed4,
4108 		0x35fb8, 0x36004,
4109 		0x36200, 0x36200,
4110 		0x36208, 0x36240,
4111 		0x36248, 0x36280,
4112 		0x36288, 0x362c0,
4113 		0x362c8, 0x362fc,
4114 		0x36600, 0x36630,
4115 		0x36a00, 0x36abc,
4116 		0x36b00, 0x36b70,
4117 		0x37000, 0x37048,
4118 		0x37060, 0x3709c,
4119 		0x370f0, 0x37148,
4120 		0x37160, 0x3719c,
4121 		0x371f0, 0x372e4,
4122 		0x372f8, 0x373e4,
4123 		0x373f8, 0x37448,
4124 		0x37460, 0x3749c,
4125 		0x374f0, 0x37548,
4126 		0x37560, 0x3759c,
4127 		0x375f0, 0x376e4,
4128 		0x376f8, 0x377e4,
4129 		0x377f8, 0x377fc,
4130 		0x37814, 0x37814,
4131 		0x3782c, 0x3782c,
4132 		0x37880, 0x3788c,
4133 		0x378e8, 0x378ec,
4134 		0x37900, 0x37948,
4135 		0x37960, 0x3799c,
4136 		0x379f0, 0x37ae4,
4137 		0x37af8, 0x37b10,
4138 		0x37b28, 0x37b28,
4139 		0x37b3c, 0x37b50,
4140 		0x37bf0, 0x37c10,
4141 		0x37c28, 0x37c28,
4142 		0x37c3c, 0x37c50,
4143 		0x37cf0, 0x37cfc,
4144 		0x38000, 0x38030,
4145 		0x38100, 0x38144,
4146 		0x38190, 0x381d0,
4147 		0x38200, 0x38318,
4148 		0x38400, 0x3852c,
4149 		0x38540, 0x3861c,
4150 		0x38800, 0x38834,
4151 		0x388c0, 0x38908,
4152 		0x38910, 0x389ac,
4153 		0x38a00, 0x38a2c,
4154 		0x38a44, 0x38a50,
4155 		0x38a74, 0x38c24,
4156 		0x38d00, 0x38d00,
4157 		0x38d08, 0x38d14,
4158 		0x38d1c, 0x38d20,
4159 		0x38d3c, 0x38d50,
4160 		0x39200, 0x3920c,
4161 		0x39220, 0x39220,
4162 		0x39240, 0x39240,
4163 		0x39600, 0x3960c,
4164 		0x39a00, 0x39a1c,
4165 		0x39e00, 0x39e20,
4166 		0x39e38, 0x39e3c,
4167 		0x39e80, 0x39e80,
4168 		0x39e88, 0x39ea8,
4169 		0x39eb0, 0x39eb4,
4170 		0x39ec8, 0x39ed4,
4171 		0x39fb8, 0x3a004,
4172 		0x3a200, 0x3a200,
4173 		0x3a208, 0x3a240,
4174 		0x3a248, 0x3a280,
4175 		0x3a288, 0x3a2c0,
4176 		0x3a2c8, 0x3a2fc,
4177 		0x3a600, 0x3a630,
4178 		0x3aa00, 0x3aabc,
4179 		0x3ab00, 0x3ab70,
4180 		0x3b000, 0x3b048,
4181 		0x3b060, 0x3b09c,
4182 		0x3b0f0, 0x3b148,
4183 		0x3b160, 0x3b19c,
4184 		0x3b1f0, 0x3b2e4,
4185 		0x3b2f8, 0x3b3e4,
4186 		0x3b3f8, 0x3b448,
4187 		0x3b460, 0x3b49c,
4188 		0x3b4f0, 0x3b548,
4189 		0x3b560, 0x3b59c,
4190 		0x3b5f0, 0x3b6e4,
4191 		0x3b6f8, 0x3b7e4,
4192 		0x3b7f8, 0x3b7fc,
4193 		0x3b814, 0x3b814,
4194 		0x3b82c, 0x3b82c,
4195 		0x3b880, 0x3b88c,
4196 		0x3b8e8, 0x3b8ec,
4197 		0x3b900, 0x3b948,
4198 		0x3b960, 0x3b99c,
4199 		0x3b9f0, 0x3bae4,
4200 		0x3baf8, 0x3bb10,
4201 		0x3bb28, 0x3bb28,
4202 		0x3bb3c, 0x3bb50,
4203 		0x3bbf0, 0x3bc10,
4204 		0x3bc28, 0x3bc28,
4205 		0x3bc3c, 0x3bc50,
4206 		0x3bcf0, 0x3bcfc,
4207 		0x3c000, 0x3c030,
4208 		0x3c100, 0x3c144,
4209 		0x3c190, 0x3c1d0,
4210 		0x3c200, 0x3c318,
4211 		0x3c400, 0x3c52c,
4212 		0x3c540, 0x3c61c,
4213 		0x3c800, 0x3c834,
4214 		0x3c8c0, 0x3c908,
4215 		0x3c910, 0x3c9ac,
4216 		0x3ca00, 0x3ca2c,
4217 		0x3ca44, 0x3ca50,
4218 		0x3ca74, 0x3cc24,
4219 		0x3cd00, 0x3cd00,
4220 		0x3cd08, 0x3cd14,
4221 		0x3cd1c, 0x3cd20,
4222 		0x3cd3c, 0x3cd50,
4223 		0x3d200, 0x3d20c,
4224 		0x3d220, 0x3d220,
4225 		0x3d240, 0x3d240,
4226 		0x3d600, 0x3d60c,
4227 		0x3da00, 0x3da1c,
4228 		0x3de00, 0x3de20,
4229 		0x3de38, 0x3de3c,
4230 		0x3de80, 0x3de80,
4231 		0x3de88, 0x3dea8,
4232 		0x3deb0, 0x3deb4,
4233 		0x3dec8, 0x3ded4,
4234 		0x3dfb8, 0x3e004,
4235 		0x3e200, 0x3e200,
4236 		0x3e208, 0x3e240,
4237 		0x3e248, 0x3e280,
4238 		0x3e288, 0x3e2c0,
4239 		0x3e2c8, 0x3e2fc,
4240 		0x3e600, 0x3e630,
4241 		0x3ea00, 0x3eabc,
4242 		0x3eb00, 0x3eb70,
4243 		0x3f000, 0x3f048,
4244 		0x3f060, 0x3f09c,
4245 		0x3f0f0, 0x3f148,
4246 		0x3f160, 0x3f19c,
4247 		0x3f1f0, 0x3f2e4,
4248 		0x3f2f8, 0x3f3e4,
4249 		0x3f3f8, 0x3f448,
4250 		0x3f460, 0x3f49c,
4251 		0x3f4f0, 0x3f548,
4252 		0x3f560, 0x3f59c,
4253 		0x3f5f0, 0x3f6e4,
4254 		0x3f6f8, 0x3f7e4,
4255 		0x3f7f8, 0x3f7fc,
4256 		0x3f814, 0x3f814,
4257 		0x3f82c, 0x3f82c,
4258 		0x3f880, 0x3f88c,
4259 		0x3f8e8, 0x3f8ec,
4260 		0x3f900, 0x3f948,
4261 		0x3f960, 0x3f99c,
4262 		0x3f9f0, 0x3fae4,
4263 		0x3faf8, 0x3fb10,
4264 		0x3fb28, 0x3fb28,
4265 		0x3fb3c, 0x3fb50,
4266 		0x3fbf0, 0x3fc10,
4267 		0x3fc28, 0x3fc28,
4268 		0x3fc3c, 0x3fc50,
4269 		0x3fcf0, 0x3fcfc,
4270 		0x40000, 0x4000c,
4271 		0x40040, 0x40068,
4272 		0x4007c, 0x40144,
4273 		0x40180, 0x4018c,
4274 		0x40200, 0x40298,
4275 		0x402ac, 0x4033c,
4276 		0x403f8, 0x403fc,
4277 		0x41304, 0x413c4,
4278 		0x41400, 0x4141c,
4279 		0x41480, 0x414d0,
4280 		0x44000, 0x44078,
4281 		0x440c0, 0x44278,
4282 		0x442c0, 0x44478,
4283 		0x444c0, 0x44678,
4284 		0x446c0, 0x44878,
4285 		0x448c0, 0x449fc,
4286 		0x45000, 0x45068,
4287 		0x45080, 0x45084,
4288 		0x450a0, 0x450b0,
4289 		0x45200, 0x45268,
4290 		0x45280, 0x45284,
4291 		0x452a0, 0x452b0,
4292 		0x460c0, 0x460e4,
4293 		0x47000, 0x4708c,
4294 		0x47200, 0x47250,
4295 		0x47400, 0x47420,
4296 		0x47600, 0x47618,
4297 		0x47800, 0x47814,
4298 		0x48000, 0x4800c,
4299 		0x48040, 0x48068,
4300 		0x4807c, 0x48144,
4301 		0x48180, 0x4818c,
4302 		0x48200, 0x48298,
4303 		0x482ac, 0x4833c,
4304 		0x483f8, 0x483fc,
4305 		0x49304, 0x493c4,
4306 		0x49400, 0x4941c,
4307 		0x49480, 0x494d0,
4308 		0x4c000, 0x4c078,
4309 		0x4c0c0, 0x4c278,
4310 		0x4c2c0, 0x4c478,
4311 		0x4c4c0, 0x4c678,
4312 		0x4c6c0, 0x4c878,
4313 		0x4c8c0, 0x4c9fc,
4314 		0x4d000, 0x4d068,
4315 		0x4d080, 0x4d084,
4316 		0x4d0a0, 0x4d0b0,
4317 		0x4d200, 0x4d268,
4318 		0x4d280, 0x4d284,
4319 		0x4d2a0, 0x4d2b0,
4320 		0x4e0c0, 0x4e0e4,
4321 		0x4f000, 0x4f08c,
4322 		0x4f200, 0x4f250,
4323 		0x4f400, 0x4f420,
4324 		0x4f600, 0x4f618,
4325 		0x4f800, 0x4f814,
4326 		0x50000, 0x500cc,
4327 		0x50400, 0x50400,
4328 		0x50800, 0x508cc,
4329 		0x50c00, 0x50c00,
4330 		0x51000, 0x5101c,
4331 		0x51300, 0x51308,
4332 	};
4333 
4334 	if (is_t4(sc)) {
4335 		reg_ranges = &t4_reg_ranges[0];
4336 		n = nitems(t4_reg_ranges);
4337 	} else {
4338 		reg_ranges = &t5_reg_ranges[0];
4339 		n = nitems(t5_reg_ranges);
4340 	}
4341 
4342 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4343 	for (i = 0; i < n; i += 2)
4344 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4345 }
4346 
4347 static void
4348 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4349 {
4350 	int i;
4351 	u_int v, tnl_cong_drops;
4352 	struct timeval tv;
4353 	const struct timeval interval = {0, 250000};	/* 250ms */
4354 
4355 	getmicrotime(&tv);
4356 	timevalsub(&tv, &interval);
4357 	if (timevalcmp(&tv, &pi->last_refreshed, <))
4358 		return;
4359 
4360 	tnl_cong_drops = 0;
4361 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4362 	for (i = 0; i < NCHAN; i++) {
4363 		if (pi->rx_chan_map & (1 << i)) {
4364 			mtx_lock(&sc->regwin_lock);
4365 			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4366 			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4367 			mtx_unlock(&sc->regwin_lock);
4368 			tnl_cong_drops += v;
4369 		}
4370 	}
4371 	pi->tnl_cong_drops = tnl_cong_drops;
4372 	getmicrotime(&pi->last_refreshed);
4373 }
4374 
4375 static void
4376 cxgbe_tick(void *arg)
4377 {
4378 	struct port_info *pi = arg;
4379 	struct adapter *sc = pi->adapter;
4380 	struct ifnet *ifp = pi->ifp;
4381 
4382 	PORT_LOCK(pi);
4383 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4384 		PORT_UNLOCK(pi);
4385 		return;	/* without scheduling another callout */
4386 	}
4387 
4388 	cxgbe_refresh_stats(sc, pi);
4389 
4390 	callout_schedule(&pi->tick, hz);
4391 	PORT_UNLOCK(pi);
4392 }
4393 
4394 static void
4395 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4396 {
4397 	struct ifnet *vlan;
4398 
4399 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4400 		return;
4401 
4402 	vlan = VLAN_DEVAT(ifp, vid);
4403 	VLAN_SETCOOKIE(vlan, ifp);
4404 }
4405 
/*
 * Catch-all CPL handler, installed in every cpl_handler[] slot that has no
 * real handler registered.  Panics under INVARIANTS; otherwise logs the
 * stray opcode and frees the payload mbuf.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	/* Nobody will consume the payload; free it here. */
	m_freem(m);
#endif
	return (EDOOFUS);
}
4420 
4421 int
4422 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4423 {
4424 	uintptr_t *loc, new;
4425 
4426 	if (opcode >= nitems(sc->cpl_handler))
4427 		return (EINVAL);
4428 
4429 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4430 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4431 	atomic_store_rel_ptr(loc, new);
4432 
4433 	return (0);
4434 }
4435 
/*
 * Catch-all async notification handler, used when no real handler has been
 * registered.  Panics under INVARIANTS; otherwise logs and returns.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4448 
4449 int
4450 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4451 {
4452 	uintptr_t *loc, new;
4453 
4454 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4455 	loc = (uintptr_t *) &sc->an_handler;
4456 	atomic_store_rel_ptr(loc, new);
4457 
4458 	return (0);
4459 }
4460 
/*
 * Catch-all firmware message handler, used for fw_msg types with no real
 * handler registered.  Recovers the enclosing cpl_fw6_msg from the data
 * pointer to report the message type.  Panics under INVARIANTS; otherwise
 * logs and returns.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4474 
4475 int
4476 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4477 {
4478 	uintptr_t *loc, new;
4479 
4480 	if (type >= nitems(sc->fw_msg_handler))
4481 		return (EINVAL);
4482 
4483 	/*
4484 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4485 	 * handler dispatch table.  Reject any attempt to install a handler for
4486 	 * this subtype.
4487 	 */
4488 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4489 		return (EINVAL);
4490 
4491 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4492 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4493 	atomic_store_rel_ptr(loc, new);
4494 
4495 	return (0);
4496 }
4497 
4498 static int
4499 t4_sysctls(struct adapter *sc)
4500 {
4501 	struct sysctl_ctx_list *ctx;
4502 	struct sysctl_oid *oid;
4503 	struct sysctl_oid_list *children, *c0;
4504 	static char *caps[] = {
4505 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4506 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4507 		    "\6HASHFILTER\7ETHOFLD",
4508 		"\20\1TOE",				/* caps[2] toecaps */
4509 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4510 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4511 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4512 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4513 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4514 		    "\4PO_INITIAOR\5PO_TARGET"
4515 	};
4516 	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4517 
4518 	ctx = device_get_sysctl_ctx(sc->dev);
4519 
4520 	/*
4521 	 * dev.t4nex.X.
4522 	 */
4523 	oid = device_get_sysctl_tree(sc->dev);
4524 	c0 = children = SYSCTL_CHILDREN(oid);
4525 
4526 	sc->sc_do_rxcopy = 1;
4527 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4528 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4529 
4530 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4531 	    sc->params.nports, "# of ports");
4532 
4533 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4534 	    NULL, chip_rev(sc), "chip hardware revision");
4535 
4536 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4537 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4538 
4539 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4540 	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4541 
4542 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4543 	    sc->cfcsum, "config file checksum");
4544 
4545 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4546 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4547 	    sysctl_bitfield, "A", "available doorbells");
4548 
4549 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4550 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4551 	    sysctl_bitfield, "A", "available link capabilities");
4552 
4553 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4554 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4555 	    sysctl_bitfield, "A", "available NIC capabilities");
4556 
4557 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4558 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4559 	    sysctl_bitfield, "A", "available TCP offload capabilities");
4560 
4561 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4562 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4563 	    sysctl_bitfield, "A", "available RDMA capabilities");
4564 
4565 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4566 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4567 	    sysctl_bitfield, "A", "available iSCSI capabilities");
4568 
4569 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4570 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4571 	    sysctl_bitfield, "A", "available FCoE capabilities");
4572 
4573 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4574 	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4575 
4576 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4577 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4578 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4579 	    "interrupt holdoff timer values (us)");
4580 
4581 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4582 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4583 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4584 	    "interrupt holdoff packet counter values");
4585 
4586 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4587 	    NULL, sc->tids.nftids, "number of filters");
4588 
4589 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4590 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4591 	    "chip temperature (in Celsius)");
4592 
4593 	t4_sge_sysctls(sc, ctx, children);
4594 
4595 	sc->lro_timeout = 100;
4596 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4597 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4598 
4599 #ifdef SBUF_DRAIN
4600 	/*
4601 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4602 	 */
4603 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4604 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4605 	    "logs and miscellaneous information");
4606 	children = SYSCTL_CHILDREN(oid);
4607 
4608 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4609 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4610 	    sysctl_cctrl, "A", "congestion control");
4611 
4612 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4613 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4614 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4615 
4616 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4617 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4618 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4619 
4620 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4621 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4622 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4623 
4624 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4625 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4626 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4627 
4628 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4629 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4630 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4631 
4632 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4633 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4634 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4635 
4636 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4637 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4638 	    sysctl_cim_la, "A", "CIM logic analyzer");
4639 
4640 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4641 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4642 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4643 
4644 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4645 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4646 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4647 
4648 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4649 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4650 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4651 
4652 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4653 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4654 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4655 
4656 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4657 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4658 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4659 
4660 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4661 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4662 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4663 
4664 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4665 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4666 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4667 
4668 	if (is_t5(sc)) {
4669 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4670 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4671 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4672 
4673 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4674 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4675 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4676 	}
4677 
4678 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4679 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4680 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4681 
4682 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4683 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4684 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4685 
4686 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4687 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4688 	    sysctl_cpl_stats, "A", "CPL statistics");
4689 
4690 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4691 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4692 	    sysctl_ddp_stats, "A", "DDP statistics");
4693 
4694 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4695 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4696 	    sysctl_devlog, "A", "firmware's device log");
4697 
4698 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4699 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4700 	    sysctl_fcoe_stats, "A", "FCoE statistics");
4701 
4702 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4703 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4704 	    sysctl_hw_sched, "A", "hardware scheduler ");
4705 
4706 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4707 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4708 	    sysctl_l2t, "A", "hardware L2 table");
4709 
4710 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4711 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4712 	    sysctl_lb_stats, "A", "loopback statistics");
4713 
4714 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4715 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4716 	    sysctl_meminfo, "A", "memory regions");
4717 
4718 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4719 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4720 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4721 
4722 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4723 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4724 	    sysctl_path_mtus, "A", "path MTUs");
4725 
4726 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4727 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4728 	    sysctl_pm_stats, "A", "PM statistics");
4729 
4730 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4731 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4732 	    sysctl_rdma_stats, "A", "RDMA statistics");
4733 
4734 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4735 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4736 	    sysctl_tcp_stats, "A", "TCP statistics");
4737 
4738 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4739 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4740 	    sysctl_tids, "A", "TID information");
4741 
4742 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4743 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4744 	    sysctl_tp_err_stats, "A", "TP error statistics");
4745 
4746 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4747 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4748 	    sysctl_tp_la, "A", "TP logic analyzer");
4749 
4750 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4751 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4752 	    sysctl_tx_rate, "A", "Tx rate");
4753 
4754 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4755 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4756 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4757 
4758 	if (is_t5(sc)) {
4759 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4760 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4761 		    sysctl_wcwr_stats, "A", "write combined work requests");
4762 	}
4763 #endif
4764 
4765 #ifdef TCP_OFFLOAD
4766 	if (is_offload(sc)) {
4767 		/*
4768 		 * dev.t4nex.X.toe.
4769 		 */
4770 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4771 		    NULL, "TOE parameters");
4772 		children = SYSCTL_CHILDREN(oid);
4773 
4774 		sc->tt.sndbuf = 256 * 1024;
4775 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4776 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4777 
4778 		sc->tt.ddp = 0;
4779 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4780 		    &sc->tt.ddp, 0, "DDP allowed");
4781 
4782 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4783 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4784 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4785 
4786 		sc->tt.ddp_thres =
4787 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4788 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4789 		    &sc->tt.ddp_thres, 0, "DDP threshold");
4790 
4791 		sc->tt.rx_coalesce = 1;
4792 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4793 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4794 	}
4795 #endif
4796 
4797 
4798 	return (0);
4799 }
4800 
/*
 * Create the per-port sysctl tree (dev.cxgbe.X.*): queue layout, holdoff
 * settings, pause settings, and the dev.cxgbe.X.stats subtree of hardware
 * MPS port counters.  Called once per port during attach.  Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct adapter *sc = pi->adapter;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		/* Only 10GBASE-T ports expose PHY temperature/fw version. */
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
	    "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif
#ifdef DEV_NETMAP
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
	    &pi->nnmrxq, 0, "# of rx queues for netmap");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
	    &pi->nnmtxq, 0, "# of tx queues for netmap");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
	    CTLFLAG_RD, &pi->first_nm_rxq, 0,
	    "index of first netmap rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
	    CTLFLAG_RD, &pi->first_nm_txq, 0,
	    "index of first netmap tx queue");
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
	    CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
	    "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Register a read-only 64-bit counter that is read directly from the given
 * MPS port-statistics register pair (arg2 is the register address).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Register a read-only 64-bit counter backed by a field in pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
5044 
5045 static int
5046 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5047 {
5048 	int rc, *i;
5049 	struct sbuf sb;
5050 
5051 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
5052 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
5053 		sbuf_printf(&sb, "%d ", *i);
5054 	sbuf_trim(&sb);
5055 	sbuf_finish(&sb);
5056 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
5057 	sbuf_delete(&sb);
5058 	return (rc);
5059 }
5060 
5061 static int
5062 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5063 {
5064 	int rc;
5065 	struct sbuf *sb;
5066 
5067 	rc = sysctl_wire_old_buffer(req, 0);
5068 	if (rc != 0)
5069 		return(rc);
5070 
5071 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5072 	if (sb == NULL)
5073 		return (ENOMEM);
5074 
5075 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5076 	rc = sbuf_finish(sb);
5077 	sbuf_delete(sb);
5078 
5079 	return (rc);
5080 }
5081 
/*
 * Read a value from the port's BT (10GBASE-T) PHY over MDIO and export
 * it as an integer.  arg1 is the port, arg2 (op) selects which PHY
 * register is read (0 or non-zero; register numbers below are
 * undocumented here — see the XXX).  Read-only.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	/* Serialize against other operations that talk to the firmware. */
	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* op 0: only the high byte of the register
				 * appears to be meaningful — see XXX above */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
5106 
5107 static int
5108 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5109 {
5110 	struct port_info *pi = arg1;
5111 	int rc, val;
5112 
5113 	val = pi->rsrv_noflowq;
5114 	rc = sysctl_handle_int(oidp, &val, 0, req);
5115 	if (rc != 0 || req->newptr == NULL)
5116 		return (rc);
5117 
5118 	if ((val >= 1) && (pi->ntxq > 1))
5119 		pi->rsrv_noflowq = 1;
5120 	else
5121 		pi->rsrv_noflowq = 0;
5122 
5123 	return (rc);
5124 }
5125 
/*
 * Get/set the interrupt holdoff timer index for all of the port's rx
 * queues (and offload rx queues when TCP_OFFLOAD is compiled in).  A
 * new value takes effect immediately: it is written into intr_params
 * of every live rx queue.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* The index selects one of the SGE_NTIMERS holdoff timers. */
	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Keep packet-count holdoff enabled iff a pktc idx is configured. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* No 8-bit atomic store on this platform; plain store. */
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	/* Record the new index only after all queues have been updated. */
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
5174 
5175 static int
5176 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5177 {
5178 	struct port_info *pi = arg1;
5179 	struct adapter *sc = pi->adapter;
5180 	int idx, rc;
5181 
5182 	idx = pi->pktc_idx;
5183 
5184 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5185 	if (rc != 0 || req->newptr == NULL)
5186 		return (rc);
5187 
5188 	if (idx < -1 || idx >= SGE_NCOUNTERS)
5189 		return (EINVAL);
5190 
5191 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5192 	    "t4pktc");
5193 	if (rc)
5194 		return (rc);
5195 
5196 	if (pi->flags & PORT_INIT_DONE)
5197 		rc = EBUSY; /* cannot be changed once the queues are created */
5198 	else
5199 		pi->pktc_idx = idx;
5200 
5201 	end_synchronized_op(sc, LOCK_HELD);
5202 	return (rc);
5203 }
5204 
5205 static int
5206 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5207 {
5208 	struct port_info *pi = arg1;
5209 	struct adapter *sc = pi->adapter;
5210 	int qsize, rc;
5211 
5212 	qsize = pi->qsize_rxq;
5213 
5214 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5215 	if (rc != 0 || req->newptr == NULL)
5216 		return (rc);
5217 
5218 	if (qsize < 128 || (qsize & 7))
5219 		return (EINVAL);
5220 
5221 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5222 	    "t4rxqs");
5223 	if (rc)
5224 		return (rc);
5225 
5226 	if (pi->flags & PORT_INIT_DONE)
5227 		rc = EBUSY; /* cannot be changed once the queues are created */
5228 	else
5229 		pi->qsize_rxq = qsize;
5230 
5231 	end_synchronized_op(sc, LOCK_HELD);
5232 	return (rc);
5233 }
5234 
5235 static int
5236 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5237 {
5238 	struct port_info *pi = arg1;
5239 	struct adapter *sc = pi->adapter;
5240 	int qsize, rc;
5241 
5242 	qsize = pi->qsize_txq;
5243 
5244 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5245 	if (rc != 0 || req->newptr == NULL)
5246 		return (rc);
5247 
5248 	/* bufring size must be powerof2 */
5249 	if (qsize < 128 || !powerof2(qsize))
5250 		return (EINVAL);
5251 
5252 	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5253 	    "t4txqs");
5254 	if (rc)
5255 		return (rc);
5256 
5257 	if (pi->flags & PORT_INIT_DONE)
5258 		rc = EBUSY; /* cannot be changed once the queues are created */
5259 	else
5260 		pi->qsize_txq = qsize;
5261 
5262 	end_synchronized_op(sc, LOCK_HELD);
5263 	return (rc);
5264 }
5265 
/*
 * Get/set the port's pause (flow control) settings.  Reads decode the
 * current settings with "%b" (bit 1 = PAUSE_RX, bit 2 = PAUSE_TX as
 * per the bits string).  Writes take a single decimal digit whose
 * PAUSE_TX/PAUSE_RX bits are the requested settings; the link is
 * restarted to apply a change.
 */
static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		/* Read: report the operational (negotiated) settings. */
		struct sbuf *sb;
		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return(rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		char s[2];
		int n;

		/* Seed the string with the currently requested settings. */
		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return(rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~(PAUSE_TX | PAUSE_RX))
			return (EINVAL);	/* some other bit is set too */

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE");
		if (rc)
			return (rc);
		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
			/*
			 * t4_link_start may modify lc->link_ok; save it
			 * and put it back so the observed link state is
			 * unchanged by the restart.
			 */
			int link_ok = lc->link_ok;

			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
			lc->requested_fc |= n;
			rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
			lc->link_ok = link_ok;	/* restore */
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
5324 
5325 static int
5326 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5327 {
5328 	struct adapter *sc = arg1;
5329 	int reg = arg2;
5330 	uint64_t val;
5331 
5332 	val = t4_read_reg64(sc, reg);
5333 
5334 	return (sysctl_handle_64(oidp, &val, 0, req));
5335 }
5336 
5337 static int
5338 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5339 {
5340 	struct adapter *sc = arg1;
5341 	int rc, t;
5342 	uint32_t param, val;
5343 
5344 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5345 	if (rc)
5346 		return (rc);
5347 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5348 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5349 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5350 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5351 	end_synchronized_op(sc, 0);
5352 	if (rc)
5353 		return (rc);
5354 
5355 	/* unknown is returned as 0 but we display -1 in that case */
5356 	t = val == 0 ? -1 : val;
5357 
5358 	rc = sysctl_handle_int(oidp, &t, 0, req);
5359 	return (rc);
5360 }
5361 
5362 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table: for each of the NCCTRL_WIN
 * windows, the 16 per-MTU increment values (split across two lines)
 * followed by the additive window (a_wnd) and the decrementing factor
 * (b_wnd, shown as a fraction via dec_fac).  Read-only.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* Printable forms of the 8 possible b_wnd factor values. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		/* First 8 MTUs on one line, the remaining 8 on the next. */
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5400 
/*
 * Names of the CIM queues, in the order of the qid cookie used by
 * sysctl_cim_ibq_obq: the IBQs first, then the OBQs.  The last two
 * OBQs exist on T5 and later chips only.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
5406 
/*
 * Dump the contents of one CIM inbound or outbound queue as hex words.
 * arg2 is a cookie: values below CIM_NUM_IBQ select an IBQ, the rest
 * select an OBQ (see qname[] for the ordering).  Read-only.
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	/* T5 has more OBQs than T4. */
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	/* A negative return from the queue read is a negated errno. */
	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* 16 bytes (4 words) per output line, byte offset first. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5462 
5463 static int
5464 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5465 {
5466 	struct adapter *sc = arg1;
5467 	u_int cfg;
5468 	struct sbuf *sb;
5469 	uint32_t *buf, *p;
5470 	int rc;
5471 
5472 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5473 	if (rc != 0)
5474 		return (rc);
5475 
5476 	rc = sysctl_wire_old_buffer(req, 0);
5477 	if (rc != 0)
5478 		return (rc);
5479 
5480 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5481 	if (sb == NULL)
5482 		return (ENOMEM);
5483 
5484 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5485 	    M_ZERO | M_WAITOK);
5486 
5487 	rc = -t4_cim_read_la(sc, buf, NULL);
5488 	if (rc != 0)
5489 		goto done;
5490 
5491 	sbuf_printf(sb, "Status   Data      PC%s",
5492 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5493 	    "     LS0Stat  LS0Addr             LS0Data");
5494 
5495 	KASSERT((sc->params.cim_la_size & 7) == 0,
5496 	    ("%s: p will walk off the end of buf", __func__));
5497 
5498 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5499 		if (cfg & F_UPDBGLACAPTPCONLY) {
5500 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5501 			    p[6], p[7]);
5502 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5503 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5504 			    p[4] & 0xff, p[5] >> 8);
5505 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5506 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5507 			    p[1] & 0xf, p[2] >> 4);
5508 		} else {
5509 			sbuf_printf(sb,
5510 			    "\n  %02x   %x%07x %x%07x %08x %08x "
5511 			    "%08x%08x%08x%08x",
5512 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5513 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5514 			    p[6], p[7]);
5515 		}
5516 	}
5517 
5518 	rc = sbuf_finish(sb);
5519 	sbuf_delete(sb);
5520 done:
5521 	free(buf, M_CXGBE);
5522 	return (rc);
5523 }
5524 
/*
 * Dump the CIM MA logic analyzer.  The buffer holds two sets of
 * CIM_MALA_SIZE entries of 5 words each: the raw trace first, then the
 * decoded (Cnt/ID/Tag/UE/Data/RDY/VLD) view.  Read-only.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	/* First half of buf gets the raw trace, second half the decoded. */
	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* Raw entries, most significant word first. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Decoded entries; the fields straddle word boundaries. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5568 
5569 static int
5570 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5571 {
5572 	struct adapter *sc = arg1;
5573 	u_int i;
5574 	struct sbuf *sb;
5575 	uint32_t *buf, *p;
5576 	int rc;
5577 
5578 	rc = sysctl_wire_old_buffer(req, 0);
5579 	if (rc != 0)
5580 		return (rc);
5581 
5582 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5583 	if (sb == NULL)
5584 		return (ENOMEM);
5585 
5586 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5587 	    M_ZERO | M_WAITOK);
5588 
5589 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5590 	p = buf;
5591 
5592 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5593 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5594 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5595 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5596 		    p[4], p[3], p[2], p[1], p[0]);
5597 	}
5598 
5599 	sbuf_printf(sb, "\n\nCntl ID               Data");
5600 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5601 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5602 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5603 	}
5604 
5605 	rc = sbuf_finish(sb);
5606 	sbuf_delete(sb);
5607 	free(buf, M_CXGBE);
5608 	return (rc);
5609 }
5610 
/*
 * Show the configuration and current state of every CIM queue: base,
 * size, threshold (IBQs only), read/write pointers and the SOP/EOP
 * counts.  The shadow-register addresses differ between T4 and T5+.
 * Read-only.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	/* 2 words of write pointer state per OBQ. */
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	/* 4 words of pointer/count state per queue. */
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* IBQs first (they have a threshold), then the OBQs. */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5669 
5670 static int
5671 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5672 {
5673 	struct adapter *sc = arg1;
5674 	struct sbuf *sb;
5675 	int rc;
5676 	struct tp_cpl_stats stats;
5677 
5678 	rc = sysctl_wire_old_buffer(req, 0);
5679 	if (rc != 0)
5680 		return (rc);
5681 
5682 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5683 	if (sb == NULL)
5684 		return (ENOMEM);
5685 
5686 	t4_tp_get_cpl_stats(sc, &stats);
5687 
5688 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5689 	    "channel 3\n");
5690 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5691 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5692 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5693 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5694 
5695 	rc = sbuf_finish(sb);
5696 	sbuf_delete(sb);
5697 
5698 	return (rc);
5699 }
5700 
5701 static int
5702 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5703 {
5704 	struct adapter *sc = arg1;
5705 	struct sbuf *sb;
5706 	int rc;
5707 	struct tp_usm_stats stats;
5708 
5709 	rc = sysctl_wire_old_buffer(req, 0);
5710 	if (rc != 0)
5711 		return(rc);
5712 
5713 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5714 	if (sb == NULL)
5715 		return (ENOMEM);
5716 
5717 	t4_get_usm_stats(sc, &stats);
5718 
5719 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5720 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5721 	sbuf_printf(sb, "Drops:  %u", stats.drops);
5722 
5723 	rc = sbuf_finish(sb);
5724 	sbuf_delete(sb);
5725 
5726 	return (rc);
5727 }
5728 
/*
 * Printable names for the firmware devlog levels, indexed by the
 * FW_DEVLOG_LEVEL_* value found in each log entry.
 * NOTE(review): not static — confirm nothing else externs this before
 * narrowing the linkage.
 */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5737 
/*
 * Printable names for the firmware devlog facilities, indexed by the
 * FW_DEVLOG_FACILITY_* value found in each log entry.
 * NOTE(review): not static — confirm nothing else externs this before
 * narrowing the linkage.
 */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5764 
/*
 * Dump the firmware's device log.  The log lives in adapter memory as a
 * ring of fw_devlog_e entries; the entry with the lowest timestamp is
 * the oldest, so the dump starts there and wraps around.  Read-only.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0, m;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	/* Fall back to hardcoded defaults if the params weren't read. */
	if (dparams->start == 0) {
		dparams->memtype = FW_MEMTYPE_EDC0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	m = fwmtype_to_hwmtype(dparams->memtype);
	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byte-swap every entry and find the oldest one (lowest
	 * timestamp) -- that's where the dump will start.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring from the oldest entry, wrapping at nentries. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* The format string comes from the firmware log entry. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5850 
5851 static int
5852 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5853 {
5854 	struct adapter *sc = arg1;
5855 	struct sbuf *sb;
5856 	int rc;
5857 	struct tp_fcoe_stats stats[4];
5858 
5859 	rc = sysctl_wire_old_buffer(req, 0);
5860 	if (rc != 0)
5861 		return (rc);
5862 
5863 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5864 	if (sb == NULL)
5865 		return (ENOMEM);
5866 
5867 	t4_get_fcoe_stats(sc, 0, &stats[0]);
5868 	t4_get_fcoe_stats(sc, 1, &stats[1]);
5869 	t4_get_fcoe_stats(sc, 2, &stats[2]);
5870 	t4_get_fcoe_stats(sc, 3, &stats[3]);
5871 
5872 	sbuf_printf(sb, "                   channel 0        channel 1        "
5873 	    "channel 2        channel 3\n");
5874 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5875 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5876 	    stats[3].octetsDDP);
5877 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5878 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5879 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5880 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5881 	    stats[3].framesDrop);
5882 
5883 	rc = sbuf_finish(sb);
5884 	sbuf_delete(sb);
5885 
5886 	return (rc);
5887 }
5888 
5889 static int
5890 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5891 {
5892 	struct adapter *sc = arg1;
5893 	struct sbuf *sb;
5894 	int rc, i;
5895 	unsigned int map, kbps, ipg, mode;
5896 	unsigned int pace_tab[NTX_SCHED];
5897 
5898 	rc = sysctl_wire_old_buffer(req, 0);
5899 	if (rc != 0)
5900 		return (rc);
5901 
5902 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5903 	if (sb == NULL)
5904 		return (ENOMEM);
5905 
5906 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5907 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5908 	t4_read_pace_tbl(sc, pace_tab);
5909 
5910 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5911 	    "Class IPG (0.1 ns)   Flow IPG (us)");
5912 
5913 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5914 		t4_get_tx_sched(sc, i, &kbps, &ipg);
5915 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5916 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5917 		if (kbps)
5918 			sbuf_printf(sb, "%9u     ", kbps);
5919 		else
5920 			sbuf_printf(sb, " disabled     ");
5921 
5922 		if (ipg)
5923 			sbuf_printf(sb, "%13u        ", ipg);
5924 		else
5925 			sbuf_printf(sb, "     disabled        ");
5926 
5927 		if (pace_tab[i])
5928 			sbuf_printf(sb, "%10u", pace_tab[i]);
5929 		else
5930 			sbuf_printf(sb, "  disabled");
5931 	}
5932 
5933 	rc = sbuf_finish(sb);
5934 	sbuf_delete(sb);
5935 
5936 	return (rc);
5937 }
5938 
/*
 * Dump the loopback port statistics, two loopback ports per column
 * pair.  Read-only.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/*
	 * Row labels; must stay in the same order as the uint64_t fields
	 * of struct lb_port_stats starting at .octets, because the print
	 * loop below walks the struct as a flat array of counters.
	 */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Two loopback ports at a time, side by side. */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5986 
5987 static int
5988 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5989 {
5990 	int rc = 0;
5991 	struct port_info *pi = arg1;
5992 	struct sbuf *sb;
5993 	static const char *linkdnreasons[] = {
5994 		"non-specific", "remote fault", "autoneg failed", "reserved3",
5995 		"PHY overheated", "unknown", "rx los", "reserved7"
5996 	};
5997 
5998 	rc = sysctl_wire_old_buffer(req, 0);
5999 	if (rc != 0)
6000 		return(rc);
6001 	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6002 	if (sb == NULL)
6003 		return (ENOMEM);
6004 
6005 	if (pi->linkdnrc < 0)
6006 		sbuf_printf(sb, "n/a");
6007 	else if (pi->linkdnrc < nitems(linkdnreasons))
6008 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
6009 	else
6010 		sbuf_printf(sb, "%d", pi->linkdnrc);
6011 
6012 	rc = sbuf_finish(sb);
6013 	sbuf_delete(sb);
6014 
6015 	return (rc);
6016 }
6017 
/*
 * A contiguous range of card memory, used while working out the chip's
 * memory layout.  idx identifies the region name (or hides the entry
 * when set out of range).
 */
struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};

/*
 * qsort comparator: order mem_desc entries by base address.
 *
 * Fix: the old "a->base - b->base" computed an unsigned difference and
 * implicitly converted it to int, which yields the wrong sign whenever
 * the two bases are more than INT_MAX apart.  Compare explicitly
 * instead of subtracting.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
	const unsigned int base_a = ((const struct mem_desc *)a)->base;
	const unsigned int base_b = ((const struct mem_desc *)b)->base;

	if (base_a < base_b)
		return (-1);
	if (base_a > base_b)
		return (1);
	return (0);
}
6030 
/*
 * Append one "name  from-to [size]" line to sb.  Empty regions
 * (to == from - 1, i.e. size 0) are skipped.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size = to - from + 1;

	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
6044 
6045 static int
6046 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6047 {
6048 	struct adapter *sc = arg1;
6049 	struct sbuf *sb;
6050 	int rc, i, n;
6051 	uint32_t lo, hi, used, alloc;
6052 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6053 	static const char *region[] = {
6054 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6055 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6056 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6057 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6058 		"RQUDP region:", "PBL region:", "TXPBL region:",
6059 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6060 		"On-chip queues:"
6061 	};
6062 	struct mem_desc avail[4];
6063 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
6064 	struct mem_desc *md = mem;
6065 
6066 	rc = sysctl_wire_old_buffer(req, 0);
6067 	if (rc != 0)
6068 		return (rc);
6069 
6070 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6071 	if (sb == NULL)
6072 		return (ENOMEM);
6073 
6074 	for (i = 0; i < nitems(mem); i++) {
6075 		mem[i].limit = 0;
6076 		mem[i].idx = i;
6077 	}
6078 
6079 	/* Find and sort the populated memory ranges */
6080 	i = 0;
6081 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6082 	if (lo & F_EDRAM0_ENABLE) {
6083 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6084 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
6085 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6086 		avail[i].idx = 0;
6087 		i++;
6088 	}
6089 	if (lo & F_EDRAM1_ENABLE) {
6090 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6091 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
6092 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6093 		avail[i].idx = 1;
6094 		i++;
6095 	}
6096 	if (lo & F_EXT_MEM_ENABLE) {
6097 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6098 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6099 		avail[i].limit = avail[i].base +
6100 		    (G_EXT_MEM_SIZE(hi) << 20);
6101 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
6102 		i++;
6103 	}
6104 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
6105 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6106 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6107 		avail[i].limit = avail[i].base +
6108 		    (G_EXT_MEM1_SIZE(hi) << 20);
6109 		avail[i].idx = 4;
6110 		i++;
6111 	}
6112 	if (!i)                                    /* no memory available */
6113 		return 0;
6114 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6115 
6116 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6117 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6118 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6119 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6120 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6121 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6122 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6123 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6124 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6125 
6126 	/* the next few have explicit upper bounds */
6127 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6128 	md->limit = md->base - 1 +
6129 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6130 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6131 	md++;
6132 
6133 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6134 	md->limit = md->base - 1 +
6135 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6136 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6137 	md++;
6138 
6139 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6140 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6141 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6142 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
6143 	} else {
6144 		md->base = 0;
6145 		md->idx = nitems(region);  /* hide it */
6146 	}
6147 	md++;
6148 
6149 #define ulp_region(reg) \
6150 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6151 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6152 
6153 	ulp_region(RX_ISCSI);
6154 	ulp_region(RX_TDDP);
6155 	ulp_region(TX_TPT);
6156 	ulp_region(RX_STAG);
6157 	ulp_region(RX_RQ);
6158 	ulp_region(RX_RQUDP);
6159 	ulp_region(RX_PBL);
6160 	ulp_region(TX_PBL);
6161 #undef ulp_region
6162 
6163 	md->base = 0;
6164 	md->idx = nitems(region);
6165 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
6166 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
6167 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
6168 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
6169 	}
6170 	md++;
6171 
6172 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6173 	md->limit = md->base + sc->tids.ntids - 1;
6174 	md++;
6175 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6176 	md->limit = md->base + sc->tids.ntids - 1;
6177 	md++;
6178 
6179 	md->base = sc->vres.ocq.start;
6180 	if (sc->vres.ocq.size)
6181 		md->limit = md->base + sc->vres.ocq.size - 1;
6182 	else
6183 		md->idx = nitems(region);  /* hide it */
6184 	md++;
6185 
6186 	/* add any address-space holes, there can be up to 3 */
6187 	for (n = 0; n < i - 1; n++)
6188 		if (avail[n].limit < avail[n + 1].base)
6189 			(md++)->base = avail[n].limit;
6190 	if (avail[n].limit)
6191 		(md++)->base = avail[n].limit;
6192 
6193 	n = md - mem;
6194 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6195 
6196 	for (lo = 0; lo < i; lo++)
6197 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6198 				avail[lo].limit - 1);
6199 
6200 	sbuf_printf(sb, "\n");
6201 	for (i = 0; i < n; i++) {
6202 		if (mem[i].idx >= nitems(region))
6203 			continue;                        /* skip holes */
6204 		if (!mem[i].limit)
6205 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6206 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
6207 				mem[i].limit);
6208 	}
6209 
6210 	sbuf_printf(sb, "\n");
6211 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6212 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6213 	mem_region_show(sb, "uP RAM:", lo, hi);
6214 
6215 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6216 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6217 	mem_region_show(sb, "uP Extmem2:", lo, hi);
6218 
6219 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6220 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6221 		   G_PMRXMAXPAGE(lo),
6222 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6223 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
6224 
6225 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6226 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6227 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6228 		   G_PMTXMAXPAGE(lo),
6229 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6230 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6231 	sbuf_printf(sb, "%u p-structs\n",
6232 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6233 
6234 	for (i = 0; i < 4; i++) {
6235 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6236 		if (is_t4(sc)) {
6237 			used = G_USED(lo);
6238 			alloc = G_ALLOC(lo);
6239 		} else {
6240 			used = G_T5_USED(lo);
6241 			alloc = G_T5_ALLOC(lo);
6242 		}
6243 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6244 			   i, used, alloc);
6245 	}
6246 	for (i = 0; i < 4; i++) {
6247 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6248 		if (is_t4(sc)) {
6249 			used = G_USED(lo);
6250 			alloc = G_ALLOC(lo);
6251 		} else {
6252 			used = G_T5_USED(lo);
6253 			alloc = G_T5_ALLOC(lo);
6254 		}
6255 		sbuf_printf(sb,
6256 			   "\nLoopback %d using %u pages out of %u allocated",
6257 			   i, used, alloc);
6258 	}
6259 
6260 	rc = sbuf_finish(sb);
6261 	sbuf_delete(sb);
6262 
6263 	return (rc);
6264 }
6265 
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	int i;

	/*
	 * The mask is the union of the TCAM X and Y words.  The Ethernet
	 * address is the low 48 bits of Y, emitted into addr[] in network
	 * (big-endian) byte order, most significant byte first.
	 */
	*mask = x | y;
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		addr[i] = (y >> (8 * (ETHER_ADDR_LEN - 1 - i))) & 0xff;
}
6273 
/*
 * Sysctl handler that dumps the MPS TCAM: one line per usable entry with
 * the Ethernet address/mask, SRAM classification fields, and (for entries
 * with replication enabled) the replication bitmap fetched from firmware.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	/* T4 and T5 have different numbers of TCAM entries. */
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		/* Skip entries whose X and Y words overlap. */
		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
			   addr[3], addr[4], addr[5], (uintmax_t)mask,
			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
			   G_PORTMAP(cls_hi), G_PF(cls_lo),
			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/*
			 * The replication map is kept by the firmware; fetch
			 * it with an MPS load/store command over the mailbox.
			 */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_CTL(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Record the error inline and keep dumping. */
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* rc != 0 here only if begin_synchronized_op failed mid-loop. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6363 
6364 static int
6365 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6366 {
6367 	struct adapter *sc = arg1;
6368 	struct sbuf *sb;
6369 	int rc;
6370 	uint16_t mtus[NMTUS];
6371 
6372 	rc = sysctl_wire_old_buffer(req, 0);
6373 	if (rc != 0)
6374 		return (rc);
6375 
6376 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6377 	if (sb == NULL)
6378 		return (ENOMEM);
6379 
6380 	t4_read_mtu_tbl(sc, mtus, NULL);
6381 
6382 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6383 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6384 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6385 	    mtus[14], mtus[15]);
6386 
6387 	rc = sbuf_finish(sb);
6388 	sbuf_delete(sb);
6389 
6390 	return (rc);
6391 }
6392 
6393 static int
6394 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6395 {
6396 	struct adapter *sc = arg1;
6397 	struct sbuf *sb;
6398 	int rc, i;
6399 	uint32_t cnt[PM_NSTATS];
6400 	uint64_t cyc[PM_NSTATS];
6401 	static const char *rx_stats[] = {
6402 		"Read:", "Write bypass:", "Write mem:", "Flush:"
6403 	};
6404 	static const char *tx_stats[] = {
6405 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6406 	};
6407 
6408 	rc = sysctl_wire_old_buffer(req, 0);
6409 	if (rc != 0)
6410 		return (rc);
6411 
6412 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6413 	if (sb == NULL)
6414 		return (ENOMEM);
6415 
6416 	t4_pmtx_get_stats(sc, cnt, cyc);
6417 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6418 	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6419 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6420 		    cyc[i]);
6421 
6422 	t4_pmrx_get_stats(sc, cnt, cyc);
6423 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6424 	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6425 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6426 		    cyc[i]);
6427 
6428 	rc = sbuf_finish(sb);
6429 	sbuf_delete(sb);
6430 
6431 	return (rc);
6432 }
6433 
6434 static int
6435 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6436 {
6437 	struct adapter *sc = arg1;
6438 	struct sbuf *sb;
6439 	int rc;
6440 	struct tp_rdma_stats stats;
6441 
6442 	rc = sysctl_wire_old_buffer(req, 0);
6443 	if (rc != 0)
6444 		return (rc);
6445 
6446 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6447 	if (sb == NULL)
6448 		return (ENOMEM);
6449 
6450 	t4_tp_get_rdma_stats(sc, &stats);
6451 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6452 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6453 
6454 	rc = sbuf_finish(sb);
6455 	sbuf_delete(sb);
6456 
6457 	return (rc);
6458 }
6459 
6460 static int
6461 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6462 {
6463 	struct adapter *sc = arg1;
6464 	struct sbuf *sb;
6465 	int rc;
6466 	struct tp_tcp_stats v4, v6;
6467 
6468 	rc = sysctl_wire_old_buffer(req, 0);
6469 	if (rc != 0)
6470 		return (rc);
6471 
6472 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6473 	if (sb == NULL)
6474 		return (ENOMEM);
6475 
6476 	t4_tp_get_tcp_stats(sc, &v4, &v6);
6477 	sbuf_printf(sb,
6478 	    "                                IP                 IPv6\n");
6479 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6480 	    v4.tcpOutRsts, v6.tcpOutRsts);
6481 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6482 	    v4.tcpInSegs, v6.tcpInSegs);
6483 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6484 	    v4.tcpOutSegs, v6.tcpOutSegs);
6485 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6486 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6487 
6488 	rc = sbuf_finish(sb);
6489 	sbuf_delete(sb);
6490 
6491 	return (rc);
6492 }
6493 
/*
 * Sysctl handler that reports the adapter's TID (connection id) ranges and
 * current usage: ATIDs, TIDs, STIDs, FTIDs, ETIDs, plus the hardware's
 * active IPv4/IPv6 connection counts.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * With the LE hash table enabled the TID space is
			 * split: a low range below the server index and a
			 * high range starting at the hash base.
			 */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	/* Hardware's own view of active IPv4/IPv6 TID usage. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6558 
6559 static int
6560 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6561 {
6562 	struct adapter *sc = arg1;
6563 	struct sbuf *sb;
6564 	int rc;
6565 	struct tp_err_stats stats;
6566 
6567 	rc = sysctl_wire_old_buffer(req, 0);
6568 	if (rc != 0)
6569 		return (rc);
6570 
6571 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6572 	if (sb == NULL)
6573 		return (ENOMEM);
6574 
6575 	t4_tp_get_err_stats(sc, &stats);
6576 
6577 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6578 		      "channel 3\n");
6579 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6580 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6581 	    stats.macInErrs[3]);
6582 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6583 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6584 	    stats.hdrInErrs[3]);
6585 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6586 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6587 	    stats.tcpInErrs[3]);
6588 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6589 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6590 	    stats.tcp6InErrs[3]);
6591 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6592 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6593 	    stats.tnlCongDrops[3]);
6594 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6595 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6596 	    stats.tnlTxDrops[3]);
6597 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6598 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6599 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6600 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6601 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6602 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6603 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6604 	    stats.ofldNoNeigh, stats.ofldCongDefer);
6605 
6606 	rc = sbuf_finish(sb);
6607 	sbuf_delete(sb);
6608 
6609 	return (rc);
6610 }
6611 
/*
 * Describes one bit-field within a 64-bit debug/LA word: the field's name,
 * its least significant bit position, and its width in bits.  Tables of
 * these are NULL-name terminated (see field_desc_show()).
 */
struct field_desc {
	const char *name;	/* field name; NULL terminates a table */
	u_int start;		/* starting (least significant) bit */
	u_int width;		/* width in bits */
};
6617 
6618 static void
6619 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6620 {
6621 	char buf[32];
6622 	int line_size = 0;
6623 
6624 	while (f->name) {
6625 		uint64_t mask = (1ULL << f->width) - 1;
6626 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6627 		    ((uintmax_t)v >> f->start) & mask);
6628 
6629 		if (line_size + len >= 79) {
6630 			line_size = 8;
6631 			sbuf_printf(sb, "\n        ");
6632 		}
6633 		sbuf_printf(sb, "%s ", buf);
6634 		line_size += len + 1;
6635 		f++;
6636 	}
6637 	sbuf_printf(sb, "\n");
6638 }
6639 
6640 static struct field_desc tp_la0[] = {
6641 	{ "RcfOpCodeOut", 60, 4 },
6642 	{ "State", 56, 4 },
6643 	{ "WcfState", 52, 4 },
6644 	{ "RcfOpcSrcOut", 50, 2 },
6645 	{ "CRxError", 49, 1 },
6646 	{ "ERxError", 48, 1 },
6647 	{ "SanityFailed", 47, 1 },
6648 	{ "SpuriousMsg", 46, 1 },
6649 	{ "FlushInputMsg", 45, 1 },
6650 	{ "FlushInputCpl", 44, 1 },
6651 	{ "RssUpBit", 43, 1 },
6652 	{ "RssFilterHit", 42, 1 },
6653 	{ "Tid", 32, 10 },
6654 	{ "InitTcb", 31, 1 },
6655 	{ "LineNumber", 24, 7 },
6656 	{ "Emsg", 23, 1 },
6657 	{ "EdataOut", 22, 1 },
6658 	{ "Cmsg", 21, 1 },
6659 	{ "CdataOut", 20, 1 },
6660 	{ "EreadPdu", 19, 1 },
6661 	{ "CreadPdu", 18, 1 },
6662 	{ "TunnelPkt", 17, 1 },
6663 	{ "RcfPeerFin", 16, 1 },
6664 	{ "RcfReasonOut", 12, 4 },
6665 	{ "TxCchannel", 10, 2 },
6666 	{ "RcfTxChannel", 8, 2 },
6667 	{ "RxEchannel", 6, 2 },
6668 	{ "RcfRxChannel", 5, 1 },
6669 	{ "RcfDataOutSrdy", 4, 1 },
6670 	{ "RxDvld", 3, 1 },
6671 	{ "RxOoDvld", 2, 1 },
6672 	{ "RxCongestion", 1, 1 },
6673 	{ "TxCongestion", 0, 1 },
6674 	{ NULL }
6675 };
6676 
6677 static struct field_desc tp_la1[] = {
6678 	{ "CplCmdIn", 56, 8 },
6679 	{ "CplCmdOut", 48, 8 },
6680 	{ "ESynOut", 47, 1 },
6681 	{ "EAckOut", 46, 1 },
6682 	{ "EFinOut", 45, 1 },
6683 	{ "ERstOut", 44, 1 },
6684 	{ "SynIn", 43, 1 },
6685 	{ "AckIn", 42, 1 },
6686 	{ "FinIn", 41, 1 },
6687 	{ "RstIn", 40, 1 },
6688 	{ "DataIn", 39, 1 },
6689 	{ "DataInVld", 38, 1 },
6690 	{ "PadIn", 37, 1 },
6691 	{ "RxBufEmpty", 36, 1 },
6692 	{ "RxDdp", 35, 1 },
6693 	{ "RxFbCongestion", 34, 1 },
6694 	{ "TxFbCongestion", 33, 1 },
6695 	{ "TxPktSumSrdy", 32, 1 },
6696 	{ "RcfUlpType", 28, 4 },
6697 	{ "Eread", 27, 1 },
6698 	{ "Ebypass", 26, 1 },
6699 	{ "Esave", 25, 1 },
6700 	{ "Static0", 24, 1 },
6701 	{ "Cread", 23, 1 },
6702 	{ "Cbypass", 22, 1 },
6703 	{ "Csave", 21, 1 },
6704 	{ "CPktOut", 20, 1 },
6705 	{ "RxPagePoolFull", 18, 2 },
6706 	{ "RxLpbkPkt", 17, 1 },
6707 	{ "TxLpbkPkt", 16, 1 },
6708 	{ "RxVfValid", 15, 1 },
6709 	{ "SynLearned", 14, 1 },
6710 	{ "SetDelEntry", 13, 1 },
6711 	{ "SetInvEntry", 12, 1 },
6712 	{ "CpcmdDvld", 11, 1 },
6713 	{ "CpcmdSave", 10, 1 },
6714 	{ "RxPstructsFull", 8, 2 },
6715 	{ "EpcmdDvld", 7, 1 },
6716 	{ "EpcmdFlush", 6, 1 },
6717 	{ "EpcmdTrimPrefix", 5, 1 },
6718 	{ "EpcmdTrimPostfix", 4, 1 },
6719 	{ "ERssIp4Pkt", 3, 1 },
6720 	{ "ERssIp6Pkt", 2, 1 },
6721 	{ "ERssTcpUdpPkt", 1, 1 },
6722 	{ "ERssFceFipPkt", 0, 1 },
6723 	{ NULL }
6724 };
6725 
6726 static struct field_desc tp_la2[] = {
6727 	{ "CplCmdIn", 56, 8 },
6728 	{ "MpsVfVld", 55, 1 },
6729 	{ "MpsPf", 52, 3 },
6730 	{ "MpsVf", 44, 8 },
6731 	{ "SynIn", 43, 1 },
6732 	{ "AckIn", 42, 1 },
6733 	{ "FinIn", 41, 1 },
6734 	{ "RstIn", 40, 1 },
6735 	{ "DataIn", 39, 1 },
6736 	{ "DataInVld", 38, 1 },
6737 	{ "PadIn", 37, 1 },
6738 	{ "RxBufEmpty", 36, 1 },
6739 	{ "RxDdp", 35, 1 },
6740 	{ "RxFbCongestion", 34, 1 },
6741 	{ "TxFbCongestion", 33, 1 },
6742 	{ "TxPktSumSrdy", 32, 1 },
6743 	{ "RcfUlpType", 28, 4 },
6744 	{ "Eread", 27, 1 },
6745 	{ "Ebypass", 26, 1 },
6746 	{ "Esave", 25, 1 },
6747 	{ "Static0", 24, 1 },
6748 	{ "Cread", 23, 1 },
6749 	{ "Cbypass", 22, 1 },
6750 	{ "Csave", 21, 1 },
6751 	{ "CPktOut", 20, 1 },
6752 	{ "RxPagePoolFull", 18, 2 },
6753 	{ "RxLpbkPkt", 17, 1 },
6754 	{ "TxLpbkPkt", 16, 1 },
6755 	{ "RxVfValid", 15, 1 },
6756 	{ "SynLearned", 14, 1 },
6757 	{ "SetDelEntry", 13, 1 },
6758 	{ "SetInvEntry", 12, 1 },
6759 	{ "CpcmdDvld", 11, 1 },
6760 	{ "CpcmdSave", 10, 1 },
6761 	{ "RxPstructsFull", 8, 2 },
6762 	{ "EpcmdDvld", 7, 1 },
6763 	{ "EpcmdFlush", 6, 1 },
6764 	{ "EpcmdTrimPrefix", 5, 1 },
6765 	{ "EpcmdTrimPostfix", 4, 1 },
6766 	{ "ERssIp4Pkt", 3, 1 },
6767 	{ "ERssIp6Pkt", 2, 1 },
6768 	{ "ERssTcpUdpPkt", 1, 1 },
6769 	{ "ERssFceFipPkt", 0, 1 },
6770 	{ NULL }
6771 };
6772 
/*
 * Show one single-word TP LA entry (the default capture modes in
 * sysctl_tp_la()).  idx is unused here but kept so all three show
 * functions share the same function-pointer signature.
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}
6779 
6780 static void
6781 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6782 {
6783 
6784 	if (idx)
6785 		sbuf_printf(sb, "\n");
6786 	field_desc_show(sb, p[0], tp_la0);
6787 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6788 		field_desc_show(sb, p[1], tp_la0);
6789 }
6790 
6791 static void
6792 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6793 {
6794 
6795 	if (idx)
6796 		sbuf_printf(sb, "\n");
6797 	field_desc_show(sb, p[0], tp_la0);
6798 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6799 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6800 }
6801 
6802 static int
6803 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6804 {
6805 	struct adapter *sc = arg1;
6806 	struct sbuf *sb;
6807 	uint64_t *buf, *p;
6808 	int rc;
6809 	u_int i, inc;
6810 	void (*show_func)(struct sbuf *, uint64_t *, int);
6811 
6812 	rc = sysctl_wire_old_buffer(req, 0);
6813 	if (rc != 0)
6814 		return (rc);
6815 
6816 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6817 	if (sb == NULL)
6818 		return (ENOMEM);
6819 
6820 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6821 
6822 	t4_tp_read_la(sc, buf, NULL);
6823 	p = buf;
6824 
6825 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6826 	case 2:
6827 		inc = 2;
6828 		show_func = tp_la_show2;
6829 		break;
6830 	case 3:
6831 		inc = 2;
6832 		show_func = tp_la_show3;
6833 		break;
6834 	default:
6835 		inc = 1;
6836 		show_func = tp_la_show;
6837 	}
6838 
6839 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6840 		(*show_func)(sb, p, i);
6841 
6842 	rc = sbuf_finish(sb);
6843 	sbuf_delete(sb);
6844 	free(buf, M_CXGBE);
6845 	return (rc);
6846 }
6847 
6848 static int
6849 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6850 {
6851 	struct adapter *sc = arg1;
6852 	struct sbuf *sb;
6853 	int rc;
6854 	u64 nrate[NCHAN], orate[NCHAN];
6855 
6856 	rc = sysctl_wire_old_buffer(req, 0);
6857 	if (rc != 0)
6858 		return (rc);
6859 
6860 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6861 	if (sb == NULL)
6862 		return (ENOMEM);
6863 
6864 	t4_get_chan_txrate(sc, nrate, orate);
6865 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6866 		 "channel 3\n");
6867 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6868 	    nrate[0], nrate[1], nrate[2], nrate[3]);
6869 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6870 	    orate[0], orate[1], orate[2], orate[3]);
6871 
6872 	rc = sbuf_finish(sb);
6873 	sbuf_delete(sb);
6874 
6875 	return (rc);
6876 }
6877 
6878 static int
6879 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6880 {
6881 	struct adapter *sc = arg1;
6882 	struct sbuf *sb;
6883 	uint32_t *buf, *p;
6884 	int rc, i;
6885 
6886 	rc = sysctl_wire_old_buffer(req, 0);
6887 	if (rc != 0)
6888 		return (rc);
6889 
6890 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6891 	if (sb == NULL)
6892 		return (ENOMEM);
6893 
6894 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6895 	    M_ZERO | M_WAITOK);
6896 
6897 	t4_ulprx_read_la(sc, buf);
6898 	p = buf;
6899 
6900 	sbuf_printf(sb, "      Pcmd        Type   Message"
6901 	    "                Data");
6902 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6903 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6904 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6905 	}
6906 
6907 	rc = sbuf_finish(sb);
6908 	sbuf_delete(sb);
6909 	free(buf, M_CXGBE);
6910 	return (rc);
6911 }
6912 
6913 static int
6914 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6915 {
6916 	struct adapter *sc = arg1;
6917 	struct sbuf *sb;
6918 	int rc, v;
6919 
6920 	rc = sysctl_wire_old_buffer(req, 0);
6921 	if (rc != 0)
6922 		return (rc);
6923 
6924 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6925 	if (sb == NULL)
6926 		return (ENOMEM);
6927 
6928 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6929 	if (G_STATSOURCE_T5(v) == 7) {
6930 		if (G_STATMODE(v) == 0) {
6931 			sbuf_printf(sb, "total %d, incomplete %d",
6932 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6933 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6934 		} else if (G_STATMODE(v) == 1) {
6935 			sbuf_printf(sb, "total %d, data overflow %d",
6936 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6937 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6938 		}
6939 	}
6940 	rc = sbuf_finish(sb);
6941 	sbuf_delete(sb);
6942 
6943 	return (rc);
6944 }
6945 #endif
6946 
6947 static inline void
6948 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6949 {
6950 	struct buf_ring *br;
6951 	struct mbuf *m;
6952 
6953 	TXQ_LOCK_ASSERT_OWNED(txq);
6954 
6955 	br = txq->br;
6956 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6957 	if (m)
6958 		t4_eth_tx(ifp, txq, m);
6959 }
6960 
6961 void
6962 t4_tx_callout(void *arg)
6963 {
6964 	struct sge_eq *eq = arg;
6965 	struct adapter *sc;
6966 
6967 	if (EQ_TRYLOCK(eq) == 0)
6968 		goto reschedule;
6969 
6970 	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6971 		EQ_UNLOCK(eq);
6972 reschedule:
6973 		if (__predict_true(!(eq->flags && EQ_DOOMED)))
6974 			callout_schedule(&eq->tx_callout, 1);
6975 		return;
6976 	}
6977 
6978 	EQ_LOCK_ASSERT_OWNED(eq);
6979 
6980 	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6981 
6982 		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6983 			struct sge_txq *txq = arg;
6984 			struct port_info *pi = txq->ifp->if_softc;
6985 
6986 			sc = pi->adapter;
6987 		} else {
6988 			struct sge_wrq *wrq = arg;
6989 
6990 			sc = wrq->adapter;
6991 		}
6992 
6993 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6994 	}
6995 
6996 	EQ_UNLOCK(eq);
6997 }
6998 
6999 void
7000 t4_tx_task(void *arg, int count)
7001 {
7002 	struct sge_eq *eq = arg;
7003 
7004 	EQ_LOCK(eq);
7005 	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
7006 		struct sge_txq *txq = arg;
7007 		txq_start(txq->ifp, txq);
7008 	} else {
7009 		struct sge_wrq *wrq = arg;
7010 		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
7011 	}
7012 	EQ_UNLOCK(eq);
7013 }
7014 
7015 static uint32_t
7016 fconf_to_mode(uint32_t fconf)
7017 {
7018 	uint32_t mode;
7019 
7020 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7021 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7022 
7023 	if (fconf & F_FRAGMENTATION)
7024 		mode |= T4_FILTER_IP_FRAGMENT;
7025 
7026 	if (fconf & F_MPSHITTYPE)
7027 		mode |= T4_FILTER_MPS_HIT_TYPE;
7028 
7029 	if (fconf & F_MACMATCH)
7030 		mode |= T4_FILTER_MAC_IDX;
7031 
7032 	if (fconf & F_ETHERTYPE)
7033 		mode |= T4_FILTER_ETH_TYPE;
7034 
7035 	if (fconf & F_PROTOCOL)
7036 		mode |= T4_FILTER_IP_PROTO;
7037 
7038 	if (fconf & F_TOS)
7039 		mode |= T4_FILTER_IP_TOS;
7040 
7041 	if (fconf & F_VLAN)
7042 		mode |= T4_FILTER_VLAN;
7043 
7044 	if (fconf & F_VNIC_ID)
7045 		mode |= T4_FILTER_VNIC;
7046 
7047 	if (fconf & F_PORT)
7048 		mode |= T4_FILTER_PORT;
7049 
7050 	if (fconf & F_FCOE)
7051 		mode |= T4_FILTER_FCoE;
7052 
7053 	return (mode);
7054 }
7055 
7056 static uint32_t
7057 mode_to_fconf(uint32_t mode)
7058 {
7059 	uint32_t fconf = 0;
7060 
7061 	if (mode & T4_FILTER_IP_FRAGMENT)
7062 		fconf |= F_FRAGMENTATION;
7063 
7064 	if (mode & T4_FILTER_MPS_HIT_TYPE)
7065 		fconf |= F_MPSHITTYPE;
7066 
7067 	if (mode & T4_FILTER_MAC_IDX)
7068 		fconf |= F_MACMATCH;
7069 
7070 	if (mode & T4_FILTER_ETH_TYPE)
7071 		fconf |= F_ETHERTYPE;
7072 
7073 	if (mode & T4_FILTER_IP_PROTO)
7074 		fconf |= F_PROTOCOL;
7075 
7076 	if (mode & T4_FILTER_IP_TOS)
7077 		fconf |= F_TOS;
7078 
7079 	if (mode & T4_FILTER_VLAN)
7080 		fconf |= F_VLAN;
7081 
7082 	if (mode & T4_FILTER_VNIC)
7083 		fconf |= F_VNIC_ID;
7084 
7085 	if (mode & T4_FILTER_PORT)
7086 		fconf |= F_PORT;
7087 
7088 	if (mode & T4_FILTER_FCoE)
7089 		fconf |= F_FCOE;
7090 
7091 	return (fconf);
7092 }
7093 
7094 static uint32_t
7095 fspec_to_fconf(struct t4_filter_specification *fs)
7096 {
7097 	uint32_t fconf = 0;
7098 
7099 	if (fs->val.frag || fs->mask.frag)
7100 		fconf |= F_FRAGMENTATION;
7101 
7102 	if (fs->val.matchtype || fs->mask.matchtype)
7103 		fconf |= F_MPSHITTYPE;
7104 
7105 	if (fs->val.macidx || fs->mask.macidx)
7106 		fconf |= F_MACMATCH;
7107 
7108 	if (fs->val.ethtype || fs->mask.ethtype)
7109 		fconf |= F_ETHERTYPE;
7110 
7111 	if (fs->val.proto || fs->mask.proto)
7112 		fconf |= F_PROTOCOL;
7113 
7114 	if (fs->val.tos || fs->mask.tos)
7115 		fconf |= F_TOS;
7116 
7117 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
7118 		fconf |= F_VLAN;
7119 
7120 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
7121 		fconf |= F_VNIC_ID;
7122 
7123 	if (fs->val.iport || fs->mask.iport)
7124 		fconf |= F_PORT;
7125 
7126 	if (fs->val.fcoe || fs->mask.fcoe)
7127 		fconf |= F_FCOE;
7128 
7129 	return (fconf);
7130 }
7131 
/*
 * Report the current global filter mode as T4_FILTER_* bits.  Reads the
 * mode (TP_VLAN_PRI_MAP) straight from hardware and resynchronizes the
 * cached copy if it has drifted.  Returns 0 on success or an error from
 * begin_synchronized_op().
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* Keep the cached copy in sync with what the hardware says. */
	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
7158 
/*
 * Set the global filter mode from T4_FILTER_* bits.  Fails with EBUSY if
 * any filters are in use or TCP offload is active (the mode can't change
 * under them), and currently always returns ENOTSUP because the actual
 * mode change is compiled out (see the "notyet" block below).
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
7196 
/*
 * Read the hit count of filter fid directly from its TCB via memory
 * window 0.  The count is stored big-endian in the TCB; it is a 64-bit
 * value at byte offset 16 on T4 and a 32-bit value at byte offset 24 on
 * T5 (offsets per the register reads below).
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	/* Point memory window 0 at this filter's TCB. */
	memwin_info(sc, 0, &mw_base, NULL);
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
7216 
/*
 * Fill *t with the first valid filter at index >= t->idx.  On success
 * t->idx is set to the filter's index and the rest of *t describes it;
 * if there is no such filter t->idx is set to 0xffffffff.  Always
 * returns 0 (errors from begin_synchronized_op aside).
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* No filters configured at all, or the start index is out of range. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* Hit counts are only kept if the filter asked. */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
7255 
/*
 * Validate and program the hardware filter described by *t at index t->idx.
 * On success this sleeps until the firmware acknowledges the filter (the
 * reply is processed by t4_filter_rpl, which wakes us), so the caller knows
 * whether the filter actually took effect.  Returns 0, an errno, or
 * EINPROGRESS if the wait was interrupted after the work request was sent.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	/*
	 * NOTE(review): ">=" also rejects t->idx + 4 == nfilters, i.e. an
	 * IPv6 filter in the last four slots; looks off-by-one -- confirm
	 * against the firmware's expectations before changing.
	 */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Allocate the filter table lazily, on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter (type == 1) occupies 4 consecutive slots, all of
	 * which must be free and unlocked; an IPv4 filter needs just one.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/*
	 * rc == 0 means the work request was sent (so f points at a valid
	 * entry with f->pending set); wait here for the firmware's reply.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				/* Interrupted; the filter may still land. */
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
7370 
/*
 * Delete the hardware filter at index t->idx.  Mirrors set_filter: the
 * delete work request is sent under the synchronized op, then we sleep
 * until t4_filter_rpl processes the firmware's reply and wakes us.
 * Returns 0, an errno, or EINPROGRESS if the wait was interrupted.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* A filter with an outstanding add/delete can't be touched. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/*
	 * rc == 0 means the delete request was sent; wait for the firmware
	 * to confirm.  On success the entry has been cleared, so f->valid
	 * still set means the delete failed (EIO).
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7438 
7439 static void
7440 clear_filter(struct filter_entry *f)
7441 {
7442 	if (f->l2t)
7443 		t4_l2t_release(f->l2t);
7444 
7445 	bzero(f, sizeof (*f));
7446 }
7447 
/*
 * Build and transmit the FW_FILTER_WR work request that programs filter
 * 'fidx' in the hardware.  Allocates a switching L2T entry first if the
 * filter rewrites the dmac or vlan.  The firmware's reply is handled
 * asynchronously by t4_filter_rpl; until then f->pending is set.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* The hardware tid is the filter index offset by the filter region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Translate the filter specification into the firmware's layout. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies are steered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Cleared (and ftids_in_use adjusted) by t4_filter_rpl. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7550 
7551 static int
7552 del_filter_wr(struct adapter *sc, int fidx)
7553 {
7554 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7555 	struct wrqe *wr;
7556 	struct fw_filter_wr *fwr;
7557 	unsigned int ftid;
7558 
7559 	ftid = sc->tids.ftid_base + fidx;
7560 
7561 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7562 	if (wr == NULL)
7563 		return (ENOMEM);
7564 	fwr = wrtod(wr);
7565 	bzero(fwr, sizeof (*fwr));
7566 
7567 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7568 
7569 	f->pending = 1;
7570 	t4_wrq_tx(sc, wr);
7571 	return (0);
7572 }
7573 
/*
 * Handler for the firmware's CPL_SET_TCB_RPL reply to a filter add/delete
 * work request.  Updates the software filter entry to match the outcome and
 * wakes any thread sleeping in set_filter/del_filter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only replies for tids in the filter region are of interest here. */
	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The cookie carries the firmware's status for the request. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted (or failed to add); release the entry. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake the thread waiting in set_filter/del_filter. */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7616 
7617 static int
7618 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7619 {
7620 	int rc;
7621 
7622 	if (cntxt->cid > M_CTXTQID)
7623 		return (EINVAL);
7624 
7625 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7626 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7627 		return (EINVAL);
7628 
7629 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7630 	if (rc)
7631 		return (rc);
7632 
7633 	if (sc->flags & FW_OK) {
7634 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7635 		    &cntxt->data[0]);
7636 		if (rc == 0)
7637 			goto done;
7638 	}
7639 
7640 	/*
7641 	 * Read via firmware failed or wasn't even attempted.  Read directly via
7642 	 * the backdoor.
7643 	 */
7644 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7645 done:
7646 	end_synchronized_op(sc, 0);
7647 	return (rc);
7648 }
7649 
7650 static int
7651 load_fw(struct adapter *sc, struct t4_data *fw)
7652 {
7653 	int rc;
7654 	uint8_t *fw_data;
7655 
7656 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7657 	if (rc)
7658 		return (rc);
7659 
7660 	if (sc->flags & FULL_INIT_DONE) {
7661 		rc = EBUSY;
7662 		goto done;
7663 	}
7664 
7665 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7666 	if (fw_data == NULL) {
7667 		rc = ENOMEM;
7668 		goto done;
7669 	}
7670 
7671 	rc = copyin(fw->data, fw_data, fw->len);
7672 	if (rc == 0)
7673 		rc = -t4_load_fw(sc, fw_data, fw->len);
7674 
7675 	free(fw_data, M_CXGBE);
7676 done:
7677 	end_synchronized_op(sc, 0);
7678 	return (rc);
7679 }
7680 
/*
 * Copy a range of adapter memory out to the userland buffer in mr->data,
 * reading through PCIe memory window 'win' one aperture-sized chunk at a
 * time.  The range is validated by validate_mem_range() first.
 * NOTE(review): the inner loop reads in 4-byte registers and assumes the
 * validated addr/len are 4-byte aligned -- confirm validate_mem_range
 * enforces that.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	/* Bounce buffer: at most one window aperture's worth at a time. */
	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		/* Reposition the window; 'off' is addr's offset within it. */
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7721 
7722 static int
7723 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7724 {
7725 	int rc;
7726 
7727 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7728 		return (EINVAL);
7729 
7730 	if (i2cd->len > sizeof(i2cd->data))
7731 		return (EFBIG);
7732 
7733 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7734 	if (rc)
7735 		return (rc);
7736 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7737 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
7738 	end_synchronized_op(sc, 0);
7739 
7740 	return (rc);
7741 }
7742 
/*
 * Range check used by the scheduler ioctls.  A negative value means the
 * parameter was left unset by the caller and always passes.
 */
static int
in_range(int val, int lo, int hi)
{

	if (val < 0)
		return (1);

	return (lo <= val && val <= hi);
}
7749 
/*
 * Configure the TX packet scheduler: either a global config knob or the
 * parameters of one scheduling class.  Translates the cxgbetool request in
 * *p into firmware nomenclature, validates ranges, and passes the request
 * to the firmware.
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
	 * sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ..*/
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ...*/
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		/* Map each enum the tool uses onto the firmware's encoding. */
		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		/* Negative values mean "unset" and pass in_range(). */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7898 
/*
 * Bind a TX queue (or, if p->queue is negative, every TX queue of the port)
 * to scheduling class p->cl.  A negative p->cl unbinds (0xffffffff to the
 * firmware).
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	/* Negative queue/cl mean "all queues" / "unbind" respectively. */
	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7964 
7965 int
7966 t4_os_find_pci_capability(struct adapter *sc, int cap)
7967 {
7968 	int i;
7969 
7970 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7971 }
7972 
7973 int
7974 t4_os_pci_save_state(struct adapter *sc)
7975 {
7976 	device_t dev;
7977 	struct pci_devinfo *dinfo;
7978 
7979 	dev = sc->dev;
7980 	dinfo = device_get_ivars(dev);
7981 
7982 	pci_cfg_save(dev, dinfo, 0);
7983 	return (0);
7984 }
7985 
7986 int
7987 t4_os_pci_restore_state(struct adapter *sc)
7988 {
7989 	device_t dev;
7990 	struct pci_devinfo *dinfo;
7991 
7992 	dev = sc->dev;
7993 	dinfo = device_get_ivars(dev);
7994 
7995 	pci_cfg_restore(dev, dinfo);
7996 	return (0);
7997 }
7998 
7999 void
8000 t4_os_portmod_changed(const struct adapter *sc, int idx)
8001 {
8002 	struct port_info *pi = sc->port[idx];
8003 	static const char *mod_str[] = {
8004 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
8005 	};
8006 
8007 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
8008 		if_printf(pi->ifp, "transceiver unplugged.\n");
8009 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
8010 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
8011 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
8012 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
8013 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
8014 		if_printf(pi->ifp, "%s transceiver inserted.\n",
8015 		    mod_str[pi->mod_type]);
8016 	} else {
8017 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
8018 		    pi->mod_type);
8019 	}
8020 }
8021 
8022 void
8023 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
8024 {
8025 	struct port_info *pi = sc->port[idx];
8026 	struct ifnet *ifp = pi->ifp;
8027 
8028 	if (link_stat) {
8029 		pi->linkdnrc = -1;
8030 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
8031 		if_link_state_change(ifp, LINK_STATE_UP);
8032 	} else {
8033 		if (reason >= 0)
8034 			pi->linkdnrc = reason;
8035 		if_link_state_change(ifp, LINK_STATE_DOWN);
8036 	}
8037 }
8038 
/*
 * Invoke 'func(sc, arg)' for every T4/T5 adapter on the global list,
 * holding the list lock shared for the duration.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
8054 
/*
 * Device node open: nothing to set up, all the work happens in t4_ioctl.
 */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
8060 
/*
 * Device node close: nothing to tear down.
 */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
8066 
/*
 * ioctl handler for the adapter's control device node (/dev/t4nexX).
 * Requires PRIV_DRIVER.  Most cases delegate to a helper; a few simple
 * register/statistics operations are handled inline.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register offset must be 4-aligned and inside the BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry high-order bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		/* Reset the MAC and software queue statistics of one port. */
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
8226 
8227 #ifdef TCP_OFFLOAD
8228 void
8229 t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
8230     const unsigned int *pgsz_order)
8231 {
8232 	struct port_info *pi = ifp->if_softc;
8233 	struct adapter *sc = pi->adapter;
8234 
8235 	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
8236 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
8237 		V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
8238 		V_HPZ3(pgsz_order[3]));
8239 }
8240 
/*
 * Enable or disable TOE on a port.  Enabling brings the adapter and TOM
 * (TCP offload module) fully up if they aren't already; disabling just
 * clears the port's bit in the offload map.  Called with the synchronized
 * op held.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port; nothing more to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
8289 
8290 /*
8291  * Add an upper layer driver to the global list.
8292  */
8293 int
8294 t4_register_uld(struct uld_info *ui)
8295 {
8296 	int rc = 0;
8297 	struct uld_info *u;
8298 
8299 	sx_xlock(&t4_uld_list_lock);
8300 	SLIST_FOREACH(u, &t4_uld_list, link) {
8301 	    if (u->uld_id == ui->uld_id) {
8302 		    rc = EEXIST;
8303 		    goto done;
8304 	    }
8305 	}
8306 
8307 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
8308 	ui->refcount = 0;
8309 done:
8310 	sx_xunlock(&t4_uld_list_lock);
8311 	return (rc);
8312 }
8313 
8314 int
8315 t4_unregister_uld(struct uld_info *ui)
8316 {
8317 	int rc = EINVAL;
8318 	struct uld_info *u;
8319 
8320 	sx_xlock(&t4_uld_list_lock);
8321 
8322 	SLIST_FOREACH(u, &t4_uld_list, link) {
8323 	    if (u == ui) {
8324 		    if (ui->refcount > 0) {
8325 			    rc = EBUSY;
8326 			    goto done;
8327 		    }
8328 
8329 		    SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
8330 		    rc = 0;
8331 		    goto done;
8332 	    }
8333 	}
8334 done:
8335 	sx_xunlock(&t4_uld_list_lock);
8336 	return (rc);
8337 }
8338 
8339 int
8340 t4_activate_uld(struct adapter *sc, int id)
8341 {
8342 	int rc = EAGAIN;
8343 	struct uld_info *ui;
8344 
8345 	ASSERT_SYNCHRONIZED_OP(sc);
8346 
8347 	sx_slock(&t4_uld_list_lock);
8348 
8349 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8350 		if (ui->uld_id == id) {
8351 			if (!(sc->flags & FULL_INIT_DONE)) {
8352 				rc = adapter_full_init(sc);
8353 				if (rc != 0)
8354 					goto done;
8355 			}
8356 
8357 			rc = ui->activate(sc);
8358 			if (rc == 0)
8359 				ui->refcount++;
8360 			goto done;
8361 		}
8362 	}
8363 done:
8364 	sx_sunlock(&t4_uld_list_lock);
8365 
8366 	return (rc);
8367 }
8368 
8369 int
8370 t4_deactivate_uld(struct adapter *sc, int id)
8371 {
8372 	int rc = EINVAL;
8373 	struct uld_info *ui;
8374 
8375 	ASSERT_SYNCHRONIZED_OP(sc);
8376 
8377 	sx_slock(&t4_uld_list_lock);
8378 
8379 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8380 		if (ui->uld_id == id) {
8381 			rc = ui->deactivate(sc);
8382 			if (rc == 0)
8383 				ui->refcount--;
8384 			goto done;
8385 		}
8386 	}
8387 done:
8388 	sx_sunlock(&t4_uld_list_lock);
8389 
8390 	return (rc);
8391 }
8392 #endif
8393 
8394 /*
8395  * Come up with reasonable defaults for some of the tunables, provided they're
8396  * not set by the user (in which case we'll use the values as is).
8397  */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	/* Queue counts default to min(ncpus, per-speed ceiling). */
	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	/* TOE capability is only offered when the kernel supports it. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	/* Clamp interrupt holdoff timer/counter indices to valid ranges. */
	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	/* Queue sizes: at least 128 descriptors, rxq a multiple of 8. */
	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	/* Mask off anything that isn't a recognized interrupt type. */
	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
8471 
/* Serializes module load/unload processing in mod_event(). */
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
8474 
8475 static int
8476 mod_event(module_t mod, int cmd, void *arg)
8477 {
8478 	int rc = 0;
8479 	static int loaded = 0;
8480 
8481 	switch (cmd) {
8482 	case MOD_LOAD:
8483 		sx_xlock(&mlu);
8484 		if (loaded++ == 0) {
8485 			t4_sge_modload();
8486 			sx_init(&t4_list_lock, "T4/T5 adapters");
8487 			SLIST_INIT(&t4_list);
8488 #ifdef TCP_OFFLOAD
8489 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
8490 			SLIST_INIT(&t4_uld_list);
8491 #endif
8492 			t4_tracer_modload();
8493 			tweak_tunables();
8494 		}
8495 		sx_xunlock(&mlu);
8496 		break;
8497 
8498 	case MOD_UNLOAD:
8499 		sx_xlock(&mlu);
8500 		if (--loaded == 0) {
8501 			int tries;
8502 
8503 			sx_slock(&t4_list_lock);
8504 			if (!SLIST_EMPTY(&t4_list)) {
8505 				rc = EBUSY;
8506 				sx_sunlock(&t4_list_lock);
8507 				goto done_unload;
8508 			}
8509 #ifdef TCP_OFFLOAD
8510 			sx_slock(&t4_uld_list_lock);
8511 			if (!SLIST_EMPTY(&t4_uld_list)) {
8512 				rc = EBUSY;
8513 				sx_sunlock(&t4_uld_list_lock);
8514 				sx_sunlock(&t4_list_lock);
8515 				goto done_unload;
8516 			}
8517 #endif
8518 			tries = 0;
8519 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
8520 				uprintf("%ju clusters with custom free routine "
8521 				    "still is use.\n", t4_sge_extfree_refs());
8522 				pause("t4unload", 2 * hz);
8523 			}
8524 #ifdef TCP_OFFLOAD
8525 			sx_sunlock(&t4_uld_list_lock);
8526 #endif
8527 			sx_sunlock(&t4_list_lock);
8528 
8529 			if (t4_sge_extfree_refs() == 0) {
8530 				t4_tracer_modunload();
8531 #ifdef TCP_OFFLOAD
8532 				sx_destroy(&t4_uld_list_lock);
8533 #endif
8534 				sx_destroy(&t4_list_lock);
8535 				t4_sge_modunload();
8536 				loaded = 0;
8537 			} else {
8538 				rc = EBUSY;
8539 				loaded++;	/* undo earlier decrement */
8540 			}
8541 		}
8542 done_unload:
8543 		sx_xunlock(&mlu);
8544 		break;
8545 	}
8546 
8547 	return (rc);
8548 }
8549 
/* devclasses for the nexus drivers (t4nex/t5nex) and port drivers (cxgbe/cxl). */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver attaches to the PCI bus; mod_event handles load/unload. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus driver shares the same mod_event handler (global state is shared). */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* T4 port (network interface) driver attaches to the t4nex bus. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

/* T5 port (network interface) driver attaches to the t5nex bus. */
DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
8566