xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision 895f86f15fbf6540071feb9328c3c50ed1f027b8)
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_rss.h"
34 
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/priv.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/malloc.h>
42 #include <sys/queue.h>
43 #include <sys/taskqueue.h>
44 #include <sys/pciio.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pci_private.h>
48 #include <sys/firmware.h>
49 #include <sys/sbuf.h>
50 #include <sys/smp.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <net/ethernet.h>
55 #include <net/if.h>
56 #include <net/if_types.h>
57 #include <net/if_dl.h>
58 #include <net/if_vlan_var.h>
59 #ifdef RSS
60 #include <net/rss_config.h>
61 #endif
62 #if defined(__i386__) || defined(__amd64__)
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65 #endif
66 
67 #include "common/common.h"
68 #include "common/t4_msg.h"
69 #include "common/t4_regs.h"
70 #include "common/t4_regs_values.h"
71 #include "t4_ioctl.h"
72 #include "t4_l2t.h"
73 #include "t4_mp_ring.h"
74 
75 /* T4 bus driver interface */
76 static int t4_probe(device_t);
77 static int t4_attach(device_t);
78 static int t4_detach(device_t);
79 static device_method_t t4_methods[] = {
80 	DEVMETHOD(device_probe,		t4_probe),
81 	DEVMETHOD(device_attach,	t4_attach),
82 	DEVMETHOD(device_detach,	t4_detach),
83 
84 	DEVMETHOD_END
85 };
86 static driver_t t4_driver = {
87 	"t4nex",
88 	t4_methods,
89 	sizeof(struct adapter)
90 };
91 
92 
93 /* T4 port (cxgbe) interface */
94 static int cxgbe_probe(device_t);
95 static int cxgbe_attach(device_t);
96 static int cxgbe_detach(device_t);
97 static device_method_t cxgbe_methods[] = {
98 	DEVMETHOD(device_probe,		cxgbe_probe),
99 	DEVMETHOD(device_attach,	cxgbe_attach),
100 	DEVMETHOD(device_detach,	cxgbe_detach),
101 	{ 0, 0 }
102 };
103 static driver_t cxgbe_driver = {
104 	"cxgbe",
105 	cxgbe_methods,
106 	sizeof(struct port_info)
107 };
108 
109 /* T4 VI (vcxgbe) interface */
110 static int vcxgbe_probe(device_t);
111 static int vcxgbe_attach(device_t);
112 static int vcxgbe_detach(device_t);
113 static device_method_t vcxgbe_methods[] = {
114 	DEVMETHOD(device_probe,		vcxgbe_probe),
115 	DEVMETHOD(device_attach,	vcxgbe_attach),
116 	DEVMETHOD(device_detach,	vcxgbe_detach),
117 	{ 0, 0 }
118 };
119 static driver_t vcxgbe_driver = {
120 	"vcxgbe",
121 	vcxgbe_methods,
122 	sizeof(struct vi_info)
123 };
124 
125 static d_ioctl_t t4_ioctl;
126 static d_open_t t4_open;
127 static d_close_t t4_close;
128 
129 static struct cdevsw t4_cdevsw = {
130 	.d_version = D_VERSION,
131 	.d_flags = 0,
132 	.d_open = t4_open,
133 	.d_close = t4_close,
134 	.d_ioctl = t4_ioctl,
135 	.d_name = "t4nex",
136 };
137 
138 /* T5 bus driver interface */
139 static int t5_probe(device_t);
140 static device_method_t t5_methods[] = {
141 	DEVMETHOD(device_probe,		t5_probe),
142 	DEVMETHOD(device_attach,	t4_attach),
143 	DEVMETHOD(device_detach,	t4_detach),
144 
145 	DEVMETHOD_END
146 };
147 static driver_t t5_driver = {
148 	"t5nex",
149 	t5_methods,
150 	sizeof(struct adapter)
151 };
152 
153 
154 /* T5 port (cxl) interface */
155 static driver_t cxl_driver = {
156 	"cxl",
157 	cxgbe_methods,
158 	sizeof(struct port_info)
159 };
160 
161 /* T5 VI (vcxl) interface */
162 static driver_t vcxl_driver = {
163 	"vcxl",
164 	vcxgbe_methods,
165 	sizeof(struct vi_info)
166 };
167 
168 static struct cdevsw t5_cdevsw = {
169 	.d_version = D_VERSION,
170 	.d_flags = 0,
171 	.d_open = t4_open,
172 	.d_close = t4_close,
173 	.d_ioctl = t4_ioctl,
174 	.d_name = "t5nex",
175 };
176 
177 /* ifnet + media interface */
178 static void cxgbe_init(void *);
179 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
180 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
181 static void cxgbe_qflush(struct ifnet *);
182 static int cxgbe_media_change(struct ifnet *);
183 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
184 
185 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
186 
187 /*
188  * The correct lock order, when multiple locks must be acquired, is
189  * t4_list_lock, then ADAPTER_LOCK, then t4_uld_list_lock (sketch below).
190  */
191 static struct sx t4_list_lock;
192 SLIST_HEAD(, adapter) t4_list;
193 #ifdef TCP_OFFLOAD
194 static struct sx t4_uld_list_lock;
195 SLIST_HEAD(, uld_info) t4_uld_list;
196 #endif
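
/*
 * An illustrative sketch, not driver code: what the documented lock order
 * looks like when all three locks must be held at once.  ADAPTER_LOCK and
 * ADAPTER_UNLOCK are assumed to be the sc_lock wrappers used elsewhere in
 * this driver.
 */
#if 0
	sx_xlock(&t4_list_lock);
	ADAPTER_LOCK(sc);
	sx_xlock(&t4_uld_list_lock);
	/* ... work that needs all three locks ... */
	sx_xunlock(&t4_uld_list_lock);
	ADAPTER_UNLOCK(sc);
	sx_xunlock(&t4_list_lock);
#endif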
197 
198 /*
199  * Tunables.  See tweak_tunables() too.
200  *
201  * Each tunable is set to a default value here if it's known at compile-time.
202  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
203  * provide a reasonable default when the driver is loaded.
204  *
205  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
206  * T5 are under hw.cxl.
207  */
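
/*
 * Example (illustrative): tunables are normally set from /boot/loader.conf
 * before the module is loaded, e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxl.write_combine="1"
 */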
208 
209 /*
210  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
211  */
212 #define NTXQ_10G 16
213 static int t4_ntxq10g = -1;
214 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
215 
216 #define NRXQ_10G 8
217 static int t4_nrxq10g = -1;
218 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
219 
220 #define NTXQ_1G 4
221 static int t4_ntxq1g = -1;
222 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
223 
224 #define NRXQ_1G 2
225 static int t4_nrxq1g = -1;
226 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
227 
228 static int t4_rsrv_noflowq = 0;
229 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
230 
231 #ifdef TCP_OFFLOAD
232 #define NOFLDTXQ_10G 8
233 static int t4_nofldtxq10g = -1;
234 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
235 
236 #define NOFLDRXQ_10G 2
237 static int t4_nofldrxq10g = -1;
238 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
239 
240 #define NOFLDTXQ_1G 2
241 static int t4_nofldtxq1g = -1;
242 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
243 
244 #define NOFLDRXQ_1G 1
245 static int t4_nofldrxq1g = -1;
246 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
247 #endif
248 
249 #ifdef DEV_NETMAP
250 #define NNMTXQ_10G 2
251 static int t4_nnmtxq10g = -1;
252 TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
253 
254 #define NNMRXQ_10G 2
255 static int t4_nnmrxq10g = -1;
256 TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
257 
258 #define NNMTXQ_1G 1
259 static int t4_nnmtxq1g = -1;
260 TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
261 
262 #define NNMRXQ_1G 1
263 static int t4_nnmrxq1g = -1;
264 TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
265 #endif
266 
267 /*
268  * Holdoff parameters for 10G and 1G ports.
269  */
270 #define TMR_IDX_10G 1
271 static int t4_tmr_idx_10g = TMR_IDX_10G;
272 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
273 
274 #define PKTC_IDX_10G (-1)
275 static int t4_pktc_idx_10g = PKTC_IDX_10G;
276 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
277 
278 #define TMR_IDX_1G 1
279 static int t4_tmr_idx_1g = TMR_IDX_1G;
280 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
281 
282 #define PKTC_IDX_1G (-1)
283 static int t4_pktc_idx_1g = PKTC_IDX_1G;
284 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
285 
286 /*
287  * Size (# of entries) of each tx and rx queue.
288  */
289 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
290 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
291 
292 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
293 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
294 
295 /*
296  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
297  */
298 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
299 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
300 
301 /*
302  * Configuration file.
303  */
304 #define DEFAULT_CF	"default"
305 #define FLASH_CF	"flash"
306 #define UWIRE_CF	"uwire"
307 #define FPGA_CF		"fpga"
308 static char t4_cfg_file[32] = DEFAULT_CF;
309 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
310 
311 /*
312  * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
313  * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
314  * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
315  *            mark or when signalled to do so, 0 to never emit PAUSE.
316  */
317 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
318 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
319 
320 /*
321  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
322  * encouraged respectively).
323  */
324 static unsigned int t4_fw_install = 1;
325 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
326 
327 /*
328  * ASIC features that will be used.  Disable the ones you don't want so that the
329  * chip resources aren't wasted on features that will not be used.
330  */
331 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
332 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
333 
334 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
335 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
336 
337 static int t4_toecaps_allowed = -1;
338 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
339 
340 static int t4_rdmacaps_allowed = 0;
341 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
342 
343 static int t4_iscsicaps_allowed = 0;
344 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
345 
346 static int t4_fcoecaps_allowed = 0;
347 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
348 
349 static int t5_write_combine = 0;
350 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
351 
352 static int t4_num_vis = 1;
353 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
354 
355 /* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
356 static int vi_mac_funcs[] = {
357 	FW_VI_FUNC_OFLD,
358 	FW_VI_FUNC_IWARP,
359 	FW_VI_FUNC_OPENISCSI,
360 	FW_VI_FUNC_OPENFCOE,
361 	FW_VI_FUNC_FOISCSI,
362 	FW_VI_FUNC_FOFCOE,
363 };
364 
365 struct intrs_and_queues {
366 	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
367 	uint16_t nirq;		/* Total # of vectors */
368 	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
369 	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
370 	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
371 	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
372 	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
373 	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
374 	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
375 #ifdef TCP_OFFLOAD
376 	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
377 	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
378 	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
379 	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
380 #endif
381 #ifdef DEV_NETMAP
382 	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
383 	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
384 	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
385 	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
386 #endif
387 };
388 
389 struct filter_entry {
390 	uint32_t valid:1;	/* filter allocated and valid */
391 	uint32_t locked:1;	/* filter is administratively locked */
392 	uint32_t pending:1;	/* filter action is pending firmware reply */
393 	uint32_t smtidx:8;	/* Source MAC Table index for smac */
394 	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
395 
396 	struct t4_filter_specification fs;
397 };
398 
399 static int map_bars_0_and_4(struct adapter *);
400 static int map_bar_2(struct adapter *);
401 static void setup_memwin(struct adapter *);
402 static int validate_mem_range(struct adapter *, uint32_t, int);
403 static int fwmtype_to_hwmtype(int);
404 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
405     uint32_t *);
406 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
407 static uint32_t position_memwin(struct adapter *, int, uint32_t);
408 static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
409     struct intrs_and_queues *);
410 static int prep_firmware(struct adapter *);
411 static int partition_resources(struct adapter *, const struct firmware *,
412     const char *);
413 static int get_params__pre_init(struct adapter *);
414 static int get_params__post_init(struct adapter *);
415 static int set_params__post_init(struct adapter *);
416 static void t4_set_desc(struct adapter *);
417 static void build_medialist(struct port_info *, struct ifmedia *);
418 static int cxgbe_init_synchronized(struct vi_info *);
419 static int cxgbe_uninit_synchronized(struct vi_info *);
420 static int setup_intr_handlers(struct adapter *);
421 static void quiesce_txq(struct adapter *, struct sge_txq *);
422 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
423 static void quiesce_iq(struct adapter *, struct sge_iq *);
424 static void quiesce_fl(struct adapter *, struct sge_fl *);
425 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
426     driver_intr_t *, void *, char *);
427 static int t4_free_irq(struct adapter *, struct irq *);
428 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
429     unsigned int);
430 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
431 static void vi_refresh_stats(struct adapter *, struct vi_info *);
432 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
433 static void cxgbe_tick(void *);
434 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
435 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
436     struct mbuf *);
437 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
438 static int fw_msg_not_handled(struct adapter *, const __be64 *);
439 static void t4_sysctls(struct adapter *);
440 static void cxgbe_sysctls(struct port_info *);
441 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
442 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
443 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
444 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
445 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
446 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
447 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
448 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
449 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
450 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
451 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
452 #ifdef SBUF_DRAIN
453 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
454 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
455 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
456 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
457 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
458 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
459 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
460 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
461 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
462 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
463 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
464 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
465 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
466 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
467 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
468 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
469 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
470 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
471 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
472 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
473 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
474 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
475 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
476 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
477 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
478 #endif
479 static uint32_t fconf_to_mode(uint32_t);
480 static uint32_t mode_to_fconf(uint32_t);
481 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
482 static int get_filter_mode(struct adapter *, uint32_t *);
483 static int set_filter_mode(struct adapter *, uint32_t);
484 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
485 static int get_filter(struct adapter *, struct t4_filter *);
486 static int set_filter(struct adapter *, struct t4_filter *);
487 static int del_filter(struct adapter *, struct t4_filter *);
488 static void clear_filter(struct filter_entry *);
489 static int set_filter_wr(struct adapter *, int);
490 static int del_filter_wr(struct adapter *, int);
491 static int get_sge_context(struct adapter *, struct t4_sge_context *);
492 static int load_fw(struct adapter *, struct t4_data *);
493 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
494 static int read_i2c(struct adapter *, struct t4_i2c_data *);
495 static int set_sched_class(struct adapter *, struct t4_sched_params *);
496 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
497 #ifdef TCP_OFFLOAD
498 static int toe_capability(struct vi_info *, int);
499 #endif
500 static int mod_event(module_t, int, void *);
501 
502 struct {
503 	uint16_t device;
504 	char *desc;
505 } t4_pciids[] = {
506 	{0xa000, "Chelsio Terminator 4 FPGA"},
507 	{0x4400, "Chelsio T440-dbg"},
508 	{0x4401, "Chelsio T420-CR"},
509 	{0x4402, "Chelsio T422-CR"},
510 	{0x4403, "Chelsio T440-CR"},
511 	{0x4404, "Chelsio T420-BCH"},
512 	{0x4405, "Chelsio T440-BCH"},
513 	{0x4406, "Chelsio T440-CH"},
514 	{0x4407, "Chelsio T420-SO"},
515 	{0x4408, "Chelsio T420-CX"},
516 	{0x4409, "Chelsio T420-BT"},
517 	{0x440a, "Chelsio T404-BT"},
518 	{0x440e, "Chelsio T440-LP-CR"},
519 }, t5_pciids[] = {
520 	{0xb000, "Chelsio Terminator 5 FPGA"},
521 	{0x5400, "Chelsio T580-dbg"},
522 	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
523 	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
524 	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
525 	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
526 	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
527 	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
528 	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
529 	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
530 	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
531 	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
532 	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
533 	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
534 	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
535 #ifdef notyet
536 	{0x5404,  "Chelsio T520-BCH"},
537 	{0x5405,  "Chelsio T540-BCH"},
538 	{0x5406,  "Chelsio T540-CH"},
539 	{0x5408,  "Chelsio T520-CX"},
540 	{0x540b,  "Chelsio B520-SR"},
541 	{0x540c,  "Chelsio B504-BT"},
542 	{0x540f,  "Chelsio Amsterdam"},
543 	{0x5413,  "Chelsio T580-CHR"},
544 #endif
545 };
546 
547 #ifdef TCP_OFFLOAD
548 /*
549  * service_iq() is given an iq and needs to locate the fl.  The offset of the
550  * fl from the iq must be exactly the same for both rxq and ofld_rxq.
551  */
552 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
553 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
554 #endif
555 
556 /* No easy way to include t4_msg.h before adapter.h so we check this way */
557 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
558 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
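
/*
 * Note on the idiom above: nitems() applied to a member of a NULL-cast
 * adapter pointer only involves sizeof, so the pointer is never dereferenced
 * and the array-size checks happen entirely at compile time.
 */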
559 
560 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
561 
562 static int
563 t4_probe(device_t dev)
564 {
565 	int i;
566 	uint16_t v = pci_get_vendor(dev);
567 	uint16_t d = pci_get_device(dev);
568 	uint8_t f = pci_get_function(dev);
569 
570 	if (v != PCI_VENDOR_ID_CHELSIO)
571 		return (ENXIO);
572 
573 	/* Attach only to PF0 of the FPGA */
574 	if (d == 0xa000 && f != 0)
575 		return (ENXIO);
576 
577 	for (i = 0; i < nitems(t4_pciids); i++) {
578 		if (d == t4_pciids[i].device) {
579 			device_set_desc(dev, t4_pciids[i].desc);
580 			return (BUS_PROBE_DEFAULT);
581 		}
582 	}
583 
584 	return (ENXIO);
585 }
586 
587 static int
588 t5_probe(device_t dev)
589 {
590 	int i;
591 	uint16_t v = pci_get_vendor(dev);
592 	uint16_t d = pci_get_device(dev);
593 	uint8_t f = pci_get_function(dev);
594 
595 	if (v != PCI_VENDOR_ID_CHELSIO)
596 		return (ENXIO);
597 
598 	/* Attach only to PF0 of the FPGA */
599 	if (d == 0xb000 && f != 0)
600 		return (ENXIO);
601 
602 	for (i = 0; i < nitems(t5_pciids); i++) {
603 		if (d == t5_pciids[i].device) {
604 			device_set_desc(dev, t5_pciids[i].desc);
605 			return (BUS_PROBE_DEFAULT);
606 		}
607 	}
608 
609 	return (ENXIO);
610 }
611 
612 static void
613 t5_attribute_workaround(device_t dev)
614 {
615 	device_t root_port;
616 	uint32_t v;
617 
618 	/*
619 	 * The T5 chips do not properly echo the No Snoop and Relaxed
620 	 * Ordering attributes when replying to a TLP from a Root
621 	 * Port.  As a workaround, find the parent Root Port and
622 	 * disable No Snoop and Relaxed Ordering.  Note that this
623 	 * affects all devices under this root port.
624 	 */
625 	root_port = pci_find_pcie_root_port(dev);
626 	if (root_port == NULL) {
627 		device_printf(dev, "Unable to find parent root port\n");
628 		return;
629 	}
630 
631 	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
632 	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
633 	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
634 	    0)
635 		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
636 		    device_get_nameunit(root_port));
637 }
638 
639 static int
640 t4_attach(device_t dev)
641 {
642 	struct adapter *sc;
643 	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
644 	struct intrs_and_queues iaq;
645 	struct sge *s;
646 #ifdef TCP_OFFLOAD
647 	int ofld_rqidx, ofld_tqidx;
648 #endif
649 #ifdef DEV_NETMAP
650 	int nm_rqidx, nm_tqidx;
651 #endif
652 	int num_vis;
653 
654 	sc = device_get_softc(dev);
655 	sc->dev = dev;
656 	TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);
657 
658 	if ((pci_get_device(dev) & 0xff00) == 0x5400)
659 		t5_attribute_workaround(dev);
660 	pci_enable_busmaster(dev);
661 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
662 		uint32_t v;
663 
664 		pci_set_max_read_req(dev, 4096);
665 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
666 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
667 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
668 
669 		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
670 	}
671 
672 	sc->traceq = -1;
673 	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
674 	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
675 	    device_get_nameunit(dev));
676 
677 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
678 	    device_get_nameunit(dev));
679 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
680 	sx_xlock(&t4_list_lock);
681 	SLIST_INSERT_HEAD(&t4_list, sc, link);
682 	sx_xunlock(&t4_list_lock);
683 
684 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
685 	TAILQ_INIT(&sc->sfl);
686 	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
687 
688 	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
689 
690 	rc = map_bars_0_and_4(sc);
691 	if (rc != 0)
692 		goto done; /* error message displayed already */
693 
694 	/*
695 	 * This is the real PF# to which we're attaching.  Works from within PCI
696 	 * passthrough environments too, where pci_get_function() could return a
697 	 * different PF# depending on the passthrough configuration.  We need to
698 	 * use the real PF# in all our communication with the firmware.
699 	 */
700 	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
701 	sc->mbox = sc->pf;
702 
703 	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
704 	sc->an_handler = an_not_handled;
705 	for (i = 0; i < nitems(sc->cpl_handler); i++)
706 		sc->cpl_handler[i] = cpl_not_handled;
707 	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
708 		sc->fw_msg_handler[i] = fw_msg_not_handled;
709 	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
710 	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
711 	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
712 	t4_init_sge_cpl_handlers(sc);
713 
714 	/* Prepare the adapter for operation */
715 	rc = -t4_prep_adapter(sc);
716 	if (rc != 0) {
717 		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
718 		goto done;
719 	}
720 
721 	/*
722 	 * Do this really early, with the memory windows set up even before the
723 	 * character device.  The userland tool's register i/o and mem read
724 	 * will work even in "recovery mode".
725 	 */
726 	setup_memwin(sc);
727 	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
728 	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
729 	    device_get_nameunit(dev));
730 	if (sc->cdev == NULL)
731 		device_printf(dev, "failed to create nexus char device.\n");
732 	else
733 		sc->cdev->si_drv1 = sc;
734 
735 	/* Go no further if recovery mode has been requested. */
736 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
737 		device_printf(dev, "recovery mode.\n");
738 		goto done;
739 	}
740 
741 #if defined(__i386__)
742 	if ((cpu_feature & CPUID_CX8) == 0) {
743 		device_printf(dev, "64 bit atomics not available.\n");
744 		rc = ENOTSUP;
745 		goto done;
746 	}
747 #endif
748 
749 	/* Prepare the firmware for operation */
750 	rc = prep_firmware(sc);
751 	if (rc != 0)
752 		goto done; /* error message displayed already */
753 
754 	rc = get_params__post_init(sc);
755 	if (rc != 0)
756 		goto done; /* error message displayed already */
757 
758 	rc = set_params__post_init(sc);
759 	if (rc != 0)
760 		goto done; /* error message displayed already */
761 
762 	rc = map_bar_2(sc);
763 	if (rc != 0)
764 		goto done; /* error message displayed already */
765 
766 	rc = t4_create_dma_tag(sc);
767 	if (rc != 0)
768 		goto done; /* error message displayed already */
769 
770 	/*
771 	 * Number of VIs to create per-port.  The first VI is the
772 	 * "main" regular VI for the port.  The second VI is used for
773 	 * netmap if present, and any remaining VIs are used for
774 	 * additional virtual interfaces.
775 	 *
776 	 * Limit the number of VIs per port to the number of available
777 	 * MAC addresses per port.
778 	 */
779 	if (t4_num_vis >= 1)
780 		num_vis = t4_num_vis;
781 	else
782 		num_vis = 1;
783 #ifdef DEV_NETMAP
784 	num_vis++;
785 #endif
786 	if (num_vis > nitems(vi_mac_funcs)) {
787 		num_vis = nitems(vi_mac_funcs);
788 		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
789 	}
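
	/*
	 * Example of the resulting layout (assuming DEV_NETMAP is defined and
	 * hw.cxgbe.num_vis=2): pi->vi[0] is the main VI, pi->vi[1] is the
	 * netmap VI, and pi->vi[2] is the one extra virtual interface.
	 */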
790 
791 	/*
792 	 * First pass over all the ports - allocate VIs and initialize some
793 	 * basic parameters like mac address, port type, etc.  We also figure
794 	 * out whether a port is 10G or 1G and use that information when
795 	 * calculating how many interrupts to attempt to allocate.
796 	 */
797 	n10g = n1g = 0;
798 	for_each_port(sc, i) {
799 		struct port_info *pi;
800 		struct vi_info *vi;
801 
802 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
803 		sc->port[i] = pi;
804 
805 		/* These must be set before t4_port_init */
806 		pi->adapter = sc;
807 		pi->port_id = i;
808 		pi->nvi = num_vis;
809 		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
810 		    M_ZERO | M_WAITOK);
811 
812 		/*
813 		 * Allocate the "main" VI and initialize parameters
814 		 * like mac addr.
815 		 */
816 		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
817 		if (rc != 0) {
818 			device_printf(dev, "unable to initialize port %d: %d\n",
819 			    i, rc);
820 			free(pi->vi, M_CXGBE);
821 			free(pi, M_CXGBE);
822 			sc->port[i] = NULL;
823 			goto done;
824 		}
825 
826 		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
827 		pi->link_cfg.requested_fc |= t4_pause_settings;
828 		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
829 		pi->link_cfg.fc |= t4_pause_settings;
830 
831 		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
832 		if (rc != 0) {
833 			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
834 			free(pi->vi, M_CXGBE);
835 			free(pi, M_CXGBE);
836 			sc->port[i] = NULL;
837 			goto done;
838 		}
839 
840 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
841 		    device_get_nameunit(dev), i);
842 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
843 		sc->chan_map[pi->tx_chan] = i;
844 
845 		if (is_10G_port(pi) || is_40G_port(pi)) {
846 			n10g++;
847 			for_each_vi(pi, j, vi) {
848 				vi->tmr_idx = t4_tmr_idx_10g;
849 				vi->pktc_idx = t4_pktc_idx_10g;
850 			}
851 		} else {
852 			n1g++;
853 			for_each_vi(pi, j, vi) {
854 				vi->tmr_idx = t4_tmr_idx_1g;
855 				vi->pktc_idx = t4_pktc_idx_1g;
856 			}
857 		}
858 
859 		pi->linkdnrc = -1;
860 
861 		for_each_vi(pi, j, vi) {
862 			vi->qsize_rxq = t4_qsize_rxq;
863 			vi->qsize_txq = t4_qsize_txq;
864 			vi->pi = pi;
865 		}
866 
867 		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
868 		if (pi->dev == NULL) {
869 			device_printf(dev,
870 			    "failed to add device for port %d.\n", i);
871 			rc = ENXIO;
872 			goto done;
873 		}
874 		pi->vi[0].dev = pi->dev;
875 		device_set_softc(pi->dev, pi);
876 	}
877 
878 	/*
879 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
880 	 */
881 #ifdef DEV_NETMAP
882 	num_vis--;
883 #endif
884 	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
885 	if (rc != 0)
886 		goto done; /* error message displayed already */
887 
888 	sc->intr_type = iaq.intr_type;
889 	sc->intr_count = iaq.nirq;
890 
891 	s = &sc->sge;
892 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
893 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
894 	if (num_vis > 1) {
895 		s->nrxq += (n10g + n1g) * (num_vis - 1);
896 		s->ntxq += (n10g + n1g) * (num_vis - 1);
897 	}
898 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
899 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
900 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
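
	/*
	 * Worked example (illustrative): a 2-port 10G adapter with a single VI
	 * per port and the compile-time defaults (16 txq, 8 rxq per 10G port)
	 * gets nrxq = 16 and ntxq = 32, so neq = 48 + 2 + 1 = 51 and
	 * niq = 16 + 1 = 17 before any offload or netmap queues are added.
	 */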
901 #ifdef TCP_OFFLOAD
902 	if (is_offload(sc)) {
903 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
904 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
905 		if (num_vis > 1) {
906 			s->nofldrxq += (n10g + n1g) * (num_vis - 1);
907 			s->nofldtxq += (n10g + n1g) * (num_vis - 1);
908 		}
909 		s->neq += s->nofldtxq + s->nofldrxq;
910 		s->niq += s->nofldrxq;
911 
912 		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
913 		    M_CXGBE, M_ZERO | M_WAITOK);
914 		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
915 		    M_CXGBE, M_ZERO | M_WAITOK);
916 	}
917 #endif
918 #ifdef DEV_NETMAP
919 	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
920 	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
921 	s->neq += s->nnmtxq + s->nnmrxq;
922 	s->niq += s->nnmrxq;
923 
924 	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
925 	    M_CXGBE, M_ZERO | M_WAITOK);
926 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
927 	    M_CXGBE, M_ZERO | M_WAITOK);
928 #endif
929 
930 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
931 	    M_ZERO | M_WAITOK);
932 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
933 	    M_ZERO | M_WAITOK);
934 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
935 	    M_ZERO | M_WAITOK);
936 	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
937 	    M_ZERO | M_WAITOK);
938 	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
939 	    M_ZERO | M_WAITOK);
940 
941 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
942 	    M_ZERO | M_WAITOK);
943 
944 	t4_init_l2t(sc, M_WAITOK);
945 
946 	/*
947 	 * Second pass over the ports.  This time we know the number of rx and
948 	 * tx queues that each port should get.
949 	 */
950 	rqidx = tqidx = 0;
951 #ifdef TCP_OFFLOAD
952 	ofld_rqidx = ofld_tqidx = 0;
953 #endif
954 #ifdef DEV_NETMAP
955 	nm_rqidx = nm_tqidx = 0;
956 #endif
957 	for_each_port(sc, i) {
958 		struct port_info *pi = sc->port[i];
959 		struct vi_info *vi;
960 
961 		if (pi == NULL)
962 			continue;
963 
964 		for_each_vi(pi, j, vi) {
965 #ifdef DEV_NETMAP
966 			if (j == 1) {
967 				vi->flags |= VI_NETMAP | INTR_RXQ;
968 				vi->first_rxq = nm_rqidx;
969 				vi->first_txq = nm_tqidx;
970 				if (is_10G_port(pi) || is_40G_port(pi)) {
971 					vi->nrxq = iaq.nnmrxq10g;
972 					vi->ntxq = iaq.nnmtxq10g;
973 				} else {
974 					vi->nrxq = iaq.nnmrxq1g;
975 					vi->ntxq = iaq.nnmtxq1g;
976 				}
977 				nm_rqidx += vi->nrxq;
978 				nm_tqidx += vi->ntxq;
979 				continue;
980 			}
981 #endif
982 
983 			vi->first_rxq = rqidx;
984 			vi->first_txq = tqidx;
985 			if (is_10G_port(pi) || is_40G_port(pi)) {
986 				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
987 				vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
988 				vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
989 			} else {
990 				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
991 				vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
992 				vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
993 			}
994 
995 			if (vi->ntxq > 1)
996 				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
997 			else
998 				vi->rsrv_noflowq = 0;
999 
1000 			rqidx += vi->nrxq;
1001 			tqidx += vi->ntxq;
1002 
1003 #ifdef TCP_OFFLOAD
1004 			if (!is_offload(sc))
1005 				continue;
1006 			vi->first_ofld_rxq = ofld_rqidx;
1007 			vi->first_ofld_txq = ofld_tqidx;
1008 			if (is_10G_port(pi) || is_40G_port(pi)) {
1009 				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
1010 				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
1011 				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
1012 			} else {
1013 				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
1014 				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
1015 				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
1016 			}
1017 			ofld_rqidx += vi->nofldrxq;
1018 			ofld_tqidx += vi->nofldtxq;
1019 #endif
1020 		}
1021 	}
1022 
1023 	rc = setup_intr_handlers(sc);
1024 	if (rc != 0) {
1025 		device_printf(dev,
1026 		    "failed to setup interrupt handlers: %d\n", rc);
1027 		goto done;
1028 	}
1029 
1030 	rc = bus_generic_attach(dev);
1031 	if (rc != 0) {
1032 		device_printf(dev,
1033 		    "failed to attach all child ports: %d\n", rc);
1034 		goto done;
1035 	}
1036 
1037 	device_printf(dev,
1038 	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1039 	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1040 	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1041 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1042 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1043 
1044 	t4_set_desc(sc);
1045 
1046 done:
1047 	if (rc != 0 && sc->cdev) {
1048 		/* cdev was created and so cxgbetool works; recover that way. */
1049 		device_printf(dev,
1050 		    "error during attach, adapter is now in recovery mode.\n");
1051 		rc = 0;
1052 	}
1053 
1054 	if (rc != 0)
1055 		t4_detach(dev);
1056 	else
1057 		t4_sysctls(sc);
1058 
1059 	return (rc);
1060 }
1061 
1062 /*
1063  * Idempotent
1064  */
1065 static int
1066 t4_detach(device_t dev)
1067 {
1068 	struct adapter *sc;
1069 	struct port_info *pi;
1070 	int i, rc;
1071 
1072 	sc = device_get_softc(dev);
1073 
1074 	if (sc->flags & FULL_INIT_DONE)
1075 		t4_intr_disable(sc);
1076 
1077 	if (sc->cdev) {
1078 		destroy_dev(sc->cdev);
1079 		sc->cdev = NULL;
1080 	}
1081 
1082 	rc = bus_generic_detach(dev);
1083 	if (rc) {
1084 		device_printf(dev,
1085 		    "failed to detach child devices: %d\n", rc);
1086 		return (rc);
1087 	}
1088 
1089 	for (i = 0; i < sc->intr_count; i++)
1090 		t4_free_irq(sc, &sc->irq[i]);
1091 
1092 	for (i = 0; i < MAX_NPORTS; i++) {
1093 		pi = sc->port[i];
1094 		if (pi) {
1095 			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1096 			if (pi->dev)
1097 				device_delete_child(dev, pi->dev);
1098 
1099 			mtx_destroy(&pi->pi_lock);
1100 			free(pi->vi, M_CXGBE);
1101 			free(pi, M_CXGBE);
1102 		}
1103 	}
1104 
1105 	if (sc->flags & FULL_INIT_DONE)
1106 		adapter_full_uninit(sc);
1107 
1108 	if (sc->flags & FW_OK)
1109 		t4_fw_bye(sc, sc->mbox);
1110 
1111 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1112 		pci_release_msi(dev);
1113 
1114 	if (sc->regs_res)
1115 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1116 		    sc->regs_res);
1117 
1118 	if (sc->udbs_res)
1119 		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1120 		    sc->udbs_res);
1121 
1122 	if (sc->msix_res)
1123 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1124 		    sc->msix_res);
1125 
1126 	if (sc->l2t)
1127 		t4_free_l2t(sc->l2t);
1128 
1129 #ifdef TCP_OFFLOAD
1130 	free(sc->sge.ofld_rxq, M_CXGBE);
1131 	free(sc->sge.ofld_txq, M_CXGBE);
1132 #endif
1133 #ifdef DEV_NETMAP
1134 	free(sc->sge.nm_rxq, M_CXGBE);
1135 	free(sc->sge.nm_txq, M_CXGBE);
1136 #endif
1137 	free(sc->irq, M_CXGBE);
1138 	free(sc->sge.rxq, M_CXGBE);
1139 	free(sc->sge.txq, M_CXGBE);
1140 	free(sc->sge.ctrlq, M_CXGBE);
1141 	free(sc->sge.iqmap, M_CXGBE);
1142 	free(sc->sge.eqmap, M_CXGBE);
1143 	free(sc->tids.ftid_tab, M_CXGBE);
1144 	t4_destroy_dma_tag(sc);
1145 	if (mtx_initialized(&sc->sc_lock)) {
1146 		sx_xlock(&t4_list_lock);
1147 		SLIST_REMOVE(&t4_list, sc, adapter, link);
1148 		sx_xunlock(&t4_list_lock);
1149 		mtx_destroy(&sc->sc_lock);
1150 	}
1151 
1152 	callout_drain(&sc->sfl_callout);
1153 	if (mtx_initialized(&sc->tids.ftid_lock))
1154 		mtx_destroy(&sc->tids.ftid_lock);
1155 	if (mtx_initialized(&sc->sfl_lock))
1156 		mtx_destroy(&sc->sfl_lock);
1157 	if (mtx_initialized(&sc->ifp_lock))
1158 		mtx_destroy(&sc->ifp_lock);
1159 	if (mtx_initialized(&sc->regwin_lock))
1160 		mtx_destroy(&sc->regwin_lock);
1161 
1162 	bzero(sc, sizeof(*sc));
1163 
1164 	return (0);
1165 }
1166 
1167 static int
1168 cxgbe_probe(device_t dev)
1169 {
1170 	char buf[128];
1171 	struct port_info *pi = device_get_softc(dev);
1172 
1173 	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1174 	device_set_desc_copy(dev, buf);
1175 
1176 	return (BUS_PROBE_DEFAULT);
1177 }
1178 
1179 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1180     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1181     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1182 #define T4_CAP_ENABLE (T4_CAP)
1183 
1184 static int
1185 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1186 {
1187 	struct ifnet *ifp;
1188 	struct sbuf *sb;
1189 
1190 	vi->xact_addr_filt = -1;
1191 	callout_init(&vi->tick, 1);
1192 
1193 	/* Allocate an ifnet and set it up */
1194 	ifp = if_alloc(IFT_ETHER);
1195 	if (ifp == NULL) {
1196 		device_printf(dev, "Cannot allocate ifnet\n");
1197 		return (ENOMEM);
1198 	}
1199 	vi->ifp = ifp;
1200 	ifp->if_softc = vi;
1201 
1202 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1203 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1204 
1205 	ifp->if_init = cxgbe_init;
1206 	ifp->if_ioctl = cxgbe_ioctl;
1207 	ifp->if_transmit = cxgbe_transmit;
1208 	ifp->if_qflush = cxgbe_qflush;
1209 	ifp->if_get_counter = cxgbe_get_counter;
1210 
1211 	ifp->if_capabilities = T4_CAP;
1212 #ifdef TCP_OFFLOAD
1213 	if (vi->nofldrxq != 0)
1214 		ifp->if_capabilities |= IFCAP_TOE;
1215 #endif
1216 	ifp->if_capenable = T4_CAP_ENABLE;
1217 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1218 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1219 
1220 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1221 	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1222 	ifp->if_hw_tsomaxsegsize = 65536;
1223 
1224 	/* Initialize ifmedia for this VI */
1225 	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1226 	    cxgbe_media_status);
1227 	build_medialist(vi->pi, &vi->media);
1228 
1229 	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1230 	    EVENTHANDLER_PRI_ANY);
1231 
1232 	ether_ifattach(ifp, vi->hw_addr);
1233 
1234 	sb = sbuf_new_auto();
1235 	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1236 #ifdef TCP_OFFLOAD
1237 	if (ifp->if_capabilities & IFCAP_TOE)
1238 		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
1239 		    vi->nofldtxq, vi->nofldrxq);
1240 #endif
1241 	sbuf_finish(sb);
1242 	device_printf(dev, "%s\n", sbuf_data(sb));
1243 	sbuf_delete(sb);
1244 
1245 	vi_sysctls(vi);
1246 
1247 	return (0);
1248 }
1249 
1250 static int
1251 cxgbe_attach(device_t dev)
1252 {
1253 	struct port_info *pi = device_get_softc(dev);
1254 	struct vi_info *vi;
1255 	int i, rc;
1256 
1257 	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1258 
1259 	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1260 	if (rc)
1261 		return (rc);
1262 
1263 	for_each_vi(pi, i, vi) {
1264 		if (i == 0)
1265 			continue;
1266 #ifdef DEV_NETMAP
1267 		if (vi->flags & VI_NETMAP) {
1268 			/*
1269 			 * Media is handled here to keep the
1270 			 * implementation private to this file.
1271 			 */
1272 			ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1273 			    cxgbe_media_status);
1274 			build_medialist(pi, &vi->media);
1275 			vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
1276 			    "ncxgbe" : "ncxl", device_get_unit(dev));
1277 		} else
1278 #endif
1279 			vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
1280 			    "vcxgbe" : "vcxl", -1);
1281 		if (vi->dev == NULL) {
1282 			device_printf(dev, "failed to add VI %d\n", i);
1283 			continue;
1284 		}
1285 		device_set_softc(vi->dev, vi);
1286 	}
1287 
1288 	cxgbe_sysctls(pi);
1289 
1290 	bus_generic_attach(dev);
1291 
1292 	return (0);
1293 }
1294 
1295 static void
1296 cxgbe_vi_detach(struct vi_info *vi)
1297 {
1298 	struct ifnet *ifp = vi->ifp;
1299 
1300 	ether_ifdetach(ifp);
1301 
1302 	if (vi->vlan_c)
1303 		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1304 
1305 	/* Let detach proceed even if these fail. */
1306 	cxgbe_uninit_synchronized(vi);
1307 	callout_drain(&vi->tick);
1308 	vi_full_uninit(vi);
1309 
1310 	ifmedia_removeall(&vi->media);
1311 	if_free(vi->ifp);
1312 	vi->ifp = NULL;
1313 }
1314 
1315 static int
1316 cxgbe_detach(device_t dev)
1317 {
1318 	struct port_info *pi = device_get_softc(dev);
1319 	struct adapter *sc = pi->adapter;
1320 	int rc;
1321 
1322 	/* Detach the extra VIs first. */
1323 	rc = bus_generic_detach(dev);
1324 	if (rc)
1325 		return (rc);
1326 	device_delete_children(dev);
1327 
1328 	doom_vi(sc, &pi->vi[0]);
1329 
1330 	if (pi->flags & HAS_TRACEQ) {
1331 		sc->traceq = -1;	/* cloner should not create ifnet */
1332 		t4_tracer_port_detach(sc);
1333 	}
1334 
1335 	cxgbe_vi_detach(&pi->vi[0]);
1336 	callout_drain(&pi->tick);
1337 
1338 	end_synchronized_op(sc, 0);
1339 
1340 	return (0);
1341 }
1342 
1343 static void
1344 cxgbe_init(void *arg)
1345 {
1346 	struct vi_info *vi = arg;
1347 	struct adapter *sc = vi->pi->adapter;
1348 
1349 	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1350 		return;
1351 	cxgbe_init_synchronized(vi);
1352 	end_synchronized_op(sc, 0);
1353 }
1354 
1355 static int
1356 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1357 {
1358 	int rc = 0, mtu, flags, can_sleep;
1359 	struct vi_info *vi = ifp->if_softc;
1360 	struct adapter *sc = vi->pi->adapter;
1361 	struct ifreq *ifr = (struct ifreq *)data;
1362 	uint32_t mask;
1363 
1364 	switch (cmd) {
1365 	case SIOCSIFMTU:
1366 		mtu = ifr->ifr_mtu;
1367 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1368 			return (EINVAL);
1369 
1370 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1371 		if (rc)
1372 			return (rc);
1373 		ifp->if_mtu = mtu;
1374 		if (vi->flags & VI_INIT_DONE) {
1375 			t4_update_fl_bufsize(ifp);
1376 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1377 				rc = update_mac_settings(ifp, XGMAC_MTU);
1378 		}
1379 		end_synchronized_op(sc, 0);
1380 		break;
1381 
1382 	case SIOCSIFFLAGS:
1383 		can_sleep = 0;
1384 redo_sifflags:
1385 		rc = begin_synchronized_op(sc, vi,
1386 		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1387 		if (rc)
1388 			return (rc);
1389 
1390 		if (ifp->if_flags & IFF_UP) {
1391 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1392 				flags = vi->if_flags;
1393 				if ((ifp->if_flags ^ flags) &
1394 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1395 					if (can_sleep == 1) {
1396 						end_synchronized_op(sc, 0);
1397 						can_sleep = 0;
1398 						goto redo_sifflags;
1399 					}
1400 					rc = update_mac_settings(ifp,
1401 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
1402 				}
1403 			} else {
1404 				if (can_sleep == 0) {
1405 					end_synchronized_op(sc, LOCK_HELD);
1406 					can_sleep = 1;
1407 					goto redo_sifflags;
1408 				}
1409 				rc = cxgbe_init_synchronized(vi);
1410 			}
1411 			vi->if_flags = ifp->if_flags;
1412 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1413 			if (can_sleep == 0) {
1414 				end_synchronized_op(sc, LOCK_HELD);
1415 				can_sleep = 1;
1416 				goto redo_sifflags;
1417 			}
1418 			rc = cxgbe_uninit_synchronized(vi);
1419 		}
1420 		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1421 		break;
1422 
1423 	case SIOCADDMULTI:
1424 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1425 		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
1426 		if (rc)
1427 			return (rc);
1428 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1429 			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1430 		end_synchronized_op(sc, LOCK_HELD);
1431 		break;
1432 
1433 	case SIOCSIFCAP:
1434 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1435 		if (rc)
1436 			return (rc);
1437 
1438 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1439 		if (mask & IFCAP_TXCSUM) {
1440 			ifp->if_capenable ^= IFCAP_TXCSUM;
1441 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1442 
1443 			if (IFCAP_TSO4 & ifp->if_capenable &&
1444 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1445 				ifp->if_capenable &= ~IFCAP_TSO4;
1446 				if_printf(ifp,
1447 				    "tso4 disabled due to -txcsum.\n");
1448 			}
1449 		}
1450 		if (mask & IFCAP_TXCSUM_IPV6) {
1451 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1452 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1453 
1454 			if (IFCAP_TSO6 & ifp->if_capenable &&
1455 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1456 				ifp->if_capenable &= ~IFCAP_TSO6;
1457 				if_printf(ifp,
1458 				    "tso6 disabled due to -txcsum6.\n");
1459 			}
1460 		}
1461 		if (mask & IFCAP_RXCSUM)
1462 			ifp->if_capenable ^= IFCAP_RXCSUM;
1463 		if (mask & IFCAP_RXCSUM_IPV6)
1464 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1465 
1466 		/*
1467 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1468 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1469 		 * sending a TSO request our way, so it's sufficient to toggle
1470 		 * IFCAP_TSOx only.
1471 		 */
1472 		if (mask & IFCAP_TSO4) {
1473 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1474 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1475 				if_printf(ifp, "enable txcsum first.\n");
1476 				rc = EAGAIN;
1477 				goto fail;
1478 			}
1479 			ifp->if_capenable ^= IFCAP_TSO4;
1480 		}
1481 		if (mask & IFCAP_TSO6) {
1482 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1483 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1484 				if_printf(ifp, "enable txcsum6 first.\n");
1485 				rc = EAGAIN;
1486 				goto fail;
1487 			}
1488 			ifp->if_capenable ^= IFCAP_TSO6;
1489 		}
1490 		if (mask & IFCAP_LRO) {
1491 #if defined(INET) || defined(INET6)
1492 			int i;
1493 			struct sge_rxq *rxq;
1494 
1495 			ifp->if_capenable ^= IFCAP_LRO;
1496 			for_each_rxq(vi, i, rxq) {
1497 				if (ifp->if_capenable & IFCAP_LRO)
1498 					rxq->iq.flags |= IQ_LRO_ENABLED;
1499 				else
1500 					rxq->iq.flags &= ~IQ_LRO_ENABLED;
1501 			}
1502 #endif
1503 		}
1504 #ifdef TCP_OFFLOAD
1505 		if (mask & IFCAP_TOE) {
1506 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1507 
1508 			rc = toe_capability(vi, enable);
1509 			if (rc != 0)
1510 				goto fail;
1511 
1512 			ifp->if_capenable ^= mask;
1513 		}
1514 #endif
1515 		if (mask & IFCAP_VLAN_HWTAGGING) {
1516 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1517 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1518 				rc = update_mac_settings(ifp, XGMAC_VLANEX);
1519 		}
1520 		if (mask & IFCAP_VLAN_MTU) {
1521 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1522 
1523 			/* Need to find out how to disable auto-mtu-inflation */
1524 		}
1525 		if (mask & IFCAP_VLAN_HWTSO)
1526 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1527 		if (mask & IFCAP_VLAN_HWCSUM)
1528 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1529 
1530 #ifdef VLAN_CAPABILITIES
1531 		VLAN_CAPABILITIES(ifp);
1532 #endif
1533 fail:
1534 		end_synchronized_op(sc, 0);
1535 		break;
1536 
1537 	case SIOCSIFMEDIA:
1538 	case SIOCGIFMEDIA:
1539 		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
1540 		break;
1541 
1542 	case SIOCGI2C: {
1543 		struct ifi2creq i2c;
1544 
1545 		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1546 		if (rc != 0)
1547 			break;
1548 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1549 			rc = EPERM;
1550 			break;
1551 		}
1552 		if (i2c.len > sizeof(i2c.data)) {
1553 			rc = EINVAL;
1554 			break;
1555 		}
1556 		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1557 		if (rc)
1558 			return (rc);
1559 		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
1560 		    i2c.offset, i2c.len, &i2c.data[0]);
1561 		end_synchronized_op(sc, 0);
1562 		if (rc == 0)
1563 			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1564 		break;
1565 	}
1566 
1567 	default:
1568 		rc = ether_ioctl(ifp, cmd, data);
1569 	}
1570 
1571 	return (rc);
1572 }
1573 
1574 static int
1575 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1576 {
1577 	struct vi_info *vi = ifp->if_softc;
1578 	struct port_info *pi = vi->pi;
1579 	struct adapter *sc = pi->adapter;
1580 	struct sge_txq *txq;
1581 	void *items[1];
1582 	int rc;
1583 
1584 	M_ASSERTPKTHDR(m);
1585 	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
1586 
1587 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
1588 		m_freem(m);
1589 		return (ENETDOWN);
1590 	}
1591 
1592 	rc = parse_pkt(&m);
1593 	if (__predict_false(rc != 0)) {
1594 		MPASS(m == NULL);			/* was freed already */
1595 		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
1596 		return (rc);
1597 	}
1598 
1599 	/* Select a txq. */
1600 	txq = &sc->sge.txq[vi->first_txq];
1601 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1602 		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1603 		    vi->rsrv_noflowq);
1604 
1605 	items[0] = m;
1606 	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1607 	if (__predict_false(rc != 0))
1608 		m_freem(m);
1609 
1610 	return (rc);
1611 }
1612 
1613 static void
1614 cxgbe_qflush(struct ifnet *ifp)
1615 {
1616 	struct vi_info *vi = ifp->if_softc;
1617 	struct sge_txq *txq;
1618 	int i;
1619 
1620 	/* queues do not exist if !VI_INIT_DONE. */
1621 	if (vi->flags & VI_INIT_DONE) {
1622 		for_each_txq(vi, i, txq) {
1623 			TXQ_LOCK(txq);
1624 			txq->eq.flags &= ~EQ_ENABLED;
1625 			TXQ_UNLOCK(txq);
1626 			while (!mp_ring_is_idle(txq->r)) {
1627 				mp_ring_check_drainage(txq->r, 0);
1628 				pause("qflush", 1);
1629 			}
1630 		}
1631 	}
1632 	if_qflush(ifp);
1633 }
1634 
1635 static uint64_t
1636 vi_get_counter(struct ifnet *ifp, ift_counter c)
1637 {
1638 	struct vi_info *vi = ifp->if_softc;
1639 	struct fw_vi_stats_vf *s = &vi->stats;
1640 
1641 	vi_refresh_stats(vi->pi->adapter, vi);
1642 
1643 	switch (c) {
1644 	case IFCOUNTER_IPACKETS:
1645 		return (s->rx_bcast_frames + s->rx_mcast_frames +
1646 		    s->rx_ucast_frames);
1647 	case IFCOUNTER_IERRORS:
1648 		return (s->rx_err_frames);
1649 	case IFCOUNTER_OPACKETS:
1650 		return (s->tx_bcast_frames + s->tx_mcast_frames +
1651 		    s->tx_ucast_frames + s->tx_offload_frames);
1652 	case IFCOUNTER_OERRORS:
1653 		return (s->tx_drop_frames);
1654 	case IFCOUNTER_IBYTES:
1655 		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1656 		    s->rx_ucast_bytes);
1657 	case IFCOUNTER_OBYTES:
1658 		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1659 		    s->tx_ucast_bytes + s->tx_offload_bytes);
1660 	case IFCOUNTER_IMCASTS:
1661 		return (s->rx_mcast_frames);
1662 	case IFCOUNTER_OMCASTS:
1663 		return (s->tx_mcast_frames);
1664 	case IFCOUNTER_OQDROPS: {
1665 		uint64_t drops;
1666 
1667 		drops = 0;
1668 		if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
1669 			int i;
1670 			struct sge_txq *txq;
1671 
1672 			for_each_txq(vi, i, txq)
1673 				drops += counter_u64_fetch(txq->r->drops);
1674 		}
1675 
1676 		return (drops);
1677 
1678 	}
1679 
1680 	default:
1681 		return (if_get_counter_default(ifp, c));
1682 	}
1683 }
1684 
1685 uint64_t
1686 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1687 {
1688 	struct vi_info *vi = ifp->if_softc;
1689 	struct port_info *pi = vi->pi;
1690 	struct adapter *sc = pi->adapter;
1691 	struct port_stats *s = &pi->stats;
1692 
1693 	if (pi->nvi > 1)
1694 		return (vi_get_counter(ifp, c));
1695 
1696 	cxgbe_refresh_stats(sc, pi);
1697 
1698 	switch (c) {
1699 	case IFCOUNTER_IPACKETS:
1700 		return (s->rx_frames - s->rx_pause);
1701 
1702 	case IFCOUNTER_IERRORS:
1703 		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
1704 		    s->rx_fcs_err + s->rx_len_err);
1705 
1706 	case IFCOUNTER_OPACKETS:
1707 		return (s->tx_frames - s->tx_pause);
1708 
1709 	case IFCOUNTER_OERRORS:
1710 		return (s->tx_error_frames);
1711 
1712 	case IFCOUNTER_IBYTES:
1713 		return (s->rx_octets - s->rx_pause * 64);
1714 
1715 	case IFCOUNTER_OBYTES:
1716 		return (s->tx_octets - s->tx_pause * 64);
1717 
1718 	case IFCOUNTER_IMCASTS:
1719 		return (s->rx_mcast_frames - s->rx_pause);
1720 
1721 	case IFCOUNTER_OMCASTS:
1722 		return (s->tx_mcast_frames - s->tx_pause);
1723 
1724 	case IFCOUNTER_IQDROPS:
1725 		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
1726 		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
1727 		    s->rx_trunc3 + pi->tnl_cong_drops);
1728 
1729 	case IFCOUNTER_OQDROPS: {
1730 		uint64_t drops;
1731 
1732 		drops = s->tx_drop;
1733 		if (vi->flags & VI_INIT_DONE) {
1734 			int i;
1735 			struct sge_txq *txq;
1736 
1737 			for_each_txq(vi, i, txq)
1738 				drops += counter_u64_fetch(txq->r->drops);
1739 		}
1740 
1741 		return (drops);
1742 
1743 	}
1744 
1745 	default:
1746 		return (if_get_counter_default(ifp, c));
1747 	}
1748 }
1749 
1750 static int
1751 cxgbe_media_change(struct ifnet *ifp)
1752 {
1753 	struct vi_info *vi = ifp->if_softc;
1754 
1755 	device_printf(vi->dev, "%s unimplemented.\n", __func__);
1756 
1757 	return (EOPNOTSUPP);
1758 }
1759 
1760 static void
1761 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1762 {
1763 	struct vi_info *vi = ifp->if_softc;
1764 	struct port_info *pi = vi->pi;
1765 	struct ifmedia_entry *cur;
1766 	int speed = pi->link_cfg.speed;
1767 
1768 	cur = vi->media.ifm_cur;
1769 
1770 	ifmr->ifm_status = IFM_AVALID;
1771 	if (!pi->link_cfg.link_ok)
1772 		return;
1773 
1774 	ifmr->ifm_status |= IFM_ACTIVE;
1775 
1776 	/* active and current will differ iff current media is autoselect. */
1777 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1778 		return;
1779 
1780 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1781 	if (speed == SPEED_10000)
1782 		ifmr->ifm_active |= IFM_10G_T;
1783 	else if (speed == SPEED_1000)
1784 		ifmr->ifm_active |= IFM_1000_T;
1785 	else if (speed == SPEED_100)
1786 		ifmr->ifm_active |= IFM_100_TX;
1787 	else if (speed == SPEED_10)
1788 		ifmr->ifm_active |= IFM_10_T;
1789 	else
1790 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1791 			    speed));
1792 }
1793 
1794 static int
1795 vcxgbe_probe(device_t dev)
1796 {
1797 	char buf[128];
1798 	struct vi_info *vi = device_get_softc(dev);
1799 
1800 	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
1801 	    vi - vi->pi->vi);
1802 	device_set_desc_copy(dev, buf);
1803 
1804 	return (BUS_PROBE_DEFAULT);
1805 }
1806 
1807 static int
1808 vcxgbe_attach(device_t dev)
1809 {
1810 	struct vi_info *vi;
1811 	struct port_info *pi;
1812 	struct adapter *sc;
1813 	int func, index, rc;
1814 	u32 param, val;
1815 
1816 	vi = device_get_softc(dev);
1817 	pi = vi->pi;
1818 	sc = pi->adapter;
1819 
1820 	index = vi - pi->vi;
1821 	KASSERT(index < nitems(vi_mac_funcs),
1822 	    ("%s: VI %s doesn't have a MAC func", __func__,
1823 	    device_get_nameunit(dev)));
1824 	func = vi_mac_funcs[index];
1825 	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
1826 	    vi->hw_addr, &vi->rss_size, func, 0);
1827 	if (rc < 0) {
1828 		device_printf(dev, "Failed to allocate virtual interface "
1829 		    "for port %d: %d\n", pi->port_id, -rc);
1830 		return (-rc);
1831 	}
1832 	vi->viid = rc;
1833 
1834 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
1835 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
1836 	    V_FW_PARAMS_PARAM_YZ(vi->viid);
1837 	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1838 	if (rc)
1839 		vi->rss_base = 0xffff;
1840 	else {
1841 		/* MPASS((val >> 16) == rss_size); */
1842 		vi->rss_base = val & 0xffff;
1843 	}
1844 
1845 	rc = cxgbe_vi_attach(dev, vi);
1846 	if (rc) {
1847 		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
1848 		return (rc);
1849 	}
1850 	return (0);
1851 }
1852 
1853 static int
1854 vcxgbe_detach(device_t dev)
1855 {
1856 	struct vi_info *vi;
1857 	struct adapter *sc;
1858 
1859 	vi = device_get_softc(dev);
1860 	sc = vi->pi->adapter;
1861 
1862 	doom_vi(sc, vi);
1863 
1864 	cxgbe_vi_detach(vi);
1865 	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
1866 
1867 	end_synchronized_op(sc, 0);
1868 
1869 	return (0);
1870 }
1871 
1872 void
1873 t4_fatal_err(struct adapter *sc)
1874 {
1875 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1876 	t4_intr_disable(sc);
1877 	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1878 	    device_get_nameunit(sc->dev));
1879 }
1880 
1881 static int
1882 map_bars_0_and_4(struct adapter *sc)
1883 {
1884 	sc->regs_rid = PCIR_BAR(0);
1885 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1886 	    &sc->regs_rid, RF_ACTIVE);
1887 	if (sc->regs_res == NULL) {
1888 		device_printf(sc->dev, "cannot map registers.\n");
1889 		return (ENXIO);
1890 	}
1891 	sc->bt = rman_get_bustag(sc->regs_res);
1892 	sc->bh = rman_get_bushandle(sc->regs_res);
1893 	sc->mmio_len = rman_get_size(sc->regs_res);
1894 	setbit(&sc->doorbells, DOORBELL_KDB);
1895 
1896 	sc->msix_rid = PCIR_BAR(4);
1897 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1898 	    &sc->msix_rid, RF_ACTIVE);
1899 	if (sc->msix_res == NULL) {
1900 		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1901 		return (ENXIO);
1902 	}
1903 
1904 	return (0);
1905 }
1906 
1907 static int
1908 map_bar_2(struct adapter *sc)
1909 {
1910 
1911 	/*
1912 	 * T4: only the iWARP driver uses the userspace doorbells.  There is
1913 	 * no need to map the BAR if RDMA is disabled.
1914 	 */
1915 	if (is_t4(sc) && sc->rdmacaps == 0)
1916 		return (0);
1917 
1918 	sc->udbs_rid = PCIR_BAR(2);
1919 	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1920 	    &sc->udbs_rid, RF_ACTIVE);
1921 	if (sc->udbs_res == NULL) {
1922 		device_printf(sc->dev, "cannot map doorbell BAR.\n");
1923 		return (ENXIO);
1924 	}
1925 	sc->udbs_base = rman_get_virtual(sc->udbs_res);
1926 
1927 	if (is_t5(sc)) {
1928 		setbit(&sc->doorbells, DOORBELL_UDB);
1929 #if defined(__i386__) || defined(__amd64__)
1930 		if (t5_write_combine) {
1931 			int rc;
1932 
1933 			/*
1934 			 * Enable write combining on BAR2.  This is the
1935 			 * userspace doorbell BAR and is split into 128B
1936 			 * (UDBS_SEG_SIZE) doorbell regions, each associated
1937 			 * with an egress queue.  The first 64B has the doorbell
1938 			 * and the second 64B can be used to submit a tx work
1939 			 * request with an implicit doorbell.
1940 			 */
1941 
1942 			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1943 			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1944 			if (rc == 0) {
1945 				clrbit(&sc->doorbells, DOORBELL_UDB);
1946 				setbit(&sc->doorbells, DOORBELL_WCWR);
1947 				setbit(&sc->doorbells, DOORBELL_UDBWC);
1948 			} else {
1949 				device_printf(sc->dev,
1950 				    "couldn't enable write combining: %d\n",
1951 				    rc);
1952 			}
1953 
1954 			t4_write_reg(sc, A_SGE_STAT_CFG,
1955 			    V_STATSOURCE_T5(7) | V_STATMODE(0));
1956 		}
1957 #endif
1958 	}
1959 
1960 	return (0);
1961 }
1962 
1963 static const struct memwin t4_memwin[] = {
1964 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1965 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1966 	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1967 };
1968 
1969 static const struct memwin t5_memwin[] = {
1970 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1971 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1972 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1973 };
1974 
1975 static void
1976 setup_memwin(struct adapter *sc)
1977 {
1978 	const struct memwin *mw;
1979 	int i, n;
1980 	uint32_t bar0;
1981 
1982 	if (is_t4(sc)) {
1983 		/*
1984 		 * Read low 32b of bar0 indirectly via the hardware backdoor
1985 		 * mechanism.  Works from within PCI passthrough environments
1986 		 * too, where rman_get_start() can return a different value.  We
1987 		 * need to program the T4 memory window decoders with the actual
1988 		 * addresses that will be coming across the PCIe link.
1989 		 */
1990 		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1991 		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1992 
1993 		mw = &t4_memwin[0];
1994 		n = nitems(t4_memwin);
1995 	} else {
1996 		/* T5 uses the relative offset inside the PCIe BAR */
1997 		bar0 = 0;
1998 
1999 		mw = &t5_memwin[0];
2000 		n = nitems(t5_memwin);
2001 	}
2002 
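	/*
	 * The WINDOW field encodes the aperture as log2(size in bytes) - 10,
	 * i.e. the log2 of the window size in KB.
	 */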
2003 	for (i = 0; i < n; i++, mw++) {
2004 		t4_write_reg(sc,
2005 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2006 		    (mw->base + bar0) | V_BIR(0) |
2007 		    V_WINDOW(ilog2(mw->aperture) - 10));
2008 	}
2009 
2010 	/* flush */
2011 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2012 }
2013 
2014 /*
2015  * Verify that the memory range specified by the addr/len pair is valid and lies
2016  * entirely within a single region (EDCx or MCx).
2017  */
2018 static int
2019 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2020 {
2021 	uint32_t em, addr_len, maddr, mlen;
2022 
2023 	/* Memory can only be accessed in naturally aligned 4 byte units */
2024 	if (addr & 3 || len & 3 || len == 0)
2025 		return (EINVAL);
2026 
2027 	/* Enabled memories */
2028 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
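	/*
	 * The BAR registers read below report each memory's base address and
	 * size in MB units, hence the << 20 shifts.
	 */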
2029 	if (em & F_EDRAM0_ENABLE) {
2030 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2031 		maddr = G_EDRAM0_BASE(addr_len) << 20;
2032 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
2033 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2034 		    addr + len <= maddr + mlen)
2035 			return (0);
2036 	}
2037 	if (em & F_EDRAM1_ENABLE) {
2038 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2039 		maddr = G_EDRAM1_BASE(addr_len) << 20;
2040 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
2041 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2042 		    addr + len <= maddr + mlen)
2043 			return (0);
2044 	}
2045 	if (em & F_EXT_MEM_ENABLE) {
2046 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2047 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
2048 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
2049 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2050 		    addr + len <= maddr + mlen)
2051 			return (0);
2052 	}
2053 	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
2054 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2055 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2056 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
2057 		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2058 		    addr + len <= maddr + mlen)
2059 			return (0);
2060 	}
2061 
2062 	return (EFAULT);
2063 }
2064 
2065 static int
2066 fwmtype_to_hwmtype(int mtype)
2067 {
2068 
2069 	switch (mtype) {
2070 	case FW_MEMTYPE_EDC0:
2071 		return (MEM_EDC0);
2072 	case FW_MEMTYPE_EDC1:
2073 		return (MEM_EDC1);
2074 	case FW_MEMTYPE_EXTMEM:
2075 		return (MEM_MC0);
2076 	case FW_MEMTYPE_EXTMEM1:
2077 		return (MEM_MC1);
2078 	default:
2079 		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2080 	}
2081 }
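
/*
 * Note: MEM_MC0 is expected to be an alias for MEM_MC in the driver's memory
 * type enum, which is why validate_mt_off_len below can switch on MEM_MC for
 * the FW_MEMTYPE_EXTMEM translation above.
 */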
2082 
2083 /*
2084  * Verify that the memory range specified by the memtype/offset/len pair is
2085  * valid and lies entirely within the memtype specified.  The global address of
2086  * the start of the range is returned in addr.
2087  */
2088 static int
2089 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2090     uint32_t *addr)
2091 {
2092 	uint32_t em, addr_len, maddr, mlen;
2093 
2094 	/* Memory can only be accessed in naturally aligned 4 byte units */
2095 	if (off & 3 || len & 3 || len == 0)
2096 		return (EINVAL);
2097 
2098 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2099 	switch (fwmtype_to_hwmtype(mtype)) {
2100 	case MEM_EDC0:
2101 		if (!(em & F_EDRAM0_ENABLE))
2102 			return (EINVAL);
2103 		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2104 		maddr = G_EDRAM0_BASE(addr_len) << 20;
2105 		mlen = G_EDRAM0_SIZE(addr_len) << 20;
2106 		break;
2107 	case MEM_EDC1:
2108 		if (!(em & F_EDRAM1_ENABLE))
2109 			return (EINVAL);
2110 		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2111 		maddr = G_EDRAM1_BASE(addr_len) << 20;
2112 		mlen = G_EDRAM1_SIZE(addr_len) << 20;
2113 		break;
2114 	case MEM_MC:
2115 		if (!(em & F_EXT_MEM_ENABLE))
2116 			return (EINVAL);
2117 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2118 		maddr = G_EXT_MEM_BASE(addr_len) << 20;
2119 		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
2120 		break;
2121 	case MEM_MC1:
2122 		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
2123 			return (EINVAL);
2124 		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2125 		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2126 		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
2127 		break;
2128 	default:
2129 		return (EINVAL);
2130 	}
2131 
2132 	if (mlen > 0 && off < mlen && off + len <= mlen) {
2133 		*addr = maddr + off;	/* global address */
2134 		return (0);
2135 	}
2136 
2137 	return (EFAULT);
2138 }
2139 
2140 static void
2141 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
2142 {
2143 	const struct memwin *mw;
2144 
2145 	if (is_t4(sc)) {
2146 		KASSERT(win >= 0 && win < nitems(t4_memwin),
2147 		    ("%s: incorrect memwin# (%d)", __func__, win));
2148 		mw = &t4_memwin[win];
2149 	} else {
2150 		KASSERT(win >= 0 && win < nitems(t5_memwin),
2151 		    ("%s: incorrect memwin# (%d)", __func__, win));
2152 		mw = &t5_memwin[win];
2153 	}
2154 
2155 	if (base != NULL)
2156 		*base = mw->base;
2157 	if (aperture != NULL)
2158 		*aperture = mw->aperture;
2159 }
2160 
2161 /*
2162  * Positions the memory window such that it can be used to access the specified
2163  * address in the chip's address space.  The return value is the offset of addr
2164  * from the start of the window.
2165  */
2166 static uint32_t
2167 position_memwin(struct adapter *sc, int n, uint32_t addr)
2168 {
2169 	uint32_t start, pf;
2170 	uint32_t reg;
2171 
2172 	KASSERT(n >= 0 && n <= 3,
2173 	    ("%s: invalid window %d.", __func__, n));
2174 	KASSERT((addr & 3) == 0,
2175 	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
2176 
2177 	if (is_t4(sc)) {
2178 		pf = 0;
2179 		start = addr & ~0xf;	/* start must be 16B aligned */
2180 	} else {
2181 		pf = V_PFNUM(sc->pf);
2182 		start = addr & ~0x7f;	/* start must be 128B aligned */
2183 	}
2184 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
2185 
2186 	t4_write_reg(sc, reg, start | pf);
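	/* Read back to flush the write before the window is used. */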
2187 	t4_read_reg(sc, reg);
2188 
2189 	return (addr - start);
2190 }
2191 
2192 static int
2193 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2194     struct intrs_and_queues *iaq)
2195 {
2196 	int rc, itype, navail, nrxq10g, nrxq1g, n;
2197 	int nofldrxq10g = 0, nofldrxq1g = 0;
2198 	int nnmrxq10g = 0, nnmrxq1g = 0;
2199 
2200 	bzero(iaq, sizeof(*iaq));
2201 
2202 	iaq->ntxq10g = t4_ntxq10g;
2203 	iaq->ntxq1g = t4_ntxq1g;
2204 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2205 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2206 	iaq->rsrv_noflowq = t4_rsrv_noflowq;
2207 #ifdef TCP_OFFLOAD
2208 	if (is_offload(sc)) {
2209 		iaq->nofldtxq10g = t4_nofldtxq10g;
2210 		iaq->nofldtxq1g = t4_nofldtxq1g;
2211 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2212 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2213 	}
2214 #endif
2215 #ifdef DEV_NETMAP
2216 	iaq->nnmtxq10g = t4_nnmtxq10g;
2217 	iaq->nnmtxq1g = t4_nnmtxq1g;
2218 	iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
2219 	iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
2220 #endif
2221 
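	/*
	 * Try interrupt types in order of preference: MSI-X, then MSI, then
	 * INTx.  Note that MSI vectors can only be allocated in power-of-2
	 * counts, hence the powerof2() checks on the INTR_MSI paths below.
	 */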
2222 	for (itype = INTR_MSIX; itype; itype >>= 1) {
2223 
2224 		if ((itype & t4_intr_types) == 0)
2225 			continue;	/* not allowed */
2226 
2227 		if (itype == INTR_MSIX)
2228 			navail = pci_msix_count(sc->dev);
2229 		else if (itype == INTR_MSI)
2230 			navail = pci_msi_count(sc->dev);
2231 		else
2232 			navail = 1;
2233 restart:
2234 		if (navail == 0)
2235 			continue;
2236 
2237 		iaq->intr_type = itype;
2238 		iaq->intr_flags_10g = 0;
2239 		iaq->intr_flags_1g = 0;
2240 
2241 		/*
2242 		 * Best option: an interrupt vector for errors, one for the
2243 		 * firmware event queue, and one for every rxq (NIC, TOE, and
2244 		 * netmap).
2245 		 */
2246 		iaq->nirq = T4_EXTRA_INTR;
2247 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
2248 		iaq->nirq += n10g * 2 * (num_vis - 1);
2249 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
2250 		iaq->nirq += n1g * 2 * (num_vis - 1);
2251 		if (iaq->nirq <= navail &&
2252 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
2253 			iaq->intr_flags_10g = INTR_ALL;
2254 			iaq->intr_flags_1g = INTR_ALL;
2255 			goto allocate;
2256 		}
2257 
2258 		/*
2259 		 * Second best option: a vector for errors, one for the firmware
2260 		 * event queue, and vectors for either all the NIC rx queues or
2261 		 * all the TOE rx queues.  The queues that don't get vectors
2262 		 * will forward their interrupts to those that do.
2263 		 *
2264 		 * Note: netmap rx queues cannot be created early and so they
2265 		 * can't be set up to receive forwarded interrupts for others.
2266 		 */
2267 		iaq->nirq = T4_EXTRA_INTR;
2268 		if (nrxq10g >= nofldrxq10g) {
2269 			iaq->intr_flags_10g = INTR_RXQ;
2270 			iaq->nirq += n10g * nrxq10g;
2271 			iaq->nirq += n10g * (num_vis - 1);
2272 #ifdef DEV_NETMAP
2273 			iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
2274 #endif
2275 		} else {
2276 			iaq->intr_flags_10g = INTR_OFLD_RXQ;
2277 			iaq->nirq += n10g * nofldrxq10g;
2278 #ifdef DEV_NETMAP
2279 			iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
2280 #endif
2281 		}
2282 		if (nrxq1g >= nofldrxq1g) {
2283 			iaq->intr_flags_1g = INTR_RXQ;
2284 			iaq->nirq += n1g * nrxq1g;
2285 			iaq->nirq += n1g * (num_vis - 1);
2286 #ifdef DEV_NETMAP
2287 			iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
2288 #endif
2289 		} else {
2290 			iaq->intr_flags_1g = INTR_OFLD_RXQ;
2291 			iaq->nirq += n1g * nofldrxq1g;
2292 #ifdef DEV_NETMAP
2293 			iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
2294 #endif
2295 		}
2296 		if (iaq->nirq <= navail &&
2297 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
2298 			goto allocate;
2299 
2300 		/*
2301 		 * Next best option: an interrupt vector for errors, one for the
2302 		 * firmware event queue, and at least one per VI.  At this
2303 		 * point we know we'll have to downsize nrxq and/or nofldrxq
2304 		 * and/or nnmrxq to fit what's available to us.
2305 		 */
2306 		iaq->nirq = T4_EXTRA_INTR;
2307 		iaq->nirq += (n10g + n1g) * num_vis;
2308 		if (iaq->nirq <= navail) {
2309 			int leftover = navail - iaq->nirq;
2310 
2311 			if (n10g > 0) {
2312 				int target = max(nrxq10g, nofldrxq10g);
2313 
2314 				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2315 				    INTR_RXQ : INTR_OFLD_RXQ;
2316 
2317 				n = 1;
2318 				while (n < target && leftover >= n10g) {
2319 					leftover -= n10g;
2320 					iaq->nirq += n10g;
2321 					n++;
2322 				}
2323 				iaq->nrxq10g = min(n, nrxq10g);
2324 #ifdef TCP_OFFLOAD
2325 				iaq->nofldrxq10g = min(n, nofldrxq10g);
2326 #endif
2327 #ifdef DEV_NETMAP
2328 				iaq->nnmrxq10g = min(n, nnmrxq10g);
2329 #endif
2330 			}
2331 
2332 			if (n1g > 0) {
2333 				int target = max(nrxq1g, nofldrxq1g);
2334 
2335 				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2336 				    INTR_RXQ : INTR_OFLD_RXQ;
2337 
2338 				n = 1;
2339 				while (n < target && leftover >= n1g) {
2340 					leftover -= n1g;
2341 					iaq->nirq += n1g;
2342 					n++;
2343 				}
2344 				iaq->nrxq1g = min(n, nrxq1g);
2345 #ifdef TCP_OFFLOAD
2346 				iaq->nofldrxq1g = min(n, nofldrxq1g);
2347 #endif
2348 #ifdef DEV_NETMAP
2349 				iaq->nnmrxq1g = min(n, nnmrxq1g);
2350 #endif
2351 			}
2352 
2353 			if (itype != INTR_MSI || powerof2(iaq->nirq))
2354 				goto allocate;
2355 		}
2356 
2357 		/*
2358 		 * Least desirable option: one interrupt vector for everything.
2359 		 */
2360 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2361 		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2362 #ifdef TCP_OFFLOAD
2363 		if (is_offload(sc))
2364 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2365 #endif
2366 #ifdef DEV_NETMAP
2367 		iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
2368 #endif
2369 
2370 allocate:
2371 		navail = iaq->nirq;
2372 		rc = 0;
2373 		if (itype == INTR_MSIX)
2374 			rc = pci_alloc_msix(sc->dev, &navail);
2375 		else if (itype == INTR_MSI)
2376 			rc = pci_alloc_msi(sc->dev, &navail);
2377 
2378 		if (rc == 0) {
2379 			if (navail == iaq->nirq)
2380 				return (0);
2381 
2382 			/*
2383 			 * Didn't get the number requested.  Use whatever number
2384 			 * the kernel is willing to allocate (it's in navail).
2385 			 */
2386 			device_printf(sc->dev, "fewer vectors than requested, "
2387 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2388 			    itype, iaq->nirq, navail);
2389 			pci_release_msi(sc->dev);
2390 			goto restart;
2391 		}
2392 
2393 		device_printf(sc->dev,
2394 		    "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
2395 		    itype, rc, iaq->nirq, navail);
2396 	}
2397 
2398 	device_printf(sc->dev,
2399 	    "failed to find a usable interrupt type.  "
2400 	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
2401 	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2402 
2403 	return (ENXIO);
2404 }
2405 
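/*
 * FW_VERSION packs a firmware's major/minor/micro/build numbers into a single
 * 32-bit word with the same layout as fw_hdr.fw_ver, so bundled and on-card
 * versions can be compared directly.
 */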
2406 #define FW_VERSION(chip) ( \
2407     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2408     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2409     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2410     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2411 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2412 
2413 struct fw_info {
2414 	uint8_t chip;
2415 	char *kld_name;
2416 	char *fw_mod_name;
2417 	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
2418 } fw_info[] = {
2419 	{
2420 		.chip = CHELSIO_T4,
2421 		.kld_name = "t4fw_cfg",
2422 		.fw_mod_name = "t4fw",
2423 		.fw_hdr = {
2424 			.chip = FW_HDR_CHIP_T4,
2425 			.fw_ver = htobe32_const(FW_VERSION(T4)),
2426 			.intfver_nic = FW_INTFVER(T4, NIC),
2427 			.intfver_vnic = FW_INTFVER(T4, VNIC),
2428 			.intfver_ofld = FW_INTFVER(T4, OFLD),
2429 			.intfver_ri = FW_INTFVER(T4, RI),
2430 			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2431 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
2432 			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2433 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
2434 		},
2435 	}, {
2436 		.chip = CHELSIO_T5,
2437 		.kld_name = "t5fw_cfg",
2438 		.fw_mod_name = "t5fw",
2439 		.fw_hdr = {
2440 			.chip = FW_HDR_CHIP_T5,
2441 			.fw_ver = htobe32_const(FW_VERSION(T5)),
2442 			.intfver_nic = FW_INTFVER(T5, NIC),
2443 			.intfver_vnic = FW_INTFVER(T5, VNIC),
2444 			.intfver_ofld = FW_INTFVER(T5, OFLD),
2445 			.intfver_ri = FW_INTFVER(T5, RI),
2446 			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2447 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
2448 			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2449 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
2450 		},
2451 	}
2452 };
2453 
2454 static struct fw_info *
2455 find_fw_info(int chip)
2456 {
2457 	int i;
2458 
2459 	for (i = 0; i < nitems(fw_info); i++) {
2460 		if (fw_info[i].chip == chip)
2461 			return (&fw_info[i]);
2462 	}
2463 	return (NULL);
2464 }
2465 
2466 /*
2467  * Is the given firmware API compatible with the one the driver was compiled
2468  * with?
2469  */
2470 static int
2471 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2472 {
2473 
2474 	/* short circuit if it's the exact same firmware version */
2475 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2476 		return (1);
2477 
2478 	/*
2479 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
2480 	 * features that are supported in the driver.
2481 	 */
2482 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2483 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2484 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2485 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2486 		return (1);
2487 #undef SAME_INTF
2488 
2489 	return (0);
2490 }
2491 
2492 /*
2493  * The firmware in the KLD is usable, but should it be installed?  k and c are
2494  * the packed 32-bit KLD and card firmware versions.  This routine explains
2495  * itself in detail if it indicates the KLD firmware should be installed.
2496  */
2497 static int
2498 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2499 {
2500 	const char *reason;
2501 
2502 	if (!card_fw_usable) {
2503 		reason = "incompatible or unusable";
2504 		goto install;
2505 	}
2506 
2507 	if (k > c) {
2508 		reason = "older than the version bundled with this driver";
2509 		goto install;
2510 	}
2511 
2512 	if (t4_fw_install == 2 && k != c) {
2513 		reason = "different than the version bundled with this driver";
2514 		goto install;
2515 	}
2516 
2517 	return (0);
2518 
2519 install:
2520 	if (t4_fw_install == 0) {
2521 		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2522 		    "but the driver is prohibited from installing a different "
2523 		    "firmware on the card.\n",
2524 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2525 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2526 
2527 		return (0);
2528 	}
2529 
2530 	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2531 	    "installing firmware %u.%u.%u.%u on card.\n",
2532 	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2533 	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2534 	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2535 	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2536 
2537 	return (1);
2538 }

2539 /*
2540  * Establish contact with the firmware and determine if we are the master driver
2541  * or not, and whether we are responsible for chip initialization.
2542  */
2543 static int
2544 prep_firmware(struct adapter *sc)
2545 {
2546 	const struct firmware *fw = NULL, *default_cfg;
2547 	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2548 	enum dev_state state;
2549 	struct fw_info *fw_info;
2550 	struct fw_hdr *card_fw;		/* fw on the card */
2551 	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2552 	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2553 					   against */
2554 
2555 	/* Contact firmware. */
2556 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2557 	if (rc < 0 || state == DEV_STATE_ERR) {
2558 		rc = -rc;
2559 		device_printf(sc->dev,
2560 		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2561 		return (rc);
2562 	}
2563 	pf = rc;
2564 	if (pf == sc->mbox)
2565 		sc->flags |= MASTER_PF;
2566 	else if (state == DEV_STATE_UNINIT) {
2567 		/*
2568 		 * We didn't get to be the master so we definitely won't be
2569 		 * configuring the chip.  It's a bug if someone else hasn't
2570 		 * configured it already.
2571 		 */
2572 		device_printf(sc->dev, "couldn't be master(%d), "
2573 		    "device not already initialized either(%d).\n", rc, state);
2574 		return (EDOOFUS);
2575 	}
2576 
2577 	/* This is the firmware whose headers the driver was compiled against */
2578 	fw_info = find_fw_info(chip_id(sc));
2579 	if (fw_info == NULL) {
2580 		device_printf(sc->dev,
2581 		    "unable to look up firmware information for chip %d.\n",
2582 		    chip_id(sc));
2583 		return (EINVAL);
2584 	}
2585 	drv_fw = &fw_info->fw_hdr;
2586 
2587 	/*
2588 	 * The firmware KLD contains many modules.  The KLD name is also the
2589 	 * name of the module that contains the default config file.
2590 	 */
2591 	default_cfg = firmware_get(fw_info->kld_name);
2592 
2593 	/* Read the header of the firmware on the card */
2594 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2595 	rc = -t4_read_flash(sc, FLASH_FW_START,
2596 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2597 	if (rc == 0)
2598 		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2599 	else {
2600 		device_printf(sc->dev,
2601 		    "Unable to read card's firmware header: %d\n", rc);
2602 		card_fw_usable = 0;
2603 	}
2604 
2605 	/* This is the firmware in the KLD */
2606 	fw = firmware_get(fw_info->fw_mod_name);
2607 	if (fw != NULL) {
2608 		kld_fw = (const void *)fw->data;
2609 		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2610 	} else {
2611 		kld_fw = NULL;
2612 		kld_fw_usable = 0;
2613 	}
2614 
2615 	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2616 	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2617 		/*
2618 		 * Common case: the firmware on the card is an exact match and
2619 		 * the KLD is an exact match too, or the KLD is
2620 		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2621 		 * here -- use cxgbetool loadfw if you want to reinstall the
2622 		 * same firmware as the one on the card.
2623 		 */
2624 	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2625 	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2626 	    be32toh(card_fw->fw_ver))) {
2627 
2628 		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2629 		if (rc != 0) {
2630 			device_printf(sc->dev,
2631 			    "failed to install firmware: %d\n", rc);
2632 			goto done;
2633 		}
2634 
2635 		/* Installed successfully, update the cached header too. */
2636 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2637 		card_fw_usable = 1;
2638 		need_fw_reset = 0;	/* already reset as part of load_fw */
2639 	}
2640 
2641 	if (!card_fw_usable) {
2642 		uint32_t d, c, k;
2643 
2644 		d = ntohl(drv_fw->fw_ver);
2645 		c = ntohl(card_fw->fw_ver);
2646 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2647 
2648 		device_printf(sc->dev, "Cannot find a usable firmware: "
2649 		    "fw_install %d, chip state %d, "
2650 		    "driver compiled with %d.%d.%d.%d, "
2651 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2652 		    t4_fw_install, state,
2653 		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2654 		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2655 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2656 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2657 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2658 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2659 		rc = EINVAL;
2660 		goto done;
2661 	}
2662 
2663 	/* We're using whatever's on the card and it's known to be good. */
2664 	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2665 	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2666 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2667 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2668 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2669 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2670 	t4_get_tp_version(sc, &sc->params.tp_vers);
2671 
2672 	/* Reset device */
2673 	if (need_fw_reset &&
2674 	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2675 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2676 		if (rc != ETIMEDOUT && rc != EIO)
2677 			t4_fw_bye(sc, sc->mbox);
2678 		goto done;
2679 	}
2680 	sc->flags |= FW_OK;
2681 
2682 	rc = get_params__pre_init(sc);
2683 	if (rc != 0)
2684 		goto done; /* error message displayed already */
2685 
2686 	/* Partition adapter resources as specified in the config file. */
2687 	if (state == DEV_STATE_UNINIT) {
2688 
2689 		KASSERT(sc->flags & MASTER_PF,
2690 		    ("%s: trying to change chip settings when not master.",
2691 		    __func__));
2692 
2693 		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2694 		if (rc != 0)
2695 			goto done;	/* error message displayed already */
2696 
2697 		t4_tweak_chip_settings(sc);
2698 
2699 		/* get basic stuff going */
2700 		rc = -t4_fw_initialize(sc, sc->mbox);
2701 		if (rc != 0) {
2702 			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2703 			goto done;
2704 		}
2705 	} else {
2706 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2707 		sc->cfcsum = 0;
2708 	}
2709 
2710 done:
2711 	free(card_fw, M_CXGBE);
2712 	if (fw != NULL)
2713 		firmware_put(fw, FIRMWARE_UNLOAD);
2714 	if (default_cfg != NULL)
2715 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2716 
2717 	return (rc);
2718 }
2719 
2720 #define FW_PARAM_DEV(param) \
2721 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2722 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2723 #define FW_PARAM_PFVF(param) \
2724 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2725 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2726 
2727 /*
2728  * Partition chip resources for use between various PFs, VFs, etc.
2729  */
2730 static int
2731 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2732     const char *name_prefix)
2733 {
2734 	const struct firmware *cfg = NULL;
2735 	int rc = 0;
2736 	struct fw_caps_config_cmd caps;
2737 	uint32_t mtype, moff, finicsum, cfcsum;
2738 
2739 	/*
2740 	 * Figure out what configuration file to use.  Pick the default config
2741 	 * file for the card if the user hasn't specified one explicitly.
2742 	 */
2743 	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2744 	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2745 		/* Card specific overrides go here. */
2746 		if (pci_get_device(sc->dev) == 0x440a)
2747 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2748 		if (is_fpga(sc))
2749 			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2750 	}
2751 
2752 	/*
2753 	 * We need to load another module if the profile is anything except
2754 	 * "default" or "flash".
2755 	 */
2756 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2757 	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2758 		char s[32];
2759 
2760 		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2761 		cfg = firmware_get(s);
2762 		if (cfg == NULL) {
2763 			if (default_cfg != NULL) {
2764 				device_printf(sc->dev,
2765 				    "unable to load module \"%s\" for "
2766 				    "configuration profile \"%s\", will use "
2767 				    "the default config file instead.\n",
2768 				    s, sc->cfg_file);
2769 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2770 				    "%s", DEFAULT_CF);
2771 			} else {
2772 				device_printf(sc->dev,
2773 				    "unable to load module \"%s\" for "
2774 				    "configuration profile \"%s\", will use "
2775 				    "the config file on the card's flash "
2776 				    "instead.\n", s, sc->cfg_file);
2777 				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2778 				    "%s", FLASH_CF);
2779 			}
2780 		}
2781 	}
2782 
2783 	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2784 	    default_cfg == NULL) {
2785 		device_printf(sc->dev,
2786 		    "default config file not available, will use the config "
2787 		    "file on the card's flash instead.\n");
2788 		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2789 	}
2790 
2791 	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2792 		u_int cflen, i, n;
2793 		const uint32_t *cfdata;
2794 		uint32_t param, val, addr, off, mw_base, mw_aperture;
2795 
2796 		KASSERT(cfg != NULL || default_cfg != NULL,
2797 		    ("%s: no config to upload", __func__));
2798 
2799 		/*
2800 		 * Ask the firmware where it wants us to upload the config file.
2801 		 */
2802 		param = FW_PARAM_DEV(CF);
2803 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2804 		if (rc != 0) {
2805 			/* No support for config file?  Shouldn't happen. */
2806 			device_printf(sc->dev,
2807 			    "failed to query config file location: %d.\n", rc);
2808 			goto done;
2809 		}
2810 		mtype = G_FW_PARAMS_PARAM_Y(val);
2811 		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2812 
2813 		/*
2814 		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2815 		 * useless stuffing/comments at the end of the config file so
2816 		 * it's ok to simply throw away the last remaining bytes when
2817 		 * the config file is not an exact multiple of 4.  This also
2818 		 * helps with the validate_mt_off_len check.
2819 		 */
2820 		if (cfg != NULL) {
2821 			cflen = cfg->datasize & ~3;
2822 			cfdata = cfg->data;
2823 		} else {
2824 			cflen = default_cfg->datasize & ~3;
2825 			cfdata = default_cfg->data;
2826 		}
2827 
2828 		if (cflen > FLASH_CFG_MAX_SIZE) {
2829 			device_printf(sc->dev,
2830 			    "config file too long (%d, max allowed is %d).  "
2831 			    "Will try to use the config on the card, if any.\n",
2832 			    cflen, FLASH_CFG_MAX_SIZE);
2833 			goto use_config_on_flash;
2834 		}
2835 
2836 		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2837 		if (rc != 0) {
2838 			device_printf(sc->dev,
2839 			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2840 			    "Will try to use the config on the card, if any.\n",
2841 			    __func__, mtype, moff, cflen, rc);
2842 			goto use_config_on_flash;
2843 		}
2844 
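		/*
		 * Copy the config file into card memory through memory window
		 * 2, one aperture-sized chunk at a time.
		 */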
2845 		memwin_info(sc, 2, &mw_base, &mw_aperture);
2846 		while (cflen) {
2847 			off = position_memwin(sc, 2, addr);
2848 			n = min(cflen, mw_aperture - off);
2849 			for (i = 0; i < n; i += 4)
2850 				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2851 			cflen -= n;
2852 			addr += n;
2853 		}
2854 	} else {
2855 use_config_on_flash:
2856 		mtype = FW_MEMTYPE_FLASH;
2857 		moff = t4_flash_cfg_addr(sc);
2858 	}
2859 
2860 	bzero(&caps, sizeof(caps));
2861 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2862 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2863 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2864 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2865 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2866 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2867 	if (rc != 0) {
2868 		device_printf(sc->dev,
2869 		    "failed to pre-process config file: %d "
2870 		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2871 		goto done;
2872 	}
2873 
2874 	finicsum = be32toh(caps.finicsum);
2875 	cfcsum = be32toh(caps.cfcsum);
2876 	if (finicsum != cfcsum) {
2877 		device_printf(sc->dev,
2878 		    "WARNING: config file checksum mismatch: %08x %08x\n",
2879 		    finicsum, cfcsum);
2880 	}
2881 	sc->cfcsum = cfcsum;
2882 
2883 #define LIMIT_CAPS(x) do { \
2884 	caps.x &= htobe16(t4_##x##_allowed); \
2885 } while (0)
2886 
2887 	/*
2888 	 * Let the firmware know what features will (not) be used so it can tune
2889 	 * things accordingly.
2890 	 */
2891 	LIMIT_CAPS(linkcaps);
2892 	LIMIT_CAPS(niccaps);
2893 	LIMIT_CAPS(toecaps);
2894 	LIMIT_CAPS(rdmacaps);
2895 	LIMIT_CAPS(iscsicaps);
2896 	LIMIT_CAPS(fcoecaps);
2897 #undef LIMIT_CAPS
2898 
2899 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2900 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2901 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2902 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2903 	if (rc != 0) {
2904 		device_printf(sc->dev,
2905 		    "failed to process config file: %d.\n", rc);
2906 	}
2907 done:
2908 	if (cfg != NULL)
2909 		firmware_put(cfg, FIRMWARE_UNLOAD);
2910 	return (rc);
2911 }
2912 
2913 /*
2914  * Retrieve parameters that are needed (or nice to have) very early.
2915  */
2916 static int
2917 get_params__pre_init(struct adapter *sc)
2918 {
2919 	int rc;
2920 	uint32_t param[2], val[2];
2921 	struct fw_devlog_cmd cmd;
2922 	struct devlog_params *dlog = &sc->params.devlog;
2923 
2924 	param[0] = FW_PARAM_DEV(PORTVEC);
2925 	param[1] = FW_PARAM_DEV(CCLK);
2926 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2927 	if (rc != 0) {
2928 		device_printf(sc->dev,
2929 		    "failed to query parameters (pre_init): %d.\n", rc);
2930 		return (rc);
2931 	}
2932 
2933 	sc->params.portvec = val[0];
2934 	sc->params.nports = bitcount32(val[0]);
2935 	sc->params.vpd.cclk = val[1];
2936 
2937 	/* Read device log parameters. */
2938 	bzero(&cmd, sizeof(cmd));
2939 	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2940 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2941 	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2942 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2943 	if (rc != 0) {
2944 		device_printf(sc->dev,
2945 		    "failed to get devlog parameters: %d.\n", rc);
2946 		bzero(dlog, sizeof (*dlog));
2947 		rc = 0;	/* devlog isn't critical for device operation */
2948 	} else {
2949 		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2950 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2951 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2952 		dlog->size = be32toh(cmd.memsize_devlog);
2953 	}
2954 
2955 	return (rc);
2956 }
2957 
2958 /*
2959  * Retrieve various parameters that are of interest to the driver.  The device
2960  * has been initialized by the firmware at this point.
2961  */
2962 static int
2963 get_params__post_init(struct adapter *sc)
2964 {
2965 	int rc;
2966 	uint32_t param[7], val[7];
2967 	struct fw_caps_config_cmd caps;
2968 
2969 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2970 	param[1] = FW_PARAM_PFVF(EQ_START);
2971 	param[2] = FW_PARAM_PFVF(FILTER_START);
2972 	param[3] = FW_PARAM_PFVF(FILTER_END);
2973 	param[4] = FW_PARAM_PFVF(L2T_START);
2974 	param[5] = FW_PARAM_PFVF(L2T_END);
2975 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2976 	if (rc != 0) {
2977 		device_printf(sc->dev,
2978 		    "failed to query parameters (post_init): %d.\n", rc);
2979 		return (rc);
2980 	}
2981 
2982 	sc->sge.iq_start = val[0];
2983 	sc->sge.eq_start = val[1];
2984 	sc->tids.ftid_base = val[2];
2985 	sc->tids.nftids = val[3] - val[2] + 1;
2986 	sc->params.ftid_min = val[2];
2987 	sc->params.ftid_max = val[3];
2988 	sc->vres.l2t.start = val[4];
2989 	sc->vres.l2t.size = val[5] - val[4] + 1;
2990 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2991 	    ("%s: L2 table size (%u) larger than expected (%u)",
2992 	    __func__, sc->vres.l2t.size, L2T_SIZE));
2993 
2994 	/* get capabilities */
2995 	bzero(&caps, sizeof(caps));
2996 	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2997 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2998 	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2999 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3000 	if (rc != 0) {
3001 		device_printf(sc->dev,
3002 		    "failed to get card capabilities: %d.\n", rc);
3003 		return (rc);
3004 	}
3005 
3006 #define READ_CAPS(x) do { \
3007 	sc->x = htobe16(caps.x); \
3008 } while (0)
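/*
 * The caps words arrive big-endian from the firmware.  htobe16 works as the
 * byte swap here because a 16-bit swap is its own inverse (it has the same
 * effect as be16toh).
 */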
3009 	READ_CAPS(linkcaps);
3010 	READ_CAPS(niccaps);
3011 	READ_CAPS(toecaps);
3012 	READ_CAPS(rdmacaps);
3013 	READ_CAPS(iscsicaps);
3014 	READ_CAPS(fcoecaps);
3015 
3016 	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3017 		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3018 		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3019 		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3020 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3021 		if (rc != 0) {
3022 			device_printf(sc->dev,
3023 			    "failed to query NIC parameters: %d.\n", rc);
3024 			return (rc);
3025 		}
3026 		sc->tids.etid_base = val[0];
3027 		sc->params.etid_min = val[0];
3028 		sc->tids.netids = val[1] - val[0] + 1;
3029 		sc->params.netids = sc->tids.netids;
3030 		sc->params.eo_wr_cred = val[2];
3031 		sc->params.ethoffload = 1;
3032 	}
3033 
3034 	if (sc->toecaps) {
3035 		/* query offload-related parameters */
3036 		param[0] = FW_PARAM_DEV(NTID);
3037 		param[1] = FW_PARAM_PFVF(SERVER_START);
3038 		param[2] = FW_PARAM_PFVF(SERVER_END);
3039 		param[3] = FW_PARAM_PFVF(TDDP_START);
3040 		param[4] = FW_PARAM_PFVF(TDDP_END);
3041 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3042 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3043 		if (rc != 0) {
3044 			device_printf(sc->dev,
3045 			    "failed to query TOE parameters: %d.\n", rc);
3046 			return (rc);
3047 		}
3048 		sc->tids.ntids = val[0];
3049 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3050 		sc->tids.stid_base = val[1];
3051 		sc->tids.nstids = val[2] - val[1] + 1;
3052 		sc->vres.ddp.start = val[3];
3053 		sc->vres.ddp.size = val[4] - val[3] + 1;
3054 		sc->params.ofldq_wr_cred = val[5];
3055 		sc->params.offload = 1;
3056 	}
3057 	if (sc->rdmacaps) {
3058 		param[0] = FW_PARAM_PFVF(STAG_START);
3059 		param[1] = FW_PARAM_PFVF(STAG_END);
3060 		param[2] = FW_PARAM_PFVF(RQ_START);
3061 		param[3] = FW_PARAM_PFVF(RQ_END);
3062 		param[4] = FW_PARAM_PFVF(PBL_START);
3063 		param[5] = FW_PARAM_PFVF(PBL_END);
3064 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3065 		if (rc != 0) {
3066 			device_printf(sc->dev,
3067 			    "failed to query RDMA parameters(1): %d.\n", rc);
3068 			return (rc);
3069 		}
3070 		sc->vres.stag.start = val[0];
3071 		sc->vres.stag.size = val[1] - val[0] + 1;
3072 		sc->vres.rq.start = val[2];
3073 		sc->vres.rq.size = val[3] - val[2] + 1;
3074 		sc->vres.pbl.start = val[4];
3075 		sc->vres.pbl.size = val[5] - val[4] + 1;
3076 
3077 		param[0] = FW_PARAM_PFVF(SQRQ_START);
3078 		param[1] = FW_PARAM_PFVF(SQRQ_END);
3079 		param[2] = FW_PARAM_PFVF(CQ_START);
3080 		param[3] = FW_PARAM_PFVF(CQ_END);
3081 		param[4] = FW_PARAM_PFVF(OCQ_START);
3082 		param[5] = FW_PARAM_PFVF(OCQ_END);
3083 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3084 		if (rc != 0) {
3085 			device_printf(sc->dev,
3086 			    "failed to query RDMA parameters(2): %d.\n", rc);
3087 			return (rc);
3088 		}
3089 		sc->vres.qp.start = val[0];
3090 		sc->vres.qp.size = val[1] - val[0] + 1;
3091 		sc->vres.cq.start = val[2];
3092 		sc->vres.cq.size = val[3] - val[2] + 1;
3093 		sc->vres.ocq.start = val[4];
3094 		sc->vres.ocq.size = val[5] - val[4] + 1;
3095 	}
3096 	if (sc->iscsicaps) {
3097 		param[0] = FW_PARAM_PFVF(ISCSI_START);
3098 		param[1] = FW_PARAM_PFVF(ISCSI_END);
3099 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3100 		if (rc != 0) {
3101 			device_printf(sc->dev,
3102 			    "failed to query iSCSI parameters: %d.\n", rc);
3103 			return (rc);
3104 		}
3105 		sc->vres.iscsi.start = val[0];
3106 		sc->vres.iscsi.size = val[1] - val[0] + 1;
3107 	}
3108 
3109 	/*
3110 	 * We've got the params we wanted to query via the firmware.  Now grab
3111 	 * some others directly from the chip.
3112 	 */
3113 	rc = t4_read_chip_settings(sc);
3114 
3115 	return (rc);
3116 }
3117 
3118 static int
3119 set_params__post_init(struct adapter *sc)
3120 {
3121 	uint32_t param, val;
3122 
3123 	/* ask for encapsulated CPLs */
3124 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3125 	val = 1;
3126 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3127 
3128 	return (0);
3129 }
3130 
3131 #undef FW_PARAM_PFVF
3132 #undef FW_PARAM_DEV
3133 
3134 static void
3135 t4_set_desc(struct adapter *sc)
3136 {
3137 	char buf[128];
3138 	struct adapter_params *p = &sc->params;
3139 
3140 	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
3141 	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
3142 	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
3143 
3144 	device_set_desc_copy(sc->dev, buf);
3145 }
3146 
3147 static void
3148 build_medialist(struct port_info *pi, struct ifmedia *media)
3149 {
3150 	int m;
3151 
3152 	PORT_LOCK(pi);
3153 
3154 	ifmedia_removeall(media);
3155 
3156 	m = IFM_ETHER | IFM_FDX;
3157 
3158 	switch (pi->port_type) {
3159 	case FW_PORT_TYPE_BT_XFI:
3160 	case FW_PORT_TYPE_BT_XAUI:
3161 		ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3162 		/* fall through */
3163 
3164 	case FW_PORT_TYPE_BT_SGMII:
3165 		ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3166 		ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3167 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3168 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3169 		break;
3170 
3171 	case FW_PORT_TYPE_CX4:
3172 		ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3173 		ifmedia_set(media, m | IFM_10G_CX4);
3174 		break;
3175 
3176 	case FW_PORT_TYPE_QSFP_10G:
3177 	case FW_PORT_TYPE_SFP:
3178 	case FW_PORT_TYPE_FIBER_XFI:
3179 	case FW_PORT_TYPE_FIBER_XAUI:
3180 		switch (pi->mod_type) {
3181 
3182 		case FW_PORT_MOD_TYPE_LR:
3183 			ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3184 			ifmedia_set(media, m | IFM_10G_LR);
3185 			break;
3186 
3187 		case FW_PORT_MOD_TYPE_SR:
3188 			ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3189 			ifmedia_set(media, m | IFM_10G_SR);
3190 			break;
3191 
3192 		case FW_PORT_MOD_TYPE_LRM:
3193 			ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
3194 			ifmedia_set(media, m | IFM_10G_LRM);
3195 			break;
3196 
3197 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3198 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3199 			ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
3200 			ifmedia_set(media, m | IFM_10G_TWINAX);
3201 			break;
3202 
3203 		case FW_PORT_MOD_TYPE_NONE:
3204 			m &= ~IFM_FDX;
3205 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3206 			ifmedia_set(media, m | IFM_NONE);
3207 			break;
3208 
3209 		case FW_PORT_MOD_TYPE_NA:
3210 		case FW_PORT_MOD_TYPE_ER:
3211 		default:
3212 			device_printf(pi->dev,
3213 			    "unknown port_type (%d), mod_type (%d)\n",
3214 			    pi->port_type, pi->mod_type);
3215 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3216 			ifmedia_set(media, m | IFM_UNKNOWN);
3217 			break;
3218 		}
3219 		break;
3220 
3221 	case FW_PORT_TYPE_QSFP:
3222 		switch (pi->mod_type) {
3223 
3224 		case FW_PORT_MOD_TYPE_LR:
3225 			ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
3226 			ifmedia_set(media, m | IFM_40G_LR4);
3227 			break;
3228 
3229 		case FW_PORT_MOD_TYPE_SR:
3230 			ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
3231 			ifmedia_set(media, m | IFM_40G_SR4);
3232 			break;
3233 
3234 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3235 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3236 			ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
3237 			ifmedia_set(media, m | IFM_40G_CR4);
3238 			break;
3239 
3240 		case FW_PORT_MOD_TYPE_NONE:
3241 			m &= ~IFM_FDX;
3242 			ifmedia_add(media, m | IFM_NONE, 0, NULL);
3243 			ifmedia_set(media, m | IFM_NONE);
3244 			break;
3245 
3246 		default:
3247 			device_printf(pi->dev,
3248 			    "unknown port_type (%d), mod_type (%d)\n",
3249 			    pi->port_type, pi->mod_type);
3250 			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3251 			ifmedia_set(media, m | IFM_UNKNOWN);
3252 			break;
3253 		}
3254 		break;
3255 
3256 	default:
3257 		device_printf(pi->dev,
3258 		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
3259 		    pi->mod_type);
3260 		ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3261 		ifmedia_set(media, m | IFM_UNKNOWN);
3262 		break;
3263 	}
3264 
3265 	PORT_UNLOCK(pi);
3266 }
3267 
3268 #define FW_MAC_EXACT_CHUNK	7
3269 
3270 /*
3271  * Program the port's XGMAC based on parameters in ifnet.  The caller also
3272  * indicates which parameters should be programmed (the rest are left alone).
3273  */
3274 int
3275 update_mac_settings(struct ifnet *ifp, int flags)
3276 {
3277 	int rc = 0;
3278 	struct vi_info *vi = ifp->if_softc;
3279 	struct port_info *pi = vi->pi;
3280 	struct adapter *sc = pi->adapter;
3281 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3282 
3283 	ASSERT_SYNCHRONIZED_OP(sc);
3284 	KASSERT(flags, ("%s: not told what to update.", __func__));
3285 
3286 	if (flags & XGMAC_MTU)
3287 		mtu = ifp->if_mtu;
3288 
3289 	if (flags & XGMAC_PROMISC)
3290 		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3291 
3292 	if (flags & XGMAC_ALLMULTI)
3293 		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3294 
3295 	if (flags & XGMAC_VLANEX)
3296 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3297 
3298 	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3299 		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
3300 		    allmulti, 1, vlanex, false);
3301 		if (rc) {
3302 			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3303 			    rc);
3304 			return (rc);
3305 		}
3306 	}
3307 
3308 	if (flags & XGMAC_UCADDR) {
3309 		uint8_t ucaddr[ETHER_ADDR_LEN];
3310 
3311 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3312 		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
3313 		    ucaddr, true, true);
3314 		if (rc < 0) {
3315 			rc = -rc;
3316 			if_printf(ifp, "change_mac failed: %d\n", rc);
3317 			return (rc);
3318 		} else {
3319 			vi->xact_addr_filt = rc;
3320 			rc = 0;
3321 		}
3322 	}
3323 
3324 	if (flags & XGMAC_MCADDRS) {
3325 		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3326 		int del = 1;
3327 		uint64_t hash = 0;
3328 		struct ifmultiaddr *ifma;
3329 		int i = 0, j;
3330 
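		/*
		 * Program exact-match filters in chunks of FW_MAC_EXACT_CHUNK
		 * addresses.  del is 1 only for the first chunk so that the
		 * existing filters are cleared first; addresses that can't get
		 * an exact-match entry are accumulated into hash and installed
		 * via t4_set_addr_hash at the end.
		 */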
3331 		if_maddr_rlock(ifp);
3332 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3333 			if (ifma->ifma_addr->sa_family != AF_LINK)
3334 				continue;
3335 			mcaddr[i] =
3336 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3337 			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3338 			i++;
3339 
3340 			if (i == FW_MAC_EXACT_CHUNK) {
3341 				rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
3342 				    del, i, mcaddr, NULL, &hash, 0);
3343 				if (rc < 0) {
3344 					rc = -rc;
3345 					for (j = 0; j < i; j++) {
3346 						if_printf(ifp,
3347 						    "failed to add mc address"
3348 						    " %02x:%02x:%02x:"
3349 						    "%02x:%02x:%02x rc=%d\n",
3350 						    mcaddr[j][0], mcaddr[j][1],
3351 						    mcaddr[j][2], mcaddr[j][3],
3352 						    mcaddr[j][4], mcaddr[j][5],
3353 						    rc);
3354 					}
3355 					goto mcfail;
3356 				}
3357 				del = 0;
3358 				i = 0;
3359 			}
3360 		}
3361 		if (i > 0) {
3362 			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
3363 			    mcaddr, NULL, &hash, 0);
3364 			if (rc < 0) {
3365 				rc = -rc;
3366 				for (j = 0; j < i; j++) {
3367 					if_printf(ifp,
3368 					    "failed to add mc address"
3369 					    " %02x:%02x:%02x:"
3370 					    "%02x:%02x:%02x rc=%d\n",
3371 					    mcaddr[j][0], mcaddr[j][1],
3372 					    mcaddr[j][2], mcaddr[j][3],
3373 					    mcaddr[j][4], mcaddr[j][5],
3374 					    rc);
3375 				}
3376 				goto mcfail;
3377 			}
3378 		}
3379 
3380 		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
3381 		if (rc != 0)
3382 			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
3383 mcfail:
3384 		if_maddr_runlock(ifp);
3385 	}
3386 
3387 	return (rc);
3388 }
3389 
3390 /*
3391  * {begin|end}_synchronized_op must be called from the same thread.
3392  */
3393 int
3394 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
3395     char *wmesg)
3396 {
3397 	int rc, pri;
3398 
3399 #ifdef WITNESS
3400 	/* the caller thinks it's ok to sleep, but is it really? */
3401 	if (flags & SLEEP_OK)
3402 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3403 		    "begin_synchronized_op");
3404 #endif
3405 
3406 	if (flags & INTR_OK)
3407 		pri = PCATCH;	/* allow the sleep to be interrupted by signals */
3408 	else
3409 		pri = 0;
3410 
3411 	ADAPTER_LOCK(sc);
3412 	for (;;) {
3413 
3414 		if (vi && IS_DOOMED(vi)) {
3415 			rc = ENXIO;
3416 			goto done;
3417 		}
3418 
3419 		if (!IS_BUSY(sc)) {
3420 			rc = 0;
3421 			break;
3422 		}
3423 
3424 		if (!(flags & SLEEP_OK)) {
3425 			rc = EBUSY;
3426 			goto done;
3427 		}
3428 
3429 		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3430 			rc = EINTR;
3431 			goto done;
3432 		}
3433 	}
3434 
3435 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3436 	SET_BUSY(sc);
3437 #ifdef INVARIANTS
3438 	sc->last_op = wmesg;
3439 	sc->last_op_thr = curthread;
3440 	sc->last_op_flags = flags;
3441 #endif
3442 
3443 done:
3444 	if (!(flags & HOLD_LOCK) || rc)
3445 		ADAPTER_UNLOCK(sc);
3446 
3447 	return (rc);
3448 }
3449 
3450 /*
3451  * Tell if_ioctl and if_init that the VI is going away.  This is a
3452  * special variant of begin_synchronized_op and must be paired with a
3453  * call to end_synchronized_op.
3454  */
3455 void
3456 doom_vi(struct adapter *sc, struct vi_info *vi)
3457 {
3458 
3459 	ADAPTER_LOCK(sc);
3460 	SET_DOOMED(vi);
3461 	wakeup(&sc->flags);
3462 	while (IS_BUSY(sc))
3463 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
3464 	SET_BUSY(sc);
3465 #ifdef INVARIANTS
3466 	sc->last_op = "t4detach";
3467 	sc->last_op_thr = curthread;
3468 	sc->last_op_flags = 0;
3469 #endif
3470 	ADAPTER_UNLOCK(sc);
3471 }
3472 
3473 /*
3474  * {begin|end}_synchronized_op must be called from the same thread.
3475  */
3476 void
3477 end_synchronized_op(struct adapter *sc, int flags)
3478 {
3479 
3480 	if (flags & LOCK_HELD)
3481 		ADAPTER_LOCK_ASSERT_OWNED(sc);
3482 	else
3483 		ADAPTER_LOCK(sc);
3484 
3485 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3486 	CLR_BUSY(sc);
3487 	wakeup(&sc->flags);
3488 	ADAPTER_UNLOCK(sc);
3489 }
3490 
3491 static int
3492 cxgbe_init_synchronized(struct vi_info *vi)
3493 {
3494 	struct port_info *pi = vi->pi;
3495 	struct adapter *sc = pi->adapter;
3496 	struct ifnet *ifp = vi->ifp;
3497 	int rc = 0, i;
3498 	struct sge_txq *txq;
3499 
3500 	ASSERT_SYNCHRONIZED_OP(sc);
3501 
3502 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3503 		return (0);	/* already running */
3504 
3505 	if (!(sc->flags & FULL_INIT_DONE) &&
3506 	    ((rc = adapter_full_init(sc)) != 0))
3507 		return (rc);	/* error message displayed already */
3508 
3509 	if (!(vi->flags & VI_INIT_DONE) &&
3510 	    ((rc = vi_full_init(vi)) != 0))
3511 		return (rc); /* error message displayed already */
3512 
3513 	rc = update_mac_settings(ifp, XGMAC_ALL);
3514 	if (rc)
3515 		goto done;	/* error message displayed already */
3516 
3517 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
3518 	if (rc != 0) {
3519 		if_printf(ifp, "enable_vi failed: %d\n", rc);
3520 		goto done;
3521 	}
3522 
3523 	/*
3524 	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
3525 	 * if this changes.
3526 	 */
3527 
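	/* Mark the tx queues enabled so the transmit path accepts packets. */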
3528 	for_each_txq(vi, i, txq) {
3529 		TXQ_LOCK(txq);
3530 		txq->eq.flags |= EQ_ENABLED;
3531 		TXQ_UNLOCK(txq);
3532 	}
3533 
3534 	/*
3535 	 * The first iq of the first port to come up is used for tracing.
3536 	 */
3537 	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
3538 		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
3539 		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
3540 		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
3541 		    V_QUEUENUMBER(sc->traceq));
3542 		pi->flags |= HAS_TRACEQ;
3543 	}
3544 
3545 	/* all ok */
3546 	PORT_LOCK(pi);
3547 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3548 	pi->up_vis++;
3549 
3550 	if (pi->nvi > 1)
3551 		callout_reset(&vi->tick, hz, vi_tick, vi);
3552 	else
3553 		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
3554 	PORT_UNLOCK(pi);
3555 done:
3556 	if (rc != 0)
3557 		cxgbe_uninit_synchronized(vi);
3558 
3559 	return (rc);
3560 }
3561 
3562 /*
3563  * Idempotent.
3564  */
3565 static int
3566 cxgbe_uninit_synchronized(struct vi_info *vi)
3567 {
3568 	struct port_info *pi = vi->pi;
3569 	struct adapter *sc = pi->adapter;
3570 	struct ifnet *ifp = vi->ifp;
3571 	int rc, i;
3572 	struct sge_txq *txq;
3573 
3574 	ASSERT_SYNCHRONIZED_OP(sc);
3575 
3576 	if (!(vi->flags & VI_INIT_DONE)) {
3577 		KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
3578 		    ("uninited VI is running"));
3579 		return (0);
3580 	}
3581 
3582 	/*
3583 	 * Disable the VI so that all its data in either direction is discarded
3584 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
3585 	 * tick) intact as the TP can deliver negative advice or data that it's
3586 	 * holding in its RAM (for an offloaded connection) even after the VI is
3587 	 * disabled.
3588 	 */
3589 	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
3590 	if (rc) {
3591 		if_printf(ifp, "disable_vi failed: %d\n", rc);
3592 		return (rc);
3593 	}
3594 
3595 	for_each_txq(vi, i, txq) {
3596 		TXQ_LOCK(txq);
3597 		txq->eq.flags &= ~EQ_ENABLED;
3598 		TXQ_UNLOCK(txq);
3599 	}
3600 
3601 	PORT_LOCK(pi);
3602 	if (pi->nvi == 1)
3603 		callout_stop(&pi->tick);
3604 	else
3605 		callout_stop(&vi->tick);
3606 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3607 		PORT_UNLOCK(pi);
3608 		return (0);
3609 	}
3610 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3611 	pi->up_vis--;
3612 	if (pi->up_vis > 0) {
3613 		PORT_UNLOCK(pi);
3614 		return (0);
3615 	}
3616 	PORT_UNLOCK(pi);
3617 
3618 	pi->link_cfg.link_ok = 0;
3619 	pi->link_cfg.speed = 0;
3620 	pi->linkdnrc = -1;
3621 	t4_os_link_changed(sc, pi->port_id, 0, -1);
3622 
3623 	return (0);
3624 }
3625 
3626 /*
3627  * It is ok for this function to fail midway and return right away.  t4_detach
3628  * will walk the entire sc->irq list and clean up whatever is valid.
3629  */
3630 static int
3631 setup_intr_handlers(struct adapter *sc)
3632 {
3633 	int rc, rid, p, q, v;
3634 	char s[8];
3635 	struct irq *irq;
3636 	struct port_info *pi;
3637 	struct vi_info *vi;
3638 	struct sge_rxq *rxq;
3639 #ifdef TCP_OFFLOAD
3640 	struct sge_ofld_rxq *ofld_rxq;
3641 #endif
3642 #ifdef DEV_NETMAP
3643 	struct sge_nm_rxq *nm_rxq;
3644 #endif
3645 
3646 	/*
	 * Set up interrupts.
3648 	 */
3649 	irq = &sc->irq[0];
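	/* rid 0 is the INTx line; MSI/MSI-X vectors start at rid 1. */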
3650 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3651 	if (sc->intr_count == 1)
3652 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
3653 
3654 	/* Multiple interrupts. */
3655 	KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3656 	    ("%s: too few intr.", __func__));
3657 
3658 	/* The first one is always error intr */
3659 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3660 	if (rc != 0)
3661 		return (rc);
3662 	irq++;
3663 	rid++;
3664 
3665 	/* The second one is always the firmware event queue */
3666 	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
3667 	if (rc != 0)
3668 		return (rc);
3669 	irq++;
3670 	rid++;
3671 
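	/*
	 * The remaining vectors are handed out one per rx queue (netmap, NIC,
	 * or offload) across all the VIs of every port.
	 */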
3672 	for_each_port(sc, p) {
3673 		pi = sc->port[p];
3674 		for_each_vi(pi, v, vi) {
3675 			vi->first_intr = rid - 1;
3676 #ifdef DEV_NETMAP
3677 			if (vi->flags & VI_NETMAP) {
3678 				for_each_nm_rxq(vi, q, nm_rxq) {
3679 					snprintf(s, sizeof(s), "%d-%d", p, q);
3680 					rc = t4_alloc_irq(sc, irq, rid,
3681 					    t4_nm_intr, nm_rxq, s);
3682 					if (rc != 0)
3683 						return (rc);
3684 					irq++;
3685 					rid++;
3686 					vi->nintr++;
3687 				}
3688 				continue;
3689 			}
3690 #endif
3691 			if (vi->flags & INTR_RXQ) {
3692 				for_each_rxq(vi, q, rxq) {
3693 					if (v == 0)
3694 						snprintf(s, sizeof(s), "%d.%d",
3695 						    p, q);
3696 					else
3697 						snprintf(s, sizeof(s),
3698 						    "%d(%d).%d", p, v, q);
3699 					rc = t4_alloc_irq(sc, irq, rid,
3700 					    t4_intr, rxq, s);
3701 					if (rc != 0)
3702 						return (rc);
3703 					irq++;
3704 					rid++;
3705 					vi->nintr++;
3706 				}
3707 			}
3708 #ifdef TCP_OFFLOAD
3709 			if (vi->flags & INTR_OFLD_RXQ) {
3710 				for_each_ofld_rxq(vi, q, ofld_rxq) {
3711 					snprintf(s, sizeof(s), "%d,%d", p, q);
3712 					rc = t4_alloc_irq(sc, irq, rid,
3713 					    t4_intr, ofld_rxq, s);
3714 					if (rc != 0)
3715 						return (rc);
3716 					irq++;
3717 					rid++;
3718 					vi->nintr++;
3719 				}
3720 			}
3721 #endif
3722 		}
3723 	}
3724 	MPASS(irq == &sc->irq[sc->intr_count]);
3725 
3726 	return (0);
3727 }
3728 
3729 int
3730 adapter_full_init(struct adapter *sc)
3731 {
3732 	int rc, i;
3733 
3734 	ASSERT_SYNCHRONIZED_OP(sc);
3735 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3736 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3737 	    ("%s: FULL_INIT_DONE already", __func__));
3738 
	/*
	 * Queues that belong to the adapter (not to any particular port).
	 */
3742 	rc = t4_setup_adapter_queues(sc);
3743 	if (rc != 0)
3744 		goto done;
3745 
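	/*
	 * General purpose taskqueues, one per entry in sc->tq (presumably one
	 * per hardware channel), for work deferred from the data path.
	 */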
3746 	for (i = 0; i < nitems(sc->tq); i++) {
3747 		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3748 		    taskqueue_thread_enqueue, &sc->tq[i]);
3749 		if (sc->tq[i] == NULL) {
3750 			device_printf(sc->dev,
3751 			    "failed to allocate task queue %d\n", i);
3752 			rc = ENOMEM;
3753 			goto done;
3754 		}
3755 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3756 		    device_get_nameunit(sc->dev), i);
3757 	}
3758 
3759 	t4_intr_enable(sc);
3760 	sc->flags |= FULL_INIT_DONE;
3761 done:
3762 	if (rc != 0)
3763 		adapter_full_uninit(sc);
3764 
3765 	return (rc);
3766 }
3767 
3768 int
3769 adapter_full_uninit(struct adapter *sc)
3770 {
3771 	int i;
3772 
3773 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3774 
3775 	t4_teardown_adapter_queues(sc);
3776 
3777 	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3778 		taskqueue_free(sc->tq[i]);
3779 		sc->tq[i] = NULL;
3780 	}
3781 
3782 	sc->flags &= ~FULL_INIT_DONE;
3783 
3784 	return (0);
3785 }
3786 
3787 #ifdef RSS
3788 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
3789     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
3790     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
3791     RSS_HASHTYPE_RSS_UDP_IPV6)
3792 
3793 /* Translates kernel hash types to hardware. */
3794 static int
3795 hashconfig_to_hashen(int hashconfig)
3796 {
3797 	int hashen = 0;
3798 
3799 	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
3800 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
3801 	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
3802 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
3803 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
3804 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3805 		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3806 	}
3807 	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
3808 		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3809 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3810 	}
3811 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
3812 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3813 	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
3814 		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3815 
3816 	return (hashen);
3817 }
3818 
3819 /* Translates hardware hash types to kernel. */
3820 static int
3821 hashen_to_hashconfig(int hashen)
3822 {
3823 	int hashconfig = 0;
3824 
3825 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
3826 		/*
3827 		 * If UDP hashing was enabled it must have been enabled for
3828 		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
		 * enabling any 4-tuple hash is a nonsensical configuration.
3830 		 */
3831 		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3832 		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
3833 
3834 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3835 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
3836 		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3837 			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
3838 	}
3839 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3840 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
3841 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3842 		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
3843 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3844 		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
3845 	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3846 		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
3847 
3848 	return (hashconfig);
3849 }
3850 #endif
3851 
3852 int
3853 vi_full_init(struct vi_info *vi)
3854 {
3855 	struct adapter *sc = vi->pi->adapter;
3856 	struct ifnet *ifp = vi->ifp;
3857 	uint16_t *rss;
3858 	struct sge_rxq *rxq;
3859 	int rc, i, j, hashen;
3860 #ifdef RSS
3861 	int nbuckets = rss_getnumbuckets();
3862 	int hashconfig = rss_gethashconfig();
3863 	int extra;
3864 	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3865 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3866 #endif
3867 
3868 	ASSERT_SYNCHRONIZED_OP(sc);
3869 	KASSERT((vi->flags & VI_INIT_DONE) == 0,
3870 	    ("%s: VI_INIT_DONE already", __func__));
3871 
3872 	sysctl_ctx_init(&vi->ctx);
3873 	vi->flags |= VI_SYSCTL_CTX;
3874 
3875 	/*
3876 	 * Allocate tx/rx/fl queues for this VI.
3877 	 */
3878 	rc = t4_setup_vi_queues(vi);
3879 	if (rc != 0)
3880 		goto done;	/* error message displayed already */
3881 
3882 #ifdef DEV_NETMAP
3883 	/* Netmap VIs configure RSS when netmap is enabled. */
3884 	if (vi->flags & VI_NETMAP) {
3885 		vi->flags |= VI_INIT_DONE;
3886 		return (0);
3887 	}
3888 #endif
3889 
3890 	/*
3891 	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
3892 	 */
3893 	if (vi->nrxq > vi->rss_size) {
3894 		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
3895 		    "some queues will never receive traffic.\n", vi->nrxq,
3896 		    vi->rss_size);
3897 	} else if (vi->rss_size % vi->nrxq) {
		if_printf(ifp, "nrxq (%d) does not divide hw RSS table size "
		    "(%d); expect uneven traffic distribution.\n", vi->nrxq,
3900 		    vi->rss_size);
3901 	}
3902 #ifdef RSS
3903 	MPASS(RSS_KEYSIZE == 40);
3904 	if (vi->nrxq != nbuckets) {
		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
3906 		    "performance will be impacted.\n", vi->nrxq, nbuckets);
3907 	}
3908 
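	/*
	 * The kernel's RSS key is handed to the hardware with its 32-bit
	 * words byte-swapped and in reverse order, which appears to be the
	 * layout t4_write_rss_key expects.
	 */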
3909 	rss_getkey((void *)&raw_rss_key[0]);
3910 	for (i = 0; i < nitems(rss_key); i++) {
3911 		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
3912 	}
3913 	t4_write_rss_key(sc, (void *)&rss_key[0], -1);
3914 #endif
3915 	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
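	/*
	 * Populate the indirection table: follow the kernel's bucket mapping
	 * when RSS is compiled in, else round-robin over this VI's rx queues.
	 */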
3916 	for (i = 0; i < vi->rss_size;) {
3917 #ifdef RSS
3918 		j = rss_get_indirection_to_bucket(i);
		j %= vi->nrxq;
3920 		rxq = &sc->sge.rxq[vi->first_rxq + j];
3921 		rss[i++] = rxq->iq.abs_id;
3922 #else
3923 		for_each_rxq(vi, j, rxq) {
3924 			rss[i++] = rxq->iq.abs_id;
3925 			if (i == vi->rss_size)
3926 				break;
3927 		}
3928 #endif
3929 	}
3930 
3931 	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
3932 	    vi->rss_size);
3933 	if (rc != 0) {
3934 		if_printf(ifp, "rss_config failed: %d\n", rc);
3935 		goto done;
3936 	}
3937 
3938 #ifdef RSS
3939 	hashen = hashconfig_to_hashen(hashconfig);
3940 
3941 	/*
3942 	 * We may have had to enable some hashes even though the global config
3943 	 * wants them disabled.  This is a potential problem that must be
3944 	 * reported to the user.
3945 	 */
3946 	extra = hashen_to_hashconfig(hashen) ^ hashconfig;
3947 
3948 	/*
3949 	 * If we consider only the supported hash types, then the enabled hashes
3950 	 * are a superset of the requested hashes.  In other words, there cannot
3951 	 * be any supported hash that was requested but not enabled, but there
3952 	 * can be hashes that were not requested but had to be enabled.
3953 	 */
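	/*
	 * For example, a request for UDP/IPv4 without TCP/IPv4 still sets
	 * IP4FOURTUPEN, so TCP/IPv4 hashing shows up as forced on.
	 */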
3954 	extra &= SUPPORTED_RSS_HASHTYPES;
3955 	MPASS((extra & hashconfig) == 0);
3956 
3957 	if (extra) {
3958 		if_printf(ifp,
		    "global RSS config (0x%x) cannot be accommodated.\n",
3960 		    hashconfig);
3961 	}
3962 	if (extra & RSS_HASHTYPE_RSS_IPV4)
3963 		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
3964 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
3965 		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
3966 	if (extra & RSS_HASHTYPE_RSS_IPV6)
3967 		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
3968 	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
3969 		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
3970 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
3971 		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
3972 	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
3973 		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
3974 #else
3975 	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
3976 	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
3977 	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3978 	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
3979 #endif
3980 	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]);
3981 	if (rc != 0) {
3982 		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
3983 		goto done;
3984 	}
3985 
3986 	vi->rss = rss;
3987 	vi->flags |= VI_INIT_DONE;
3988 done:
3989 	if (rc != 0)
3990 		vi_full_uninit(vi);
3991 
3992 	return (rc);
3993 }
3994 
3995 /*
3996  * Idempotent.
3997  */
3998 int
3999 vi_full_uninit(struct vi_info *vi)
4000 {
4001 	struct port_info *pi = vi->pi;
4002 	struct adapter *sc = pi->adapter;
4003 	int i;
4004 	struct sge_rxq *rxq;
4005 	struct sge_txq *txq;
4006 #ifdef TCP_OFFLOAD
4007 	struct sge_ofld_rxq *ofld_rxq;
4008 	struct sge_wrq *ofld_txq;
4009 #endif
4010 
4011 	if (vi->flags & VI_INIT_DONE) {
4012 
4013 		/* Need to quiesce queues.  */
4014 #ifdef DEV_NETMAP
4015 		if (vi->flags & VI_NETMAP)
4016 			goto skip;
4017 #endif
4018 
4019 		/* XXX: Only for the first VI? */
4020 		if (IS_MAIN_VI(vi))
4021 			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
4022 
4023 		for_each_txq(vi, i, txq) {
4024 			quiesce_txq(sc, txq);
4025 		}
4026 
4027 #ifdef TCP_OFFLOAD
4028 		for_each_ofld_txq(vi, i, ofld_txq) {
4029 			quiesce_wrq(sc, ofld_txq);
4030 		}
4031 #endif
4032 
4033 		for_each_rxq(vi, i, rxq) {
4034 			quiesce_iq(sc, &rxq->iq);
4035 			quiesce_fl(sc, &rxq->fl);
4036 		}
4037 
4038 #ifdef TCP_OFFLOAD
4039 		for_each_ofld_rxq(vi, i, ofld_rxq) {
4040 			quiesce_iq(sc, &ofld_rxq->iq);
4041 			quiesce_fl(sc, &ofld_rxq->fl);
4042 		}
4043 #endif
4044 		free(vi->rss, M_CXGBE);
4045 	}
4046 #ifdef DEV_NETMAP
4047 skip:
4048 #endif
4049 
4050 	t4_teardown_vi_queues(vi);
4051 	vi->flags &= ~VI_INIT_DONE;
4052 
4053 	return (0);
4054 }
4055 
4056 static void
4057 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
4058 {
4059 	struct sge_eq *eq = &txq->eq;
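	/* The status page sits just past the last descriptor; the hardware
	 * posts its consumer index there. */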
4060 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
4061 
4062 	(void) sc;	/* unused */
4063 
4064 #ifdef INVARIANTS
4065 	TXQ_LOCK(txq);
4066 	MPASS((eq->flags & EQ_ENABLED) == 0);
4067 	TXQ_UNLOCK(txq);
4068 #endif
4069 
4070 	/* Wait for the mp_ring to empty. */
4071 	while (!mp_ring_is_idle(txq->r)) {
4072 		mp_ring_check_drainage(txq->r, 0);
4073 		pause("rquiesce", 1);
4074 	}
4075 
4076 	/* Then wait for the hardware to finish. */
4077 	while (spg->cidx != htobe16(eq->pidx))
4078 		pause("equiesce", 1);
4079 
4080 	/* Finally, wait for the driver to reclaim all descriptors. */
4081 	while (eq->cidx != eq->pidx)
4082 		pause("dquiesce", 1);
4083 }
4084 
4085 static void
4086 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
4087 {
4088 
4089 	/* XXXTX */
4090 }
4091 
4092 static void
4093 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
4094 {
4095 	(void) sc;	/* unused */
4096 
4097 	/* Synchronize with the interrupt handler */
4098 	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
4099 		pause("iqfree", 1);
4100 }
4101 
4102 static void
4103 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
4104 {
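	/*
	 * Doom the freelist so the starvation machinery skips it, and stop
	 * the callout that services starving freelists.
	 */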
4105 	mtx_lock(&sc->sfl_lock);
4106 	FL_LOCK(fl);
4107 	fl->flags |= FL_DOOMED;
4108 	FL_UNLOCK(fl);
4109 	callout_stop(&sc->sfl_callout);
4110 	mtx_unlock(&sc->sfl_lock);
4111 
4112 	KASSERT((fl->flags & FL_STARVING) == 0,
4113 	    ("%s: still starving", __func__));
4114 }
4115 
4116 static int
4117 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
4118     driver_intr_t *handler, void *arg, char *name)
4119 {
4120 	int rc;
4121 
4122 	irq->rid = rid;
4123 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
4124 	    RF_SHAREABLE | RF_ACTIVE);
4125 	if (irq->res == NULL) {
4126 		device_printf(sc->dev,
4127 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
4128 		return (ENOMEM);
4129 	}
4130 
4131 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
4132 	    NULL, handler, arg, &irq->tag);
4133 	if (rc != 0) {
4134 		device_printf(sc->dev,
4135 		    "failed to setup interrupt for rid %d, name %s: %d\n",
4136 		    rid, name, rc);
4137 	} else if (name)
4138 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
4139 
4140 	return (rc);
4141 }
4142 
4143 static int
4144 t4_free_irq(struct adapter *sc, struct irq *irq)
4145 {
4146 	if (irq->tag)
4147 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
4148 	if (irq->res)
4149 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4150 
4151 	bzero(irq, sizeof(*irq));
4152 
4153 	return (0);
4154 }
4155 
4156 static void
4157 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
4158     unsigned int end)
4159 {
4160 	uint32_t *p = (uint32_t *)(buf + start);
4161 
4162 	for ( ; start <= end; start += sizeof(uint32_t))
4163 		*p++ = t4_read_reg(sc, start);
4164 }
4165 
4166 static void
4167 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
4168 {
4169 	int i, n;
4170 	const unsigned int *reg_ranges;
4171 	static const unsigned int t4_reg_ranges[] = {
4172 		0x1008, 0x1108,
4173 		0x1180, 0x11b4,
4174 		0x11fc, 0x123c,
4175 		0x1300, 0x173c,
4176 		0x1800, 0x18fc,
4177 		0x3000, 0x30d8,
4178 		0x30e0, 0x5924,
4179 		0x5960, 0x59d4,
4180 		0x5a00, 0x5af8,
4181 		0x6000, 0x6098,
4182 		0x6100, 0x6150,
4183 		0x6200, 0x6208,
4184 		0x6240, 0x6248,
4185 		0x6280, 0x6338,
4186 		0x6370, 0x638c,
4187 		0x6400, 0x643c,
4188 		0x6500, 0x6524,
4189 		0x6a00, 0x6a38,
4190 		0x6a60, 0x6a78,
4191 		0x6b00, 0x6b84,
4192 		0x6bf0, 0x6c84,
4193 		0x6cf0, 0x6d84,
4194 		0x6df0, 0x6e84,
4195 		0x6ef0, 0x6f84,
4196 		0x6ff0, 0x7084,
4197 		0x70f0, 0x7184,
4198 		0x71f0, 0x7284,
4199 		0x72f0, 0x7384,
4200 		0x73f0, 0x7450,
4201 		0x7500, 0x7530,
4202 		0x7600, 0x761c,
4203 		0x7680, 0x76cc,
4204 		0x7700, 0x7798,
4205 		0x77c0, 0x77fc,
4206 		0x7900, 0x79fc,
4207 		0x7b00, 0x7c38,
4208 		0x7d00, 0x7efc,
4209 		0x8dc0, 0x8e1c,
4210 		0x8e30, 0x8e78,
4211 		0x8ea0, 0x8f6c,
4212 		0x8fc0, 0x9074,
4213 		0x90fc, 0x90fc,
4214 		0x9400, 0x9458,
4215 		0x9600, 0x96bc,
4216 		0x9800, 0x9808,
4217 		0x9820, 0x983c,
4218 		0x9850, 0x9864,
4219 		0x9c00, 0x9c6c,
4220 		0x9c80, 0x9cec,
4221 		0x9d00, 0x9d6c,
4222 		0x9d80, 0x9dec,
4223 		0x9e00, 0x9e6c,
4224 		0x9e80, 0x9eec,
4225 		0x9f00, 0x9f6c,
4226 		0x9f80, 0x9fec,
4227 		0xd004, 0xd03c,
4228 		0xdfc0, 0xdfe0,
4229 		0xe000, 0xea7c,
4230 		0xf000, 0x11110,
4231 		0x11118, 0x11190,
4232 		0x19040, 0x1906c,
4233 		0x19078, 0x19080,
4234 		0x1908c, 0x19124,
4235 		0x19150, 0x191b0,
4236 		0x191d0, 0x191e8,
4237 		0x19238, 0x1924c,
4238 		0x193f8, 0x19474,
4239 		0x19490, 0x194f8,
4240 		0x19800, 0x19f30,
4241 		0x1a000, 0x1a06c,
4242 		0x1a0b0, 0x1a120,
4243 		0x1a128, 0x1a138,
4244 		0x1a190, 0x1a1c4,
4245 		0x1a1fc, 0x1a1fc,
4246 		0x1e040, 0x1e04c,
4247 		0x1e284, 0x1e28c,
4248 		0x1e2c0, 0x1e2c0,
4249 		0x1e2e0, 0x1e2e0,
4250 		0x1e300, 0x1e384,
4251 		0x1e3c0, 0x1e3c8,
4252 		0x1e440, 0x1e44c,
4253 		0x1e684, 0x1e68c,
4254 		0x1e6c0, 0x1e6c0,
4255 		0x1e6e0, 0x1e6e0,
4256 		0x1e700, 0x1e784,
4257 		0x1e7c0, 0x1e7c8,
4258 		0x1e840, 0x1e84c,
4259 		0x1ea84, 0x1ea8c,
4260 		0x1eac0, 0x1eac0,
4261 		0x1eae0, 0x1eae0,
4262 		0x1eb00, 0x1eb84,
4263 		0x1ebc0, 0x1ebc8,
4264 		0x1ec40, 0x1ec4c,
4265 		0x1ee84, 0x1ee8c,
4266 		0x1eec0, 0x1eec0,
4267 		0x1eee0, 0x1eee0,
4268 		0x1ef00, 0x1ef84,
4269 		0x1efc0, 0x1efc8,
4270 		0x1f040, 0x1f04c,
4271 		0x1f284, 0x1f28c,
4272 		0x1f2c0, 0x1f2c0,
4273 		0x1f2e0, 0x1f2e0,
4274 		0x1f300, 0x1f384,
4275 		0x1f3c0, 0x1f3c8,
4276 		0x1f440, 0x1f44c,
4277 		0x1f684, 0x1f68c,
4278 		0x1f6c0, 0x1f6c0,
4279 		0x1f6e0, 0x1f6e0,
4280 		0x1f700, 0x1f784,
4281 		0x1f7c0, 0x1f7c8,
4282 		0x1f840, 0x1f84c,
4283 		0x1fa84, 0x1fa8c,
4284 		0x1fac0, 0x1fac0,
4285 		0x1fae0, 0x1fae0,
4286 		0x1fb00, 0x1fb84,
4287 		0x1fbc0, 0x1fbc8,
4288 		0x1fc40, 0x1fc4c,
4289 		0x1fe84, 0x1fe8c,
4290 		0x1fec0, 0x1fec0,
4291 		0x1fee0, 0x1fee0,
4292 		0x1ff00, 0x1ff84,
4293 		0x1ffc0, 0x1ffc8,
4294 		0x20000, 0x2002c,
4295 		0x20100, 0x2013c,
4296 		0x20190, 0x201c8,
4297 		0x20200, 0x20318,
4298 		0x20400, 0x20528,
4299 		0x20540, 0x20614,
4300 		0x21000, 0x21040,
4301 		0x2104c, 0x21060,
4302 		0x210c0, 0x210ec,
4303 		0x21200, 0x21268,
4304 		0x21270, 0x21284,
4305 		0x212fc, 0x21388,
4306 		0x21400, 0x21404,
4307 		0x21500, 0x21518,
4308 		0x2152c, 0x2153c,
4309 		0x21550, 0x21554,
4310 		0x21600, 0x21600,
4311 		0x21608, 0x21628,
4312 		0x21630, 0x2163c,
4313 		0x21700, 0x2171c,
4314 		0x21780, 0x2178c,
4315 		0x21800, 0x21c38,
4316 		0x21c80, 0x21d7c,
4317 		0x21e00, 0x21e04,
4318 		0x22000, 0x2202c,
4319 		0x22100, 0x2213c,
4320 		0x22190, 0x221c8,
4321 		0x22200, 0x22318,
4322 		0x22400, 0x22528,
4323 		0x22540, 0x22614,
4324 		0x23000, 0x23040,
4325 		0x2304c, 0x23060,
4326 		0x230c0, 0x230ec,
4327 		0x23200, 0x23268,
4328 		0x23270, 0x23284,
4329 		0x232fc, 0x23388,
4330 		0x23400, 0x23404,
4331 		0x23500, 0x23518,
4332 		0x2352c, 0x2353c,
4333 		0x23550, 0x23554,
4334 		0x23600, 0x23600,
4335 		0x23608, 0x23628,
4336 		0x23630, 0x2363c,
4337 		0x23700, 0x2371c,
4338 		0x23780, 0x2378c,
4339 		0x23800, 0x23c38,
4340 		0x23c80, 0x23d7c,
4341 		0x23e00, 0x23e04,
4342 		0x24000, 0x2402c,
4343 		0x24100, 0x2413c,
4344 		0x24190, 0x241c8,
4345 		0x24200, 0x24318,
4346 		0x24400, 0x24528,
4347 		0x24540, 0x24614,
4348 		0x25000, 0x25040,
4349 		0x2504c, 0x25060,
4350 		0x250c0, 0x250ec,
4351 		0x25200, 0x25268,
4352 		0x25270, 0x25284,
4353 		0x252fc, 0x25388,
4354 		0x25400, 0x25404,
4355 		0x25500, 0x25518,
4356 		0x2552c, 0x2553c,
4357 		0x25550, 0x25554,
4358 		0x25600, 0x25600,
4359 		0x25608, 0x25628,
4360 		0x25630, 0x2563c,
4361 		0x25700, 0x2571c,
4362 		0x25780, 0x2578c,
4363 		0x25800, 0x25c38,
4364 		0x25c80, 0x25d7c,
4365 		0x25e00, 0x25e04,
4366 		0x26000, 0x2602c,
4367 		0x26100, 0x2613c,
4368 		0x26190, 0x261c8,
4369 		0x26200, 0x26318,
4370 		0x26400, 0x26528,
4371 		0x26540, 0x26614,
4372 		0x27000, 0x27040,
4373 		0x2704c, 0x27060,
4374 		0x270c0, 0x270ec,
4375 		0x27200, 0x27268,
4376 		0x27270, 0x27284,
4377 		0x272fc, 0x27388,
4378 		0x27400, 0x27404,
4379 		0x27500, 0x27518,
4380 		0x2752c, 0x2753c,
4381 		0x27550, 0x27554,
4382 		0x27600, 0x27600,
4383 		0x27608, 0x27628,
4384 		0x27630, 0x2763c,
4385 		0x27700, 0x2771c,
4386 		0x27780, 0x2778c,
4387 		0x27800, 0x27c38,
4388 		0x27c80, 0x27d7c,
4389 		0x27e00, 0x27e04
4390 	};
4391 	static const unsigned int t5_reg_ranges[] = {
4392 		0x1008, 0x1148,
4393 		0x1180, 0x11b4,
4394 		0x11fc, 0x123c,
4395 		0x1280, 0x173c,
4396 		0x1800, 0x18fc,
4397 		0x3000, 0x3028,
4398 		0x3060, 0x30d8,
4399 		0x30e0, 0x30fc,
4400 		0x3140, 0x357c,
4401 		0x35a8, 0x35cc,
4402 		0x35ec, 0x35ec,
4403 		0x3600, 0x5624,
4404 		0x56cc, 0x575c,
4405 		0x580c, 0x5814,
4406 		0x5890, 0x58bc,
4407 		0x5940, 0x59dc,
4408 		0x59fc, 0x5a18,
4409 		0x5a60, 0x5a9c,
4410 		0x5b94, 0x5bfc,
4411 		0x6000, 0x6040,
4412 		0x6058, 0x614c,
4413 		0x7700, 0x7798,
4414 		0x77c0, 0x78fc,
4415 		0x7b00, 0x7c54,
4416 		0x7d00, 0x7efc,
4417 		0x8dc0, 0x8de0,
4418 		0x8df8, 0x8e84,
4419 		0x8ea0, 0x8f84,
4420 		0x8fc0, 0x90f8,
4421 		0x9400, 0x9470,
4422 		0x9600, 0x96f4,
4423 		0x9800, 0x9808,
4424 		0x9820, 0x983c,
4425 		0x9850, 0x9864,
4426 		0x9c00, 0x9c6c,
4427 		0x9c80, 0x9cec,
4428 		0x9d00, 0x9d6c,
4429 		0x9d80, 0x9dec,
4430 		0x9e00, 0x9e6c,
4431 		0x9e80, 0x9eec,
4432 		0x9f00, 0x9f6c,
4433 		0x9f80, 0xa020,
4434 		0xd004, 0xd03c,
4435 		0xdfc0, 0xdfe0,
4436 		0xe000, 0x11088,
4437 		0x1109c, 0x11110,
4438 		0x11118, 0x1117c,
4439 		0x11190, 0x11204,
4440 		0x19040, 0x1906c,
4441 		0x19078, 0x19080,
4442 		0x1908c, 0x19124,
4443 		0x19150, 0x191b0,
4444 		0x191d0, 0x191e8,
4445 		0x19238, 0x19290,
4446 		0x193f8, 0x19474,
4447 		0x19490, 0x194cc,
4448 		0x194f0, 0x194f8,
4449 		0x19c00, 0x19c60,
4450 		0x19c94, 0x19e10,
4451 		0x19e50, 0x19f34,
4452 		0x19f40, 0x19f50,
4453 		0x19f90, 0x19fe4,
4454 		0x1a000, 0x1a06c,
4455 		0x1a0b0, 0x1a120,
4456 		0x1a128, 0x1a138,
4457 		0x1a190, 0x1a1c4,
4458 		0x1a1fc, 0x1a1fc,
4459 		0x1e008, 0x1e00c,
4460 		0x1e040, 0x1e04c,
4461 		0x1e284, 0x1e290,
4462 		0x1e2c0, 0x1e2c0,
4463 		0x1e2e0, 0x1e2e0,
4464 		0x1e300, 0x1e384,
4465 		0x1e3c0, 0x1e3c8,
4466 		0x1e408, 0x1e40c,
4467 		0x1e440, 0x1e44c,
4468 		0x1e684, 0x1e690,
4469 		0x1e6c0, 0x1e6c0,
4470 		0x1e6e0, 0x1e6e0,
4471 		0x1e700, 0x1e784,
4472 		0x1e7c0, 0x1e7c8,
4473 		0x1e808, 0x1e80c,
4474 		0x1e840, 0x1e84c,
4475 		0x1ea84, 0x1ea90,
4476 		0x1eac0, 0x1eac0,
4477 		0x1eae0, 0x1eae0,
4478 		0x1eb00, 0x1eb84,
4479 		0x1ebc0, 0x1ebc8,
4480 		0x1ec08, 0x1ec0c,
4481 		0x1ec40, 0x1ec4c,
4482 		0x1ee84, 0x1ee90,
4483 		0x1eec0, 0x1eec0,
4484 		0x1eee0, 0x1eee0,
4485 		0x1ef00, 0x1ef84,
4486 		0x1efc0, 0x1efc8,
4487 		0x1f008, 0x1f00c,
4488 		0x1f040, 0x1f04c,
4489 		0x1f284, 0x1f290,
4490 		0x1f2c0, 0x1f2c0,
4491 		0x1f2e0, 0x1f2e0,
4492 		0x1f300, 0x1f384,
4493 		0x1f3c0, 0x1f3c8,
4494 		0x1f408, 0x1f40c,
4495 		0x1f440, 0x1f44c,
4496 		0x1f684, 0x1f690,
4497 		0x1f6c0, 0x1f6c0,
4498 		0x1f6e0, 0x1f6e0,
4499 		0x1f700, 0x1f784,
4500 		0x1f7c0, 0x1f7c8,
4501 		0x1f808, 0x1f80c,
4502 		0x1f840, 0x1f84c,
4503 		0x1fa84, 0x1fa90,
4504 		0x1fac0, 0x1fac0,
4505 		0x1fae0, 0x1fae0,
4506 		0x1fb00, 0x1fb84,
4507 		0x1fbc0, 0x1fbc8,
4508 		0x1fc08, 0x1fc0c,
4509 		0x1fc40, 0x1fc4c,
4510 		0x1fe84, 0x1fe90,
4511 		0x1fec0, 0x1fec0,
4512 		0x1fee0, 0x1fee0,
4513 		0x1ff00, 0x1ff84,
4514 		0x1ffc0, 0x1ffc8,
4515 		0x30000, 0x30030,
4516 		0x30100, 0x30144,
4517 		0x30190, 0x301d0,
4518 		0x30200, 0x30318,
4519 		0x30400, 0x3052c,
4520 		0x30540, 0x3061c,
4521 		0x30800, 0x30834,
4522 		0x308c0, 0x30908,
4523 		0x30910, 0x309ac,
4524 		0x30a00, 0x30a2c,
4525 		0x30a44, 0x30a50,
4526 		0x30a74, 0x30c24,
4527 		0x30d00, 0x30d00,
4528 		0x30d08, 0x30d14,
4529 		0x30d1c, 0x30d20,
4530 		0x30d3c, 0x30d50,
4531 		0x31200, 0x3120c,
4532 		0x31220, 0x31220,
4533 		0x31240, 0x31240,
4534 		0x31600, 0x3160c,
4535 		0x31a00, 0x31a1c,
4536 		0x31e00, 0x31e20,
4537 		0x31e38, 0x31e3c,
4538 		0x31e80, 0x31e80,
4539 		0x31e88, 0x31ea8,
4540 		0x31eb0, 0x31eb4,
4541 		0x31ec8, 0x31ed4,
4542 		0x31fb8, 0x32004,
4543 		0x32200, 0x32200,
4544 		0x32208, 0x32240,
4545 		0x32248, 0x32280,
4546 		0x32288, 0x322c0,
4547 		0x322c8, 0x322fc,
4548 		0x32600, 0x32630,
4549 		0x32a00, 0x32abc,
4550 		0x32b00, 0x32b70,
4551 		0x33000, 0x33048,
4552 		0x33060, 0x3309c,
4553 		0x330f0, 0x33148,
4554 		0x33160, 0x3319c,
4555 		0x331f0, 0x332e4,
4556 		0x332f8, 0x333e4,
4557 		0x333f8, 0x33448,
4558 		0x33460, 0x3349c,
4559 		0x334f0, 0x33548,
4560 		0x33560, 0x3359c,
4561 		0x335f0, 0x336e4,
4562 		0x336f8, 0x337e4,
4563 		0x337f8, 0x337fc,
4564 		0x33814, 0x33814,
4565 		0x3382c, 0x3382c,
4566 		0x33880, 0x3388c,
4567 		0x338e8, 0x338ec,
4568 		0x33900, 0x33948,
4569 		0x33960, 0x3399c,
4570 		0x339f0, 0x33ae4,
4571 		0x33af8, 0x33b10,
4572 		0x33b28, 0x33b28,
4573 		0x33b3c, 0x33b50,
4574 		0x33bf0, 0x33c10,
4575 		0x33c28, 0x33c28,
4576 		0x33c3c, 0x33c50,
4577 		0x33cf0, 0x33cfc,
4578 		0x34000, 0x34030,
4579 		0x34100, 0x34144,
4580 		0x34190, 0x341d0,
4581 		0x34200, 0x34318,
4582 		0x34400, 0x3452c,
4583 		0x34540, 0x3461c,
4584 		0x34800, 0x34834,
4585 		0x348c0, 0x34908,
4586 		0x34910, 0x349ac,
4587 		0x34a00, 0x34a2c,
4588 		0x34a44, 0x34a50,
4589 		0x34a74, 0x34c24,
4590 		0x34d00, 0x34d00,
4591 		0x34d08, 0x34d14,
4592 		0x34d1c, 0x34d20,
4593 		0x34d3c, 0x34d50,
4594 		0x35200, 0x3520c,
4595 		0x35220, 0x35220,
4596 		0x35240, 0x35240,
4597 		0x35600, 0x3560c,
4598 		0x35a00, 0x35a1c,
4599 		0x35e00, 0x35e20,
4600 		0x35e38, 0x35e3c,
4601 		0x35e80, 0x35e80,
4602 		0x35e88, 0x35ea8,
4603 		0x35eb0, 0x35eb4,
4604 		0x35ec8, 0x35ed4,
4605 		0x35fb8, 0x36004,
4606 		0x36200, 0x36200,
4607 		0x36208, 0x36240,
4608 		0x36248, 0x36280,
4609 		0x36288, 0x362c0,
4610 		0x362c8, 0x362fc,
4611 		0x36600, 0x36630,
4612 		0x36a00, 0x36abc,
4613 		0x36b00, 0x36b70,
4614 		0x37000, 0x37048,
4615 		0x37060, 0x3709c,
4616 		0x370f0, 0x37148,
4617 		0x37160, 0x3719c,
4618 		0x371f0, 0x372e4,
4619 		0x372f8, 0x373e4,
4620 		0x373f8, 0x37448,
4621 		0x37460, 0x3749c,
4622 		0x374f0, 0x37548,
4623 		0x37560, 0x3759c,
4624 		0x375f0, 0x376e4,
4625 		0x376f8, 0x377e4,
4626 		0x377f8, 0x377fc,
4627 		0x37814, 0x37814,
4628 		0x3782c, 0x3782c,
4629 		0x37880, 0x3788c,
4630 		0x378e8, 0x378ec,
4631 		0x37900, 0x37948,
4632 		0x37960, 0x3799c,
4633 		0x379f0, 0x37ae4,
4634 		0x37af8, 0x37b10,
4635 		0x37b28, 0x37b28,
4636 		0x37b3c, 0x37b50,
4637 		0x37bf0, 0x37c10,
4638 		0x37c28, 0x37c28,
4639 		0x37c3c, 0x37c50,
4640 		0x37cf0, 0x37cfc,
4641 		0x38000, 0x38030,
4642 		0x38100, 0x38144,
4643 		0x38190, 0x381d0,
4644 		0x38200, 0x38318,
4645 		0x38400, 0x3852c,
4646 		0x38540, 0x3861c,
4647 		0x38800, 0x38834,
4648 		0x388c0, 0x38908,
4649 		0x38910, 0x389ac,
4650 		0x38a00, 0x38a2c,
4651 		0x38a44, 0x38a50,
4652 		0x38a74, 0x38c24,
4653 		0x38d00, 0x38d00,
4654 		0x38d08, 0x38d14,
4655 		0x38d1c, 0x38d20,
4656 		0x38d3c, 0x38d50,
4657 		0x39200, 0x3920c,
4658 		0x39220, 0x39220,
4659 		0x39240, 0x39240,
4660 		0x39600, 0x3960c,
4661 		0x39a00, 0x39a1c,
4662 		0x39e00, 0x39e20,
4663 		0x39e38, 0x39e3c,
4664 		0x39e80, 0x39e80,
4665 		0x39e88, 0x39ea8,
4666 		0x39eb0, 0x39eb4,
4667 		0x39ec8, 0x39ed4,
4668 		0x39fb8, 0x3a004,
4669 		0x3a200, 0x3a200,
4670 		0x3a208, 0x3a240,
4671 		0x3a248, 0x3a280,
4672 		0x3a288, 0x3a2c0,
4673 		0x3a2c8, 0x3a2fc,
4674 		0x3a600, 0x3a630,
4675 		0x3aa00, 0x3aabc,
4676 		0x3ab00, 0x3ab70,
4677 		0x3b000, 0x3b048,
4678 		0x3b060, 0x3b09c,
4679 		0x3b0f0, 0x3b148,
4680 		0x3b160, 0x3b19c,
4681 		0x3b1f0, 0x3b2e4,
4682 		0x3b2f8, 0x3b3e4,
4683 		0x3b3f8, 0x3b448,
4684 		0x3b460, 0x3b49c,
4685 		0x3b4f0, 0x3b548,
4686 		0x3b560, 0x3b59c,
4687 		0x3b5f0, 0x3b6e4,
4688 		0x3b6f8, 0x3b7e4,
4689 		0x3b7f8, 0x3b7fc,
4690 		0x3b814, 0x3b814,
4691 		0x3b82c, 0x3b82c,
4692 		0x3b880, 0x3b88c,
4693 		0x3b8e8, 0x3b8ec,
4694 		0x3b900, 0x3b948,
4695 		0x3b960, 0x3b99c,
4696 		0x3b9f0, 0x3bae4,
4697 		0x3baf8, 0x3bb10,
4698 		0x3bb28, 0x3bb28,
4699 		0x3bb3c, 0x3bb50,
4700 		0x3bbf0, 0x3bc10,
4701 		0x3bc28, 0x3bc28,
4702 		0x3bc3c, 0x3bc50,
4703 		0x3bcf0, 0x3bcfc,
4704 		0x3c000, 0x3c030,
4705 		0x3c100, 0x3c144,
4706 		0x3c190, 0x3c1d0,
4707 		0x3c200, 0x3c318,
4708 		0x3c400, 0x3c52c,
4709 		0x3c540, 0x3c61c,
4710 		0x3c800, 0x3c834,
4711 		0x3c8c0, 0x3c908,
4712 		0x3c910, 0x3c9ac,
4713 		0x3ca00, 0x3ca2c,
4714 		0x3ca44, 0x3ca50,
4715 		0x3ca74, 0x3cc24,
4716 		0x3cd00, 0x3cd00,
4717 		0x3cd08, 0x3cd14,
4718 		0x3cd1c, 0x3cd20,
4719 		0x3cd3c, 0x3cd50,
4720 		0x3d200, 0x3d20c,
4721 		0x3d220, 0x3d220,
4722 		0x3d240, 0x3d240,
4723 		0x3d600, 0x3d60c,
4724 		0x3da00, 0x3da1c,
4725 		0x3de00, 0x3de20,
4726 		0x3de38, 0x3de3c,
4727 		0x3de80, 0x3de80,
4728 		0x3de88, 0x3dea8,
4729 		0x3deb0, 0x3deb4,
4730 		0x3dec8, 0x3ded4,
4731 		0x3dfb8, 0x3e004,
4732 		0x3e200, 0x3e200,
4733 		0x3e208, 0x3e240,
4734 		0x3e248, 0x3e280,
4735 		0x3e288, 0x3e2c0,
4736 		0x3e2c8, 0x3e2fc,
4737 		0x3e600, 0x3e630,
4738 		0x3ea00, 0x3eabc,
4739 		0x3eb00, 0x3eb70,
4740 		0x3f000, 0x3f048,
4741 		0x3f060, 0x3f09c,
4742 		0x3f0f0, 0x3f148,
4743 		0x3f160, 0x3f19c,
4744 		0x3f1f0, 0x3f2e4,
4745 		0x3f2f8, 0x3f3e4,
4746 		0x3f3f8, 0x3f448,
4747 		0x3f460, 0x3f49c,
4748 		0x3f4f0, 0x3f548,
4749 		0x3f560, 0x3f59c,
4750 		0x3f5f0, 0x3f6e4,
4751 		0x3f6f8, 0x3f7e4,
4752 		0x3f7f8, 0x3f7fc,
4753 		0x3f814, 0x3f814,
4754 		0x3f82c, 0x3f82c,
4755 		0x3f880, 0x3f88c,
4756 		0x3f8e8, 0x3f8ec,
4757 		0x3f900, 0x3f948,
4758 		0x3f960, 0x3f99c,
4759 		0x3f9f0, 0x3fae4,
4760 		0x3faf8, 0x3fb10,
4761 		0x3fb28, 0x3fb28,
4762 		0x3fb3c, 0x3fb50,
4763 		0x3fbf0, 0x3fc10,
4764 		0x3fc28, 0x3fc28,
4765 		0x3fc3c, 0x3fc50,
4766 		0x3fcf0, 0x3fcfc,
4767 		0x40000, 0x4000c,
4768 		0x40040, 0x40068,
4769 		0x4007c, 0x40144,
4770 		0x40180, 0x4018c,
4771 		0x40200, 0x40298,
4772 		0x402ac, 0x4033c,
4773 		0x403f8, 0x403fc,
4774 		0x41304, 0x413c4,
4775 		0x41400, 0x4141c,
4776 		0x41480, 0x414d0,
4777 		0x44000, 0x44078,
4778 		0x440c0, 0x44278,
4779 		0x442c0, 0x44478,
4780 		0x444c0, 0x44678,
4781 		0x446c0, 0x44878,
4782 		0x448c0, 0x449fc,
4783 		0x45000, 0x45068,
4784 		0x45080, 0x45084,
4785 		0x450a0, 0x450b0,
4786 		0x45200, 0x45268,
4787 		0x45280, 0x45284,
4788 		0x452a0, 0x452b0,
4789 		0x460c0, 0x460e4,
4790 		0x47000, 0x4708c,
4791 		0x47200, 0x47250,
4792 		0x47400, 0x47420,
4793 		0x47600, 0x47618,
4794 		0x47800, 0x47814,
4795 		0x48000, 0x4800c,
4796 		0x48040, 0x48068,
4797 		0x4807c, 0x48144,
4798 		0x48180, 0x4818c,
4799 		0x48200, 0x48298,
4800 		0x482ac, 0x4833c,
4801 		0x483f8, 0x483fc,
4802 		0x49304, 0x493c4,
4803 		0x49400, 0x4941c,
4804 		0x49480, 0x494d0,
4805 		0x4c000, 0x4c078,
4806 		0x4c0c0, 0x4c278,
4807 		0x4c2c0, 0x4c478,
4808 		0x4c4c0, 0x4c678,
4809 		0x4c6c0, 0x4c878,
4810 		0x4c8c0, 0x4c9fc,
4811 		0x4d000, 0x4d068,
4812 		0x4d080, 0x4d084,
4813 		0x4d0a0, 0x4d0b0,
4814 		0x4d200, 0x4d268,
4815 		0x4d280, 0x4d284,
4816 		0x4d2a0, 0x4d2b0,
4817 		0x4e0c0, 0x4e0e4,
4818 		0x4f000, 0x4f08c,
4819 		0x4f200, 0x4f250,
4820 		0x4f400, 0x4f420,
4821 		0x4f600, 0x4f618,
4822 		0x4f800, 0x4f814,
4823 		0x50000, 0x500cc,
4824 		0x50400, 0x50400,
4825 		0x50800, 0x508cc,
4826 		0x50c00, 0x50c00,
4827 		0x51000, 0x5101c,
4828 		0x51300, 0x51308,
4829 	};
4830 
4831 	if (is_t4(sc)) {
4832 		reg_ranges = &t4_reg_ranges[0];
4833 		n = nitems(t4_reg_ranges);
4834 	} else {
4835 		reg_ranges = &t5_reg_ranges[0];
4836 		n = nitems(t5_reg_ranges);
4837 	}
4838 
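	/* Each pair in reg_ranges is an inclusive [first, last] block. */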
4839 	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4840 	for (i = 0; i < n; i += 2)
4841 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4842 }
4843 
4844 #define	A_PL_INDIR_CMD	0x1f8
4845 
4846 #define	S_PL_AUTOINC	31
4847 #define	M_PL_AUTOINC	0x1U
4848 #define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
4849 #define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
4850 
4851 #define	S_PL_VFID	20
4852 #define	M_PL_VFID	0xffU
4853 #define	V_PL_VFID(x)	((x) << S_PL_VFID)
4854 #define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)
4855 
4856 #define	S_PL_ADDR	0
4857 #define	M_PL_ADDR	0xfffffU
4858 #define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
4859 #define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)
4860 
4861 #define	A_PL_INDIR_DATA	0x1fc
4862 
4863 static uint64_t
4864 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
4865 {
4866 	u32 stats[2];
4867 
4868 	mtx_assert(&sc->regwin_lock, MA_OWNED);
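	/*
	 * Aim the indirect window at the VF's copy of the stat register and
	 * read twice; with AUTOINC set, the second read returns the high half
	 * of the 64-bit counter.
	 */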
4869 	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4870 	    V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg)));
4871 	stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
4872 	stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
4873 	return (((uint64_t)stats[1]) << 32 | stats[0]);
4874 }
4875 
4876 static void
4877 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
4878     struct fw_vi_stats_vf *stats)
4879 {
4880 
4881 #define GET_STAT(name) \
4882 	read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
4883 
4884 	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
4885 	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
4886 	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
4887 	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
4888 	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
4889 	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
4890 	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
4891 	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
4892 	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
4893 	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
4894 	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
4895 	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
4896 	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
4897 	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
4898 	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
4899 	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);
4900 
4901 #undef GET_STAT
4902 }
4903 
4904 static void
4905 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
4906 {
4907 	int reg;
4908 
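	/*
	 * Write zeroes through the auto-incrementing window to clear the VF's
	 * entire stats block.
	 */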
4909 	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4910 	    V_PL_VFID(G_FW_VIID_VIN(viid)) |
4911 	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
4912 	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
4913 	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
4914 		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
4915 }
4916 
4917 static void
4918 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
4919 {
4920 	struct timeval tv;
4921 	const struct timeval interval = {0, 250000};	/* 250ms */
4922 
4923 	if (!(vi->flags & VI_INIT_DONE))
4924 		return;
4925 
4926 	getmicrotime(&tv);
4927 	timevalsub(&tv, &interval);
4928 	if (timevalcmp(&tv, &vi->last_refreshed, <))
4929 		return;
4930 
4931 	mtx_lock(&sc->regwin_lock);
4932 	t4_get_vi_stats(sc, vi->viid, &vi->stats);
4933 	getmicrotime(&vi->last_refreshed);
4934 	mtx_unlock(&sc->regwin_lock);
4935 }
4936 
4937 static void
4938 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4939 {
4940 	int i;
4941 	u_int v, tnl_cong_drops;
4942 	struct timeval tv;
4943 	const struct timeval interval = {0, 250000};	/* 250ms */
4944 
4945 	getmicrotime(&tv);
4946 	timevalsub(&tv, &interval);
4947 	if (timevalcmp(&tv, &pi->last_refreshed, <))
4948 		return;
4949 
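	/*
	 * Sum the TP MIB tunnel congestion drops over every channel this port
	 * receives on.
	 */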
4950 	tnl_cong_drops = 0;
4951 	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4952 	for (i = 0; i < NCHAN; i++) {
4953 		if (pi->rx_chan_map & (1 << i)) {
4954 			mtx_lock(&sc->regwin_lock);
4955 			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4956 			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4957 			mtx_unlock(&sc->regwin_lock);
4958 			tnl_cong_drops += v;
4959 		}
4960 	}
4961 	pi->tnl_cong_drops = tnl_cong_drops;
4962 	getmicrotime(&pi->last_refreshed);
4963 }
4964 
4965 static void
4966 cxgbe_tick(void *arg)
4967 {
4968 	struct port_info *pi = arg;
4969 	struct adapter *sc = pi->adapter;
4970 
4971 	PORT_LOCK_ASSERT_OWNED(pi);
4972 	cxgbe_refresh_stats(sc, pi);
4973 
4974 	callout_schedule(&pi->tick, hz);
4975 }
4976 
4977 void
4978 vi_tick(void *arg)
4979 {
4980 	struct vi_info *vi = arg;
4981 	struct adapter *sc = vi->pi->adapter;
4982 
4983 	vi_refresh_stats(sc, vi);
4984 
4985 	callout_schedule(&vi->tick, hz);
4986 }
4987 
4988 static void
4989 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4990 {
4991 	struct ifnet *vlan;
4992 
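	/*
	 * Runs off the vlan_config EVENTHANDLER; stash the parent ifp in the
	 * vlan's cookie, presumably so offload code can map the vlan back to
	 * its parent port.
	 */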
4993 	if (arg != ifp || ifp->if_type != IFT_ETHER)
4994 		return;
4995 
4996 	vlan = VLAN_DEVAT(ifp, vid);
4997 	VLAN_SETCOOKIE(vlan, ifp);
4998 }
4999 
5000 static int
5001 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
5002 {
5003 
5004 #ifdef INVARIANTS
5005 	panic("%s: opcode 0x%02x on iq %p with payload %p",
5006 	    __func__, rss->opcode, iq, m);
5007 #else
5008 	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
5009 	    __func__, rss->opcode, iq, m);
5010 	m_freem(m);
5011 #endif
5012 	return (EDOOFUS);
5013 }
5014 
5015 int
5016 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
5017 {
5018 	uintptr_t *loc, new;
5019 
5020 	if (opcode >= nitems(sc->cpl_handler))
5021 		return (EINVAL);
5022 
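	/* A NULL handler reinstates the default, which logs and drops. */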
5023 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
5024 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
5025 	atomic_store_rel_ptr(loc, new);
5026 
5027 	return (0);
5028 }
5029 
5030 static int
5031 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
5032 {
5033 
5034 #ifdef INVARIANTS
5035 	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
5036 #else
5037 	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
5038 	    __func__, iq, ctrl);
5039 #endif
5040 	return (EDOOFUS);
5041 }
5042 
5043 int
5044 t4_register_an_handler(struct adapter *sc, an_handler_t h)
5045 {
5046 	uintptr_t *loc, new;
5047 
5048 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
5049 	loc = (uintptr_t *) &sc->an_handler;
5050 	atomic_store_rel_ptr(loc, new);
5051 
5052 	return (0);
5053 }
5054 
5055 static int
5056 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
5057 {
5058 	const struct cpl_fw6_msg *cpl =
5059 	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
5060 
5061 #ifdef INVARIANTS
5062 	panic("%s: fw_msg type %d", __func__, cpl->type);
5063 #else
5064 	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
5065 #endif
5066 	return (EDOOFUS);
5067 }
5068 
5069 int
5070 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
5071 {
5072 	uintptr_t *loc, new;
5073 
5074 	if (type >= nitems(sc->fw_msg_handler))
5075 		return (EINVAL);
5076 
5077 	/*
5078 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
5079 	 * handler dispatch table.  Reject any attempt to install a handler for
5080 	 * this subtype.
5081 	 */
5082 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
5083 		return (EINVAL);
5084 
5085 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
5086 	loc = (uintptr_t *) &sc->fw_msg_handler[type];
5087 	atomic_store_rel_ptr(loc, new);
5088 
5089 	return (0);
5090 }
5091 
5092 static void
5093 t4_sysctls(struct adapter *sc)
5094 {
5095 	struct sysctl_ctx_list *ctx;
5096 	struct sysctl_oid *oid;
5097 	struct sysctl_oid_list *children, *c0;
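	/*
	 * Bit descriptions in the %b format consumed by sysctl_bitfield: the
	 * leading \20 selects hex output and each \<bit>NAME pair names a
	 * bit (1-based).  A niccaps value of 0x3, for example, should render
	 * as something like "0x3<NIC,VM>".
	 */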
5098 	static char *caps[] = {
5099 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
5100 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
5101 		    "\6HASHFILTER\7ETHOFLD",
5102 		"\20\1TOE",				/* caps[2] toecaps */
5103 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
5104 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
5105 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
5106 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
5107 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
		    "\4PO_INITIATOR\5PO_TARGET"
5109 	};
	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
5111 
5112 	ctx = device_get_sysctl_ctx(sc->dev);
5113 
5114 	/*
5115 	 * dev.t4nex.X.
5116 	 */
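	/* E.g. "sysctl dev.t4nex.0" walks this whole tree (unit 0 assumed). */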
5117 	oid = device_get_sysctl_tree(sc->dev);
5118 	c0 = children = SYSCTL_CHILDREN(oid);
5119 
5120 	sc->sc_do_rxcopy = 1;
5121 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5122 	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5123 
5124 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5125 	    sc->params.nports, "# of ports");
5126 
5127 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5128 	    NULL, chip_rev(sc), "chip hardware revision");
5129 
5130 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5131 	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5132 
5133 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5134 	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5135 
5136 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5137 	    sc->cfcsum, "config file checksum");
5138 
5139 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5140 	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
5141 	    sysctl_bitfield, "A", "available doorbells");
5142 
5143 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
5144 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
5145 	    sysctl_bitfield, "A", "available link capabilities");
5146 
5147 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
5148 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
5149 	    sysctl_bitfield, "A", "available NIC capabilities");
5150 
5151 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
5152 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
5153 	    sysctl_bitfield, "A", "available TCP offload capabilities");
5154 
5155 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
5156 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
5157 	    sysctl_bitfield, "A", "available RDMA capabilities");
5158 
5159 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
5160 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
5161 	    sysctl_bitfield, "A", "available iSCSI capabilities");
5162 
5163 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
5164 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
5165 	    sysctl_bitfield, "A", "available FCoE capabilities");
5166 
5167 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
5169 
5170 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5171 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
5172 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
5173 	    "interrupt holdoff timer values (us)");
5174 
5175 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5176 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
5177 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
5178 	    "interrupt holdoff packet counter values");
5179 
5180 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5181 	    NULL, sc->tids.nftids, "number of filters");
5182 
5183 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5184 	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5185 	    "chip temperature (in Celsius)");
5186 
5187 	t4_sge_sysctls(sc, ctx, children);
5188 
5189 	sc->lro_timeout = 100;
5190 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5191 	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5192 
5193 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW,
5194 	    &sc->debug_flags, 0, "flags to enable runtime debugging");
5195 
5196 #ifdef SBUF_DRAIN
5197 	/*
5198 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
5199 	 */
5200 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5201 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5202 	    "logs and miscellaneous information");
5203 	children = SYSCTL_CHILDREN(oid);
5204 
5205 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5206 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5207 	    sysctl_cctrl, "A", "congestion control");
5208 
5209 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5210 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5211 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5212 
5213 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5214 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5215 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5216 
5217 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5218 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5219 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5220 
5221 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5222 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5223 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5224 
5225 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5226 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5227 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5228 
5229 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5230 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5231 	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5232 
5233 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5234 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5235 	    sysctl_cim_la, "A", "CIM logic analyzer");
5236 
5237 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5238 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5239 	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5240 
5241 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5242 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5243 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5244 
5245 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5246 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5247 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5248 
5249 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5250 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5251 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5252 
5253 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5254 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5255 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5256 
5257 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5258 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5259 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5260 
5261 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5262 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5263 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5264 
5265 	if (is_t5(sc)) {
5266 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5267 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5268 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5269 
5270 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5271 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5272 		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5273 	}
5274 
5275 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5276 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5277 	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5278 
5279 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5280 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5281 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
5282 
5283 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5284 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5285 	    sysctl_cpl_stats, "A", "CPL statistics");
5286 
5287 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5288 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5289 	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5290 
5291 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5292 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5293 	    sysctl_devlog, "A", "firmware's device log");
5294 
5295 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5296 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5297 	    sysctl_fcoe_stats, "A", "FCoE statistics");
5298 
5299 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5300 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_hw_sched, "A", "hardware scheduler");
5302 
5303 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5304 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5305 	    sysctl_l2t, "A", "hardware L2 table");
5306 
5307 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5308 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5309 	    sysctl_lb_stats, "A", "loopback statistics");
5310 
5311 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5312 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5313 	    sysctl_meminfo, "A", "memory regions");
5314 
5315 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5316 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5317 	    sysctl_mps_tcam, "A", "MPS TCAM entries");
5318 
5319 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5320 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5321 	    sysctl_path_mtus, "A", "path MTUs");
5322 
5323 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5324 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5325 	    sysctl_pm_stats, "A", "PM statistics");
5326 
5327 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5328 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5329 	    sysctl_rdma_stats, "A", "RDMA statistics");
5330 
5331 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5332 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5333 	    sysctl_tcp_stats, "A", "TCP statistics");
5334 
5335 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5336 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5337 	    sysctl_tids, "A", "TID information");
5338 
5339 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5340 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5341 	    sysctl_tp_err_stats, "A", "TP error statistics");
5342 
5343 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5344 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5345 	    sysctl_tp_la, "A", "TP logic analyzer");
5346 
5347 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5348 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5349 	    sysctl_tx_rate, "A", "Tx rate");
5350 
5351 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5352 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5353 	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5354 
5355 	if (is_t5(sc)) {
5356 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5357 		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5358 		    sysctl_wcwr_stats, "A", "write combined work requests");
5359 	}
5360 #endif
5361 
5362 #ifdef TCP_OFFLOAD
5363 	if (is_offload(sc)) {
5364 		/*
5365 		 * dev.t4nex.X.toe.
5366 		 */
5367 		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5368 		    NULL, "TOE parameters");
5369 		children = SYSCTL_CHILDREN(oid);
5370 
5371 		sc->tt.sndbuf = 256 * 1024;
5372 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5373 		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
5374 
5375 		sc->tt.ddp = 0;
5376 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5377 		    &sc->tt.ddp, 0, "DDP allowed");
5378 
5379 		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
5380 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
5381 		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
5382 
5383 		sc->tt.ddp_thres =
5384 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
5385 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
5386 		    &sc->tt.ddp_thres, 0, "DDP threshold");
5387 
5388 		sc->tt.rx_coalesce = 1;
5389 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5390 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5391 
5392 		sc->tt.tx_align = 1;
5393 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5394 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5395 	}
5396 #endif
5397 }
5398 
5399 void
5400 vi_sysctls(struct vi_info *vi)
5401 {
5402 	struct sysctl_ctx_list *ctx;
5403 	struct sysctl_oid *oid;
5404 	struct sysctl_oid_list *children;
5405 
5406 	ctx = device_get_sysctl_ctx(vi->dev);
5407 
5408 	/*
5409 	 * dev.[nv](cxgbe|cxl).X.
5410 	 */
5411 	oid = device_get_sysctl_tree(vi->dev);
5412 	children = SYSCTL_CHILDREN(oid);
5413 
5414 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
	    vi->viid, "VI identifier");
5416 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5417 	    &vi->nrxq, 0, "# of rx queues");
5418 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5419 	    &vi->ntxq, 0, "# of tx queues");
5420 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5421 	    &vi->first_rxq, 0, "index of first rx queue");
5422 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5423 	    &vi->first_txq, 0, "index of first tx queue");
5424 
5425 	if (vi->flags & VI_NETMAP)
5426 		return;
5427 
5428 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
5429 	    CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5430 	    "Reserve queue 0 for non-flowid packets");
5431 
5432 #ifdef TCP_OFFLOAD
5433 	if (vi->nofldrxq != 0) {
5434 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5435 		    &vi->nofldrxq, 0,
5436 		    "# of rx queues for offloaded TCP connections");
5437 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5438 		    &vi->nofldtxq, 0,
5439 		    "# of tx queues for offloaded TCP connections");
5440 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5441 		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5442 		    "index of first TOE rx queue");
5443 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5444 		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
5445 		    "index of first TOE tx queue");
5446 	}
5447 #endif
5448 
5449 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5450 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5451 	    "holdoff timer index");
5452 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5453 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5454 	    "holdoff packet counter index");
5455 
5456 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5457 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5458 	    "rx queue size");
5459 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5460 	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
5461 	    "tx queue size");
5462 }
5463 
5464 static void
5465 cxgbe_sysctls(struct port_info *pi)
5466 {
5467 	struct sysctl_ctx_list *ctx;
5468 	struct sysctl_oid *oid;
5469 	struct sysctl_oid_list *children;
5470 	struct adapter *sc = pi->adapter;
5471 
5472 	ctx = device_get_sysctl_ctx(pi->dev);
5473 
5474 	/*
5475 	 * dev.cxgbe.X.
5476 	 */
5477 	oid = device_get_sysctl_tree(pi->dev);
5478 	children = SYSCTL_CHILDREN(oid);
5479 
5480 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5481 	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5482 	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5483 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5484 		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5485 		    "PHY temperature (in Celsius)");
5486 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5487 		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5488 		    "PHY firmware version");
5489 	}
5490 
5491 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5492 	    CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
5493 	    "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5494 
5495 	/*
5496 	 * dev.cxgbe.X.stats.
5497 	 */
5498 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5499 	    NULL, "port statistics");
5500 	children = SYSCTL_CHILDREN(oid);
5501 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5502 	    &pi->tx_parse_error, 0,
5503 	    "# of tx packets with invalid length or # of segments");
5504 
5505 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5506 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5507 	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5508 	    sysctl_handle_t4_reg64, "QU", desc)
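
	/*
	 * Each OID added with this macro stashes a register offset in arg2;
	 * the counter is read from the MPS port-statistics block only when
	 * the sysctl is queried, via sysctl_handle_t4_reg64() below.  Adding
	 * another counter is a one-liner, e.g. (hypothetical stat, assuming
	 * a matching A_MPS_..._L register exists):
	 *
	 *	SYSCTL_ADD_T4_REG64(pi, "tx_foo", "# of foo frames",
	 *	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FOO_L));
	 */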
5509 
5510 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5511 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5512 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5513 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5514 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5515 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5516 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5517 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5518 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5519 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5520 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5521 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5522 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5523 	    "# of tx frames in this range",
5524 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5525 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5526 	    "# of tx frames in this range",
5527 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5528 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5529 	    "# of tx frames in this range",
5530 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5531 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5532 	    "# of tx frames in this range",
5533 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5534 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5535 	    "# of tx frames in this range",
5536 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5537 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5538 	    "# of tx frames in this range",
5539 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5540 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5541 	    "# of tx frames in this range",
5542 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5543 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5544 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5545 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5546 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5547 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5548 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5549 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5550 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5551 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5552 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5553 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5554 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5555 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5556 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5557 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5558 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5559 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5560 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5561 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5562 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5563 
5564 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5565 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5566 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5567 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5568 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5569 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5570 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5571 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5572 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5573 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5574 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5575 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5576 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5577 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5578 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5579 	    "# of frames received with bad FCS",
5580 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5581 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5582 	    "# of frames received with length error",
5583 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5584 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5585 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5586 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5587 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5588 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5589 	    "# of rx frames in this range",
5590 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5591 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5592 	    "# of rx frames in this range",
5593 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5594 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5595 	    "# of rx frames in this range",
5596 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5597 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5598 	    "# of rx frames in this range",
5599 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5600 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5601 	    "# of rx frames in this range",
5602 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5603 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5604 	    "# of rx frames in this range",
5605 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5606 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5607 	    "# of rx frames in this range",
5608 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5609 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5610 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5611 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5612 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5613 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5614 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5615 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5616 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5617 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5618 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5619 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5620 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5621 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5622 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5623 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5624 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5625 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5626 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5627 
5628 #undef SYSCTL_ADD_T4_REG64
5629 
5630 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5631 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5632 	    &pi->stats.name, desc)
5633 
	/* We get these from port_stats and they may be stale by up to 1s */
5635 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5636 	    "# drops due to buffer-group 0 overflows");
5637 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5638 	    "# drops due to buffer-group 1 overflows");
5639 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5640 	    "# drops due to buffer-group 2 overflows");
5641 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5642 	    "# drops due to buffer-group 3 overflows");
5643 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5644 	    "# of buffer-group 0 truncated packets");
5645 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5646 	    "# of buffer-group 1 truncated packets");
5647 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5648 	    "# of buffer-group 2 truncated packets");
5649 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5650 	    "# of buffer-group 3 truncated packets");
5651 
5652 #undef SYSCTL_ADD_T4_PORTSTAT
5653 }
5654 
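/*
 * Render arg1 (an array of ints, arg2 bytes long) as a single
 * space-separated string, so e.g. an array holding {1, 5, 10} reads back
 * as "1 5 10".
 */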
5655 static int
5656 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5657 {
5658 	int rc, *i, space = 0;
5659 	struct sbuf sb;
5660 
5661 	sbuf_new_for_sysctl(&sb, NULL, 64, req);
5662 	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5663 		if (space)
5664 			sbuf_printf(&sb, " ");
5665 		sbuf_printf(&sb, "%d", *i);
5666 		space = 1;
5667 	}
5668 	rc = sbuf_finish(&sb);
5669 	sbuf_delete(&sb);
5670 	return (rc);
5671 }
5672 
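/*
 * Format arg2 using the kernel's "%b" bit-field notation with the decoding
 * string in arg1.  E.g. with arg1 = "\20\1PAUSE_RX\2PAUSE_TX" and arg2 = 3
 * the output is "3<PAUSE_RX,PAUSE_TX>".
 */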
5673 static int
5674 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5675 {
5676 	int rc;
5677 	struct sbuf *sb;
5678 
5679 	rc = sysctl_wire_old_buffer(req, 0);
5680 	if (rc != 0)
5681 		return(rc);
		return (rc);
5683 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5684 	if (sb == NULL)
5685 		return (ENOMEM);
5686 
5687 	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5688 	rc = sbuf_finish(sb);
5689 	sbuf_delete(sb);
5690 
5691 	return (rc);
5692 }
5693 
5694 static int
5695 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5696 {
5697 	struct port_info *pi = arg1;
5698 	int op = arg2;
5699 	struct adapter *sc = pi->adapter;
5700 	u_int v;
5701 	int rc;
5702 
5703 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
5704 	if (rc)
5705 		return (rc);
5706 	/* XXX: magic numbers */
5707 	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5708 	    &v);
5709 	end_synchronized_op(sc, 0);
5710 	if (rc)
5711 		return (rc);
5712 	if (op == 0)
5713 		v /= 256;
5714 
5715 	rc = sysctl_handle_int(oidp, &v, 0, req);
5716 	return (rc);
5717 }
5718 
5719 static int
5720 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5721 {
5722 	struct vi_info *vi = arg1;
5723 	int rc, val;
5724 
5725 	val = vi->rsrv_noflowq;
5726 	rc = sysctl_handle_int(oidp, &val, 0, req);
5727 	if (rc != 0 || req->newptr == NULL)
5728 		return (rc);
5729 
5730 	if ((val >= 1) && (vi->ntxq > 1))
5731 		vi->rsrv_noflowq = 1;
5732 	else
5733 		vi->rsrv_noflowq = 0;
5734 
5735 	return (rc);
5736 }
5737 
5738 static int
5739 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5740 {
5741 	struct vi_info *vi = arg1;
5742 	struct adapter *sc = vi->pi->adapter;
5743 	int idx, rc, i;
5744 	struct sge_rxq *rxq;
5745 #ifdef TCP_OFFLOAD
5746 	struct sge_ofld_rxq *ofld_rxq;
5747 #endif
5748 	uint8_t v;
5749 
5750 	idx = vi->tmr_idx;
5751 
5752 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5753 	if (rc != 0 || req->newptr == NULL)
5754 		return (rc);
5755 
5756 	if (idx < 0 || idx >= SGE_NTIMERS)
5757 		return (EINVAL);
5758 
5759 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5760 	    "t4tmr");
5761 	if (rc)
5762 		return (rc);
5763 
5764 	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
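	/*
	 * Update every rx queue's interrupt parameters.  Use a release store
	 * where the platform provides one so the new value becomes visible to
	 * the queue's interrupt handler in order; intr_params is a single
	 * byte, so the plain-store fallback is safe as well.
	 */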
5765 	for_each_rxq(vi, i, rxq) {
5766 #ifdef atomic_store_rel_8
5767 		atomic_store_rel_8(&rxq->iq.intr_params, v);
5768 #else
5769 		rxq->iq.intr_params = v;
5770 #endif
5771 	}
5772 #ifdef TCP_OFFLOAD
5773 	for_each_ofld_rxq(vi, i, ofld_rxq) {
5774 #ifdef atomic_store_rel_8
5775 		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5776 #else
5777 		ofld_rxq->iq.intr_params = v;
5778 #endif
5779 	}
5780 #endif
5781 	vi->tmr_idx = idx;
5782 
5783 	end_synchronized_op(sc, LOCK_HELD);
5784 	return (0);
5785 }
5786 
5787 static int
5788 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5789 {
5790 	struct vi_info *vi = arg1;
5791 	struct adapter *sc = vi->pi->adapter;
5792 	int idx, rc;
5793 
5794 	idx = vi->pktc_idx;
5795 
5796 	rc = sysctl_handle_int(oidp, &idx, 0, req);
5797 	if (rc != 0 || req->newptr == NULL)
5798 		return (rc);
5799 
5800 	if (idx < -1 || idx >= SGE_NCOUNTERS)
5801 		return (EINVAL);
5802 
5803 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5804 	    "t4pktc");
5805 	if (rc)
5806 		return (rc);
5807 
5808 	if (vi->flags & VI_INIT_DONE)
5809 		rc = EBUSY; /* cannot be changed once the queues are created */
5810 	else
5811 		vi->pktc_idx = idx;
5812 
5813 	end_synchronized_op(sc, LOCK_HELD);
5814 	return (rc);
5815 }
5816 
5817 static int
5818 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5819 {
5820 	struct vi_info *vi = arg1;
5821 	struct adapter *sc = vi->pi->adapter;
5822 	int qsize, rc;
5823 
5824 	qsize = vi->qsize_rxq;
5825 
5826 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5827 	if (rc != 0 || req->newptr == NULL)
5828 		return (rc);
5829 
5830 	if (qsize < 128 || (qsize & 7))
5831 		return (EINVAL);
5832 
5833 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5834 	    "t4rxqs");
5835 	if (rc)
5836 		return (rc);
5837 
5838 	if (vi->flags & VI_INIT_DONE)
5839 		rc = EBUSY; /* cannot be changed once the queues are created */
5840 	else
5841 		vi->qsize_rxq = qsize;
5842 
5843 	end_synchronized_op(sc, LOCK_HELD);
5844 	return (rc);
5845 }
5846 
5847 static int
5848 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5849 {
5850 	struct vi_info *vi = arg1;
5851 	struct adapter *sc = vi->pi->adapter;
5852 	int qsize, rc;
5853 
5854 	qsize = vi->qsize_txq;
5855 
5856 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
5857 	if (rc != 0 || req->newptr == NULL)
5858 		return (rc);
5859 
5860 	if (qsize < 128 || qsize > 65536)
5861 		return (EINVAL);
5862 
5863 	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5864 	    "t4txqs");
5865 	if (rc)
5866 		return (rc);
5867 
5868 	if (vi->flags & VI_INIT_DONE)
5869 		rc = EBUSY; /* cannot be changed once the queues are created */
5870 	else
5871 		vi->qsize_txq = qsize;
5872 
5873 	end_synchronized_op(sc, LOCK_HELD);
5874 	return (rc);
5875 }
5876 
5877 static int
5878 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5879 {
5880 	struct port_info *pi = arg1;
5881 	struct adapter *sc = pi->adapter;
5882 	struct link_config *lc = &pi->link_cfg;
5883 	int rc;
5884 
5885 	if (req->newptr == NULL) {
5886 		struct sbuf *sb;
		static const char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5888 
5889 		rc = sysctl_wire_old_buffer(req, 0);
5890 		if (rc != 0)
5891 			return(rc);
			return (rc);
5893 		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5894 		if (sb == NULL)
5895 			return (ENOMEM);
5896 
5897 		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5898 		rc = sbuf_finish(sb);
5899 		sbuf_delete(sb);
5900 	} else {
5901 		char s[2];
5902 		int n;
5903 
5904 		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5905 		s[1] = 0;
5906 
5907 		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5908 		if (rc != 0)
5909 			return(rc);
			return (rc);
5911 		if (s[1] != 0)
5912 			return (EINVAL);
5913 		if (s[0] < '0' || s[0] > '9')
5914 			return (EINVAL);	/* not a number */
5915 		n = s[0] - '0';
5916 		if (n & ~(PAUSE_TX | PAUSE_RX))
5917 			return (EINVAL);	/* some other bit is set too */
5918 
5919 		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5920 		    "t4PAUSE");
5921 		if (rc)
5922 			return (rc);
5923 		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5924 			int link_ok = lc->link_ok;
5925 
5926 			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5927 			lc->requested_fc |= n;
5928 			rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
5929 			lc->link_ok = link_ok;	/* restore */
5930 		}
5931 		end_synchronized_op(sc, 0);
5932 	}
5933 
5934 	return (rc);
5935 }
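
/*
 * Example (hypothetical unit number): enable flow control in both
 * directions with "sysctl dev.cxgbe.0.pause_settings=3" (bit 0 = rx_pause,
 * bit 1 = tx_pause); reading the OID back then reports
 * "3<PAUSE_RX,PAUSE_TX>".
 */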
5936 
5937 static int
5938 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5939 {
5940 	struct adapter *sc = arg1;
5941 	int reg = arg2;
5942 	uint64_t val;
5943 
5944 	val = t4_read_reg64(sc, reg);
5945 
5946 	return (sysctl_handle_64(oidp, &val, 0, req));
5947 }
5948 
5949 static int
5950 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5951 {
5952 	struct adapter *sc = arg1;
5953 	int rc, t;
5954 	uint32_t param, val;
5955 
5956 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5957 	if (rc)
5958 		return (rc);
5959 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5960 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5961 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5962 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5963 	end_synchronized_op(sc, 0);
5964 	if (rc)
5965 		return (rc);
5966 
5967 	/* unknown is returned as 0 but we display -1 in that case */
5968 	t = val == 0 ? -1 : val;
5969 
5970 	rc = sysctl_handle_int(oidp, &t, 0, req);
5971 	return (rc);
5972 }
5973 
5974 #ifdef SBUF_DRAIN
5975 static int
5976 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5977 {
5978 	struct adapter *sc = arg1;
5979 	struct sbuf *sb;
5980 	int rc, i;
5981 	uint16_t incr[NMTUS][NCCTRL_WIN];
5982 	static const char *dec_fac[] = {
5983 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5984 		"0.9375"
5985 	};
5986 
5987 	rc = sysctl_wire_old_buffer(req, 0);
5988 	if (rc != 0)
5989 		return (rc);
5990 
5991 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5992 	if (sb == NULL)
5993 		return (ENOMEM);
5994 
5995 	t4_read_cong_tbl(sc, incr);
5996 
5997 	for (i = 0; i < NCCTRL_WIN; ++i) {
5998 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5999 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6000 		    incr[5][i], incr[6][i], incr[7][i]);
6001 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6002 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6003 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6004 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6005 	}
6006 
6007 	rc = sbuf_finish(sb);
6008 	sbuf_delete(sb);
6009 
6010 	return (rc);
6011 }
6012 
6013 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6014 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
6015 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
6016 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
6017 };
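
/*
 * qname[] is indexed directly by the sysctl's arg2: the first CIM_NUM_IBQ
 * entries name the inbound queues and the rest the outbound queues, with
 * the final two OBQs present on T5 and later only (CIM_NUM_OBQ_T5 >
 * CIM_NUM_OBQ).
 */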
6018 
6019 static int
6020 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6021 {
6022 	struct adapter *sc = arg1;
6023 	struct sbuf *sb;
6024 	int rc, i, n, qid = arg2;
6025 	uint32_t *buf, *p;
6026 	char *qtype;
6027 	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6028 
6029 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6030 	    ("%s: bad qid %d\n", __func__, qid));
6031 
6032 	if (qid < CIM_NUM_IBQ) {
6033 		/* inbound queue */
6034 		qtype = "IBQ";
6035 		n = 4 * CIM_IBQ_SIZE;
6036 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6037 		rc = t4_read_cim_ibq(sc, qid, buf, n);
6038 	} else {
6039 		/* outbound queue */
6040 		qtype = "OBQ";
6041 		qid -= CIM_NUM_IBQ;
6042 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6043 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6044 		rc = t4_read_cim_obq(sc, qid, buf, n);
6045 	}
6046 
6047 	if (rc < 0) {
6048 		rc = -rc;
6049 		goto done;
6050 	}
6051 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
6052 
6053 	rc = sysctl_wire_old_buffer(req, 0);
6054 	if (rc != 0)
6055 		goto done;
6056 
6057 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6058 	if (sb == NULL) {
6059 		rc = ENOMEM;
6060 		goto done;
6061 	}
6062 
	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
6064 	for (i = 0, p = buf; i < n; i += 16, p += 4)
6065 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6066 		    p[2], p[3]);
6067 
6068 	rc = sbuf_finish(sb);
6069 	sbuf_delete(sb);
6070 done:
6071 	free(buf, M_CXGBE);
6072 	return (rc);
6073 }
6074 
6075 static int
6076 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6077 {
6078 	struct adapter *sc = arg1;
6079 	u_int cfg;
6080 	struct sbuf *sb;
6081 	uint32_t *buf, *p;
6082 	int rc;
6083 
6084 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6085 	if (rc != 0)
6086 		return (rc);
6087 
6088 	rc = sysctl_wire_old_buffer(req, 0);
6089 	if (rc != 0)
6090 		return (rc);
6091 
6092 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6093 	if (sb == NULL)
6094 		return (ENOMEM);
6095 
6096 	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6097 	    M_ZERO | M_WAITOK);
6098 
6099 	rc = -t4_cim_read_la(sc, buf, NULL);
6100 	if (rc != 0)
6101 		goto done;
6102 
6103 	sbuf_printf(sb, "Status   Data      PC%s",
6104 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
6105 	    "     LS0Stat  LS0Addr             LS0Data");
6106 
6107 	KASSERT((sc->params.cim_la_size & 7) == 0,
6108 	    ("%s: p will walk off the end of buf", __func__));
6109 
6110 	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
6111 		if (cfg & F_UPDBGLACAPTPCONLY) {
6112 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
6113 			    p[6], p[7]);
6114 			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
6115 			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6116 			    p[4] & 0xff, p[5] >> 8);
6117 			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
6118 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6119 			    p[1] & 0xf, p[2] >> 4);
6120 		} else {
6121 			sbuf_printf(sb,
6122 			    "\n  %02x   %x%07x %x%07x %08x %08x "
6123 			    "%08x%08x%08x%08x",
6124 			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6125 			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6126 			    p[6], p[7]);
6127 		}
6128 	}
6129 
	rc = sbuf_finish(sb);
done:
	/* Reached on error as well; free the sbuf along with the buffer. */
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
6135 }
6136 
6137 static int
6138 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6139 {
6140 	struct adapter *sc = arg1;
6141 	u_int i;
6142 	struct sbuf *sb;
6143 	uint32_t *buf, *p;
6144 	int rc;
6145 
6146 	rc = sysctl_wire_old_buffer(req, 0);
6147 	if (rc != 0)
6148 		return (rc);
6149 
6150 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6151 	if (sb == NULL)
6152 		return (ENOMEM);
6153 
6154 	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6155 	    M_ZERO | M_WAITOK);
6156 
6157 	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6158 	p = buf;
6159 
6160 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6161 		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6162 		    p[1], p[0]);
6163 	}
6164 
6165 	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
6166 	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6167 		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
6168 		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6169 		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6170 		    (p[1] >> 2) | ((p[2] & 3) << 30),
6171 		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6172 		    p[0] & 1);
6173 	}
6174 
6175 	rc = sbuf_finish(sb);
6176 	sbuf_delete(sb);
6177 	free(buf, M_CXGBE);
6178 	return (rc);
6179 }
6180 
6181 static int
6182 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6183 {
6184 	struct adapter *sc = arg1;
6185 	u_int i;
6186 	struct sbuf *sb;
6187 	uint32_t *buf, *p;
6188 	int rc;
6189 
6190 	rc = sysctl_wire_old_buffer(req, 0);
6191 	if (rc != 0)
6192 		return (rc);
6193 
6194 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6195 	if (sb == NULL)
6196 		return (ENOMEM);
6197 
6198 	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6199 	    M_ZERO | M_WAITOK);
6200 
6201 	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6202 	p = buf;
6203 
6204 	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6206 		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
6207 		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6208 		    p[4], p[3], p[2], p[1], p[0]);
6209 	}
6210 
6211 	sbuf_printf(sb, "\n\nCntl ID               Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6213 		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
6214 		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6215 	}
6216 
6217 	rc = sbuf_finish(sb);
6218 	sbuf_delete(sb);
6219 	free(buf, M_CXGBE);
6220 	return (rc);
6221 }
6222 
6223 static int
6224 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6225 {
6226 	struct adapter *sc = arg1;
6227 	struct sbuf *sb;
6228 	int rc, i;
6229 	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6230 	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6231 	uint16_t thres[CIM_NUM_IBQ];
6232 	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6233 	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6234 	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6235 
6236 	if (is_t4(sc)) {
6237 		cim_num_obq = CIM_NUM_OBQ;
6238 		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6239 		obq_rdaddr = A_UP_OBQ_0_REALADDR;
6240 	} else {
6241 		cim_num_obq = CIM_NUM_OBQ_T5;
6242 		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6243 		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6244 	}
6245 	nq = CIM_NUM_IBQ + cim_num_obq;
6246 
6247 	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6248 	if (rc == 0)
6249 		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6250 	if (rc != 0)
6251 		return (rc);
6252 
6253 	t4_read_cimq_cfg(sc, base, size, thres);
6254 
6255 	rc = sysctl_wire_old_buffer(req, 0);
6256 	if (rc != 0)
6257 		return (rc);
6258 
6259 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6260 	if (sb == NULL)
6261 		return (ENOMEM);
6262 
6263 	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
6264 
6265 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
6266 		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
6267 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
6268 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6269 		    G_QUEREMFLITS(p[2]) * 16);
6270 	for ( ; i < nq; i++, p += 4, wr += 2)
6271 		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
6272 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
6273 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6274 		    G_QUEREMFLITS(p[2]) * 16);
6275 
6276 	rc = sbuf_finish(sb);
6277 	sbuf_delete(sb);
6278 
6279 	return (rc);
6280 }
6281 
6282 static int
6283 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6284 {
6285 	struct adapter *sc = arg1;
6286 	struct sbuf *sb;
6287 	int rc;
6288 	struct tp_cpl_stats stats;
6289 
6290 	rc = sysctl_wire_old_buffer(req, 0);
6291 	if (rc != 0)
6292 		return (rc);
6293 
6294 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6295 	if (sb == NULL)
6296 		return (ENOMEM);
6297 
6298 	t4_tp_get_cpl_stats(sc, &stats);
6299 
6300 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6301 	    "channel 3\n");
6302 	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
6303 		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6304 	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
6305 		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6306 
6307 	rc = sbuf_finish(sb);
6308 	sbuf_delete(sb);
6309 
6310 	return (rc);
6311 }
6312 
6313 static int
6314 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6315 {
6316 	struct adapter *sc = arg1;
6317 	struct sbuf *sb;
6318 	int rc;
6319 	struct tp_usm_stats stats;
6320 
6321 	rc = sysctl_wire_old_buffer(req, 0);
6322 	if (rc != 0)
		return (rc);
6324 
6325 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6326 	if (sb == NULL)
6327 		return (ENOMEM);
6328 
6329 	t4_get_usm_stats(sc, &stats);
6330 
6331 	sbuf_printf(sb, "Frames: %u\n", stats.frames);
6332 	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6333 	sbuf_printf(sb, "Drops:  %u", stats.drops);
6334 
6335 	rc = sbuf_finish(sb);
6336 	sbuf_delete(sb);
6337 
6338 	return (rc);
6339 }
6340 
6341 const char *devlog_level_strings[] = {
6342 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
6343 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
6344 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
6345 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
6346 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
6347 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
6348 };
6349 
6350 const char *devlog_facility_strings[] = {
6351 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
6352 	[FW_DEVLOG_FACILITY_CF]		= "CF",
6353 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
6354 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
6355 	[FW_DEVLOG_FACILITY_RES]	= "RES",
6356 	[FW_DEVLOG_FACILITY_HW]		= "HW",
6357 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
6358 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
6359 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
6360 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
6361 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
6362 	[FW_DEVLOG_FACILITY_VI]		= "VI",
6363 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
6364 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
6365 	[FW_DEVLOG_FACILITY_TM]		= "TM",
6366 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
6367 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
6368 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
6369 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
6370 	[FW_DEVLOG_FACILITY_RI]		= "RI",
6371 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
6372 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
6373 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
6374 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
6375 };
6376 
6377 static int
6378 sysctl_devlog(SYSCTL_HANDLER_ARGS)
6379 {
6380 	struct adapter *sc = arg1;
6381 	struct devlog_params *dparams = &sc->params.devlog;
6382 	struct fw_devlog_e *buf, *e;
6383 	int i, j, rc, nentries, first = 0, m;
6384 	struct sbuf *sb;
6385 	uint64_t ftstamp = UINT64_MAX;
6386 
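	/*
	 * If the firmware didn't report the devlog's location, fall back to
	 * the historical default: 32KB at offset 0x84000 in EDC0.
	 */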
6387 	if (dparams->start == 0) {
6388 		dparams->memtype = FW_MEMTYPE_EDC0;
6389 		dparams->start = 0x84000;
6390 		dparams->size = 32768;
6391 	}
6392 
6393 	nentries = dparams->size / sizeof(struct fw_devlog_e);
6394 
6395 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
6396 	if (buf == NULL)
6397 		return (ENOMEM);
6398 
6399 	m = fwmtype_to_hwmtype(dparams->memtype);
6400 	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
6401 	if (rc != 0)
6402 		goto done;
6403 
6404 	for (i = 0; i < nentries; i++) {
6405 		e = &buf[i];
6406 
6407 		if (e->timestamp == 0)
6408 			break;	/* end */
6409 
6410 		e->timestamp = be64toh(e->timestamp);
6411 		e->seqno = be32toh(e->seqno);
6412 		for (j = 0; j < 8; j++)
6413 			e->params[j] = be32toh(e->params[j]);
6414 
6415 		if (e->timestamp < ftstamp) {
6416 			ftstamp = e->timestamp;
6417 			first = i;
6418 		}
6419 	}
6420 
6421 	if (buf[first].timestamp == 0)
6422 		goto done;	/* nothing in the log */
6423 
6424 	rc = sysctl_wire_old_buffer(req, 0);
6425 	if (rc != 0)
6426 		goto done;
6427 
6428 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6429 	if (sb == NULL) {
6430 		rc = ENOMEM;
6431 		goto done;
6432 	}
6433 	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
6434 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
6435 
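	/*
	 * The devlog is a circular buffer; the scan above found the oldest
	 * entry (smallest timestamp), so start there and wrap around.
	 */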
6436 	i = first;
6437 	do {
6438 		e = &buf[i];
6439 		if (e->timestamp == 0)
6440 			break;	/* end */
6441 
6442 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
6443 		    e->seqno, e->timestamp,
6444 		    (e->level < nitems(devlog_level_strings) ?
6445 			devlog_level_strings[e->level] : "UNKNOWN"),
6446 		    (e->facility < nitems(devlog_facility_strings) ?
6447 			devlog_facility_strings[e->facility] : "UNKNOWN"));
6448 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6449 		    e->params[2], e->params[3], e->params[4],
6450 		    e->params[5], e->params[6], e->params[7]);
6451 
6452 		if (++i == nentries)
6453 			i = 0;
6454 	} while (i != first);
6455 
6456 	rc = sbuf_finish(sb);
6457 	sbuf_delete(sb);
6458 done:
6459 	free(buf, M_CXGBE);
6460 	return (rc);
6461 }
6462 
6463 static int
6464 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6465 {
6466 	struct adapter *sc = arg1;
6467 	struct sbuf *sb;
6468 	int rc;
6469 	struct tp_fcoe_stats stats[4];
6470 
6471 	rc = sysctl_wire_old_buffer(req, 0);
6472 	if (rc != 0)
6473 		return (rc);
6474 
6475 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6476 	if (sb == NULL)
6477 		return (ENOMEM);
6478 
6479 	t4_get_fcoe_stats(sc, 0, &stats[0]);
6480 	t4_get_fcoe_stats(sc, 1, &stats[1]);
6481 	t4_get_fcoe_stats(sc, 2, &stats[2]);
6482 	t4_get_fcoe_stats(sc, 3, &stats[3]);
6483 
6484 	sbuf_printf(sb, "                   channel 0        channel 1        "
6485 	    "channel 2        channel 3\n");
6486 	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
6487 	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
6488 	    stats[3].octetsDDP);
6489 	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
6490 	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
6491 	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
6492 	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
6493 	    stats[3].framesDrop);
6494 
6495 	rc = sbuf_finish(sb);
6496 	sbuf_delete(sb);
6497 
6498 	return (rc);
6499 }
6500 
6501 static int
6502 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6503 {
6504 	struct adapter *sc = arg1;
6505 	struct sbuf *sb;
6506 	int rc, i;
6507 	unsigned int map, kbps, ipg, mode;
6508 	unsigned int pace_tab[NTX_SCHED];
6509 
6510 	rc = sysctl_wire_old_buffer(req, 0);
6511 	if (rc != 0)
6512 		return (rc);
6513 
6514 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6515 	if (sb == NULL)
6516 		return (ENOMEM);
6517 
6518 	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6519 	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6520 	t4_read_pace_tbl(sc, pace_tab);
6521 
6522 	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
6523 	    "Class IPG (0.1 ns)   Flow IPG (us)");
6524 
6525 	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6526 		t4_get_tx_sched(sc, i, &kbps, &ipg);
6527 		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
6528 		    (mode & (1 << i)) ? "flow" : "class", map & 3);
6529 		if (kbps)
6530 			sbuf_printf(sb, "%9u     ", kbps);
6531 		else
6532 			sbuf_printf(sb, " disabled     ");
6533 
6534 		if (ipg)
6535 			sbuf_printf(sb, "%13u        ", ipg);
6536 		else
6537 			sbuf_printf(sb, "     disabled        ");
6538 
6539 		if (pace_tab[i])
6540 			sbuf_printf(sb, "%10u", pace_tab[i]);
6541 		else
6542 			sbuf_printf(sb, "  disabled");
6543 	}
6544 
6545 	rc = sbuf_finish(sb);
6546 	sbuf_delete(sb);
6547 
6548 	return (rc);
6549 }
6550 
6551 static int
6552 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6553 {
6554 	struct adapter *sc = arg1;
6555 	struct sbuf *sb;
6556 	int rc, i, j;
6557 	uint64_t *p0, *p1;
6558 	struct lb_port_stats s[2];
6559 	static const char *stat_name[] = {
6560 		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6561 		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6562 		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
6563 		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6564 		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6565 		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6566 		"BG2FramesTrunc:", "BG3FramesTrunc:"
6567 	};
6568 
6569 	rc = sysctl_wire_old_buffer(req, 0);
6570 	if (rc != 0)
6571 		return (rc);
6572 
6573 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6574 	if (sb == NULL)
6575 		return (ENOMEM);
6576 
6577 	memset(s, 0, sizeof(s));
6578 
6579 	for (i = 0; i < 4; i += 2) {
6580 		t4_get_lb_stats(sc, i, &s[0]);
6581 		t4_get_lb_stats(sc, i + 1, &s[1]);
6582 
6583 		p0 = &s[0].octets;
6584 		p1 = &s[1].octets;
6585 		sbuf_printf(sb, "%s                       Loopback %u"
6586 		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6587 
6588 		for (j = 0; j < nitems(stat_name); j++)
6589 			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6590 				   *p0++, *p1++);
6591 	}
6592 
6593 	rc = sbuf_finish(sb);
6594 	sbuf_delete(sb);
6595 
6596 	return (rc);
6597 }
6598 
6599 static int
6600 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6601 {
6602 	int rc = 0;
6603 	struct port_info *pi = arg1;
6604 	struct sbuf *sb;
6605 	static const char *linkdnreasons[] = {
6606 		"non-specific", "remote fault", "autoneg failed", "reserved3",
6607 		"PHY overheated", "unknown", "rx los", "reserved7"
6608 	};
6609 
6610 	rc = sysctl_wire_old_buffer(req, 0);
6611 	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6614 	if (sb == NULL)
6615 		return (ENOMEM);
6616 
6617 	if (pi->linkdnrc < 0)
6618 		sbuf_printf(sb, "n/a");
6619 	else if (pi->linkdnrc < nitems(linkdnreasons))
6620 		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
6621 	else
6622 		sbuf_printf(sb, "%d", pi->linkdnrc);
6623 
6624 	rc = sbuf_finish(sb);
6625 	sbuf_delete(sb);
6626 
6627 	return (rc);
6628 }
6629 
6630 struct mem_desc {
6631 	unsigned int base;
6632 	unsigned int limit;
6633 	unsigned int idx;
6634 };
6635 
6636 static int
6637 mem_desc_cmp(const void *a, const void *b)
6638 {
6639 	return ((const struct mem_desc *)a)->base -
6640 	       ((const struct mem_desc *)b)->base;
6641 }
6642 
6643 static void
6644 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6645     unsigned int to)
6646 {
6647 	unsigned int size;
6648 
6649 	size = to - from + 1;
6650 	if (size == 0)
6651 		return;
6652 
6653 	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6654 	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
6655 }
6656 
6657 static int
6658 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6659 {
6660 	struct adapter *sc = arg1;
6661 	struct sbuf *sb;
6662 	int rc, i, n;
6663 	uint32_t lo, hi, used, alloc;
6664 	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6665 	static const char *region[] = {
6666 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6667 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6668 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6669 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6670 		"RQUDP region:", "PBL region:", "TXPBL region:",
6671 		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6672 		"On-chip queues:"
6673 	};
6674 	struct mem_desc avail[4];
6675 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
6676 	struct mem_desc *md = mem;
6677 
6678 	rc = sysctl_wire_old_buffer(req, 0);
6679 	if (rc != 0)
6680 		return (rc);
6681 
6682 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6683 	if (sb == NULL)
6684 		return (ENOMEM);
6685 
6686 	for (i = 0; i < nitems(mem); i++) {
6687 		mem[i].limit = 0;
6688 		mem[i].idx = i;
6689 	}
6690 
6691 	/* Find and sort the populated memory ranges */
6692 	i = 0;
6693 	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6694 	if (lo & F_EDRAM0_ENABLE) {
6695 		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6696 		avail[i].base = G_EDRAM0_BASE(hi) << 20;
6697 		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6698 		avail[i].idx = 0;
6699 		i++;
6700 	}
6701 	if (lo & F_EDRAM1_ENABLE) {
6702 		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6703 		avail[i].base = G_EDRAM1_BASE(hi) << 20;
6704 		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6705 		avail[i].idx = 1;
6706 		i++;
6707 	}
6708 	if (lo & F_EXT_MEM_ENABLE) {
6709 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6710 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6711 		avail[i].limit = avail[i].base +
6712 		    (G_EXT_MEM_SIZE(hi) << 20);
6713 		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
6714 		i++;
6715 	}
6716 	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
6717 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6718 		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6719 		avail[i].limit = avail[i].base +
6720 		    (G_EXT_MEM1_SIZE(hi) << 20);
6721 		avail[i].idx = 4;
6722 		i++;
6723 	}
	if (!i) {				/* no memory available */
		sbuf_delete(sb);
		return (0);
	}
6726 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6727 
6728 	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6729 	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6730 	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6731 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6732 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6733 	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6734 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6735 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6736 	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6737 
6738 	/* the next few have explicit upper bounds */
6739 	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6740 	md->limit = md->base - 1 +
6741 		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6742 		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6743 	md++;
6744 
6745 	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6746 	md->limit = md->base - 1 +
6747 		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6748 		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6749 	md++;
6750 
6751 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6752 		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6753 		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6754 		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
6755 	} else {
6756 		md->base = 0;
6757 		md->idx = nitems(region);  /* hide it */
6758 	}
6759 	md++;
6760 
6761 #define ulp_region(reg) \
6762 	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6763 	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6764 
6765 	ulp_region(RX_ISCSI);
6766 	ulp_region(RX_TDDP);
6767 	ulp_region(TX_TPT);
6768 	ulp_region(RX_STAG);
6769 	ulp_region(RX_RQ);
6770 	ulp_region(RX_RQUDP);
6771 	ulp_region(RX_PBL);
6772 	ulp_region(TX_PBL);
6773 #undef ulp_region
6774 
6775 	md->base = 0;
6776 	md->idx = nitems(region);
6777 	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
6778 		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
6779 		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
6780 		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
6781 	}
6782 	md++;
6783 
6784 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6785 	md->limit = md->base + sc->tids.ntids - 1;
6786 	md++;
6787 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6788 	md->limit = md->base + sc->tids.ntids - 1;
6789 	md++;
6790 
6791 	md->base = sc->vres.ocq.start;
6792 	if (sc->vres.ocq.size)
6793 		md->limit = md->base + sc->vres.ocq.size - 1;
6794 	else
6795 		md->idx = nitems(region);  /* hide it */
6796 	md++;
6797 
6798 	/* add any address-space holes, there can be up to 3 */
6799 	for (n = 0; n < i - 1; n++)
6800 		if (avail[n].limit < avail[n + 1].base)
6801 			(md++)->base = avail[n].limit;
6802 	if (avail[n].limit)
6803 		(md++)->base = avail[n].limit;
6804 
6805 	n = md - mem;
6806 	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6807 
6808 	for (lo = 0; lo < i; lo++)
6809 		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6810 				avail[lo].limit - 1);
6811 
6812 	sbuf_printf(sb, "\n");
6813 	for (i = 0; i < n; i++) {
6814 		if (mem[i].idx >= nitems(region))
6815 			continue;                        /* skip holes */
6816 		if (!mem[i].limit)
6817 			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6818 		mem_region_show(sb, region[mem[i].idx], mem[i].base,
6819 				mem[i].limit);
6820 	}
6821 
6822 	sbuf_printf(sb, "\n");
6823 	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6824 	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6825 	mem_region_show(sb, "uP RAM:", lo, hi);
6826 
6827 	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6828 	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6829 	mem_region_show(sb, "uP Extmem2:", lo, hi);
6830 
6831 	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6832 	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6833 		   G_PMRXMAXPAGE(lo),
6834 		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6835 		   (lo & F_PMRXNUMCHN) ? 2 : 1);
6836 
6837 	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6838 	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6839 	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6840 		   G_PMTXMAXPAGE(lo),
6841 		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6842 		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6843 	sbuf_printf(sb, "%u p-structs\n",
6844 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6845 
6846 	for (i = 0; i < 4; i++) {
6847 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6848 		if (is_t4(sc)) {
6849 			used = G_USED(lo);
6850 			alloc = G_ALLOC(lo);
6851 		} else {
6852 			used = G_T5_USED(lo);
6853 			alloc = G_T5_ALLOC(lo);
6854 		}
6855 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6856 			   i, used, alloc);
6857 	}
6858 	for (i = 0; i < 4; i++) {
6859 		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6860 		if (is_t4(sc)) {
6861 			used = G_USED(lo);
6862 			alloc = G_ALLOC(lo);
6863 		} else {
6864 			used = G_T5_USED(lo);
6865 			alloc = G_T5_ALLOC(lo);
6866 		}
6867 		sbuf_printf(sb,
6868 			   "\nLoopback %d using %u pages out of %u allocated",
6869 			   i, used, alloc);
6870 	}
6871 
6872 	rc = sbuf_finish(sb);
6873 	sbuf_delete(sb);
6874 
6875 	return (rc);
6876 }
6877 
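/*
 * An MPS TCAM entry encodes each bit as an (x, y) pair: x = y = 0 means
 * "don't care", and otherwise y holds the bit's value (entries with
 * x & y != 0 are invalid and skipped by the caller).  The match mask is
 * therefore x | y, and the Ethernet address is recovered from the low six
 * bytes of y.
 */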
6878 static inline void
6879 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6880 {
6881 	*mask = x | y;
6882 	y = htobe64(y);
6883 	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
6884 }
6885 
6886 static int
6887 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6888 {
6889 	struct adapter *sc = arg1;
6890 	struct sbuf *sb;
6891 	int rc, i, n;
6892 
6893 	rc = sysctl_wire_old_buffer(req, 0);
6894 	if (rc != 0)
6895 		return (rc);
6896 
6897 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6898 	if (sb == NULL)
6899 		return (ENOMEM);
6900 
6901 	sbuf_printf(sb,
6902 	    "Idx  Ethernet address     Mask     Vld Ports PF"
6903 	    "  VF              Replication             P0 P1 P2 P3  ML");
6904 	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
6905 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6906 	for (i = 0; i < n; i++) {
6907 		uint64_t tcamx, tcamy, mask;
6908 		uint32_t cls_lo, cls_hi;
6909 		uint8_t addr[ETHER_ADDR_LEN];
6910 
6911 		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6912 		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6913 		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6914 		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6915 
6916 		if (tcamx & tcamy)
6917 			continue;
6918 
6919 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
6920 		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6921 			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
6922 			   addr[3], addr[4], addr[5], (uintmax_t)mask,
6923 			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6924 			   G_PORTMAP(cls_hi), G_PF(cls_lo),
6925 			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6926 
6927 		if (cls_lo & F_REPLICATE) {
6928 			struct fw_ldst_cmd ldst_cmd;
6929 
6930 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6931 			ldst_cmd.op_to_addrspace =
6932 			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6933 				F_FW_CMD_REQUEST | F_FW_CMD_READ |
6934 				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6935 			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6936 			ldst_cmd.u.mps.rplc.fid_idx =
6937 			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6938 				V_FW_LDST_CMD_IDX(i));
6939 
6940 			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6941 			    "t4mps");
6942 			if (rc)
6943 				break;
6944 			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6945 			    sizeof(ldst_cmd), &ldst_cmd);
6946 			end_synchronized_op(sc, 0);
6947 
6948 			if (rc != 0) {
6949 				sbuf_printf(sb,
6950 				    " ------------ error %3u ------------", rc);
6951 				rc = 0;
6952 			} else {
6953 				sbuf_printf(sb, " %08x %08x %08x %08x",
6954 				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
6955 				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
6956 				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
6957 				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
6958 			}
6959 		} else
6960 			sbuf_printf(sb, "%36s", "");
6961 
6962 		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
6963 		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
6964 		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
6965 	}
6966 
6967 	if (rc)
6968 		(void) sbuf_finish(sb);
6969 	else
6970 		rc = sbuf_finish(sb);
6971 	sbuf_delete(sb);
6972 
6973 	return (rc);
6974 }
6975 
6976 static int
6977 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6978 {
6979 	struct adapter *sc = arg1;
6980 	struct sbuf *sb;
6981 	int rc;
6982 	uint16_t mtus[NMTUS];
6983 
6984 	rc = sysctl_wire_old_buffer(req, 0);
6985 	if (rc != 0)
6986 		return (rc);
6987 
6988 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6989 	if (sb == NULL)
6990 		return (ENOMEM);
6991 
6992 	t4_read_mtu_tbl(sc, mtus, NULL);
6993 
6994 	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6995 	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6996 	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6997 	    mtus[14], mtus[15]);
6998 
6999 	rc = sbuf_finish(sb);
7000 	sbuf_delete(sb);
7001 
7002 	return (rc);
7003 }
7004 
7005 static int
7006 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7007 {
7008 	struct adapter *sc = arg1;
7009 	struct sbuf *sb;
7010 	int rc, i;
7011 	uint32_t cnt[PM_NSTATS];
7012 	uint64_t cyc[PM_NSTATS];
7013 	static const char *rx_stats[] = {
7014 		"Read:", "Write bypass:", "Write mem:", "Flush:"
7015 	};
7016 	static const char *tx_stats[] = {
7017 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
7018 	};
7019 
7020 	rc = sysctl_wire_old_buffer(req, 0);
7021 	if (rc != 0)
7022 		return (rc);
7023 
7024 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7025 	if (sb == NULL)
7026 		return (ENOMEM);
7027 
7028 	t4_pmtx_get_stats(sc, cnt, cyc);
7029 	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
7030 	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
7031 		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
7032 		    cyc[i]);
7033 
7034 	t4_pmrx_get_stats(sc, cnt, cyc);
7035 	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
7036 	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
7037 		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
7038 		    cyc[i]);
7039 
7040 	rc = sbuf_finish(sb);
7041 	sbuf_delete(sb);
7042 
7043 	return (rc);
7044 }
7045 
7046 static int
7047 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7048 {
7049 	struct adapter *sc = arg1;
7050 	struct sbuf *sb;
7051 	int rc;
7052 	struct tp_rdma_stats stats;
7053 
7054 	rc = sysctl_wire_old_buffer(req, 0);
7055 	if (rc != 0)
7056 		return (rc);
7057 
7058 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7059 	if (sb == NULL)
7060 		return (ENOMEM);
7061 
7062 	t4_tp_get_rdma_stats(sc, &stats);
	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
7065 
7066 	rc = sbuf_finish(sb);
7067 	sbuf_delete(sb);
7068 
7069 	return (rc);
7070 }
7071 
7072 static int
7073 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7074 {
7075 	struct adapter *sc = arg1;
7076 	struct sbuf *sb;
7077 	int rc;
7078 	struct tp_tcp_stats v4, v6;
7079 
7080 	rc = sysctl_wire_old_buffer(req, 0);
7081 	if (rc != 0)
7082 		return (rc);
7083 
7084 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7085 	if (sb == NULL)
7086 		return (ENOMEM);
7087 
7088 	t4_tp_get_tcp_stats(sc, &v4, &v6);
7089 	sbuf_printf(sb,
7090 	    "                                IP                 IPv6\n");
7091 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
7092 	    v4.tcpOutRsts, v6.tcpOutRsts);
7093 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
7094 	    v4.tcpInSegs, v6.tcpInSegs);
7095 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
7096 	    v4.tcpOutSegs, v6.tcpOutSegs);
7097 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
7098 	    v4.tcpRetransSegs, v6.tcpRetransSegs);
7099 
7100 	rc = sbuf_finish(sb);
7101 	sbuf_delete(sb);
7102 
7103 	return (rc);
7104 }
7105 
7106 static int
7107 sysctl_tids(SYSCTL_HANDLER_ARGS)
7108 {
7109 	struct adapter *sc = arg1;
7110 	struct sbuf *sb;
7111 	int rc;
7112 	struct tid_info *t = &sc->tids;
7113 
7114 	rc = sysctl_wire_old_buffer(req, 0);
7115 	if (rc != 0)
7116 		return (rc);
7117 
7118 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7119 	if (sb == NULL)
7120 		return (ENOMEM);
7121 
7122 	if (t->natids) {
7123 		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
7124 		    t->atids_in_use);
7125 	}
7126 
7127 	if (t->ntids) {
7128 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7129 			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
7130 
7131 			if (b) {
7132 				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
7133 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7134 				    t->ntids - 1);
7135 			} else {
7136 				sbuf_printf(sb, "TID range: %u-%u",
7137 				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7138 				    t->ntids - 1);
7139 			}
7140 		} else
7141 			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
7142 		sbuf_printf(sb, ", in use: %u\n",
7143 		    atomic_load_acq_int(&t->tids_in_use));
7144 	}
7145 
7146 	if (t->nstids) {
7147 		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
7148 		    t->stid_base + t->nstids - 1, t->stids_in_use);
7149 	}
7150 
7151 	if (t->nftids) {
7152 		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
7153 		    t->ftid_base + t->nftids - 1);
7154 	}
7155 
7156 	if (t->netids) {
7157 		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
7158 		    t->etid_base + t->netids - 1);
7159 	}
7160 
7161 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
7162 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
7163 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
7164 
7165 	rc = sbuf_finish(sb);
7166 	sbuf_delete(sb);
7167 
7168 	return (rc);
7169 }
7170 
7171 static int
7172 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
7173 {
7174 	struct adapter *sc = arg1;
7175 	struct sbuf *sb;
7176 	int rc;
7177 	struct tp_err_stats stats;
7178 
7179 	rc = sysctl_wire_old_buffer(req, 0);
7180 	if (rc != 0)
7181 		return (rc);
7182 
7183 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7184 	if (sb == NULL)
7185 		return (ENOMEM);
7186 
7187 	t4_tp_get_err_stats(sc, &stats);
7188 
7189 	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
7190 		      "channel 3\n");
7191 	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
7192 	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
7193 	    stats.macInErrs[3]);
7194 	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
7195 	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
7196 	    stats.hdrInErrs[3]);
7197 	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
7198 	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
7199 	    stats.tcpInErrs[3]);
7200 	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
7201 	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
7202 	    stats.tcp6InErrs[3]);
7203 	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
7204 	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
7205 	    stats.tnlCongDrops[3]);
7206 	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
7207 	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
7208 	    stats.tnlTxDrops[3]);
7209 	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
7210 	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
7211 	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
7212 	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
7213 	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
7214 	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
7215 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
7216 	    stats.ofldNoNeigh, stats.ofldCongDefer);
7217 
7218 	rc = sbuf_finish(sb);
7219 	sbuf_delete(sb);
7220 
7221 	return (rc);
7222 }
7223 
7224 struct field_desc {
7225 	const char *name;
7226 	u_int start;
7227 	u_int width;
7228 };
7229 
7230 static void
7231 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7232 {
7233 	char buf[32];
7234 	int line_size = 0;
7235 
7236 	while (f->name) {
7237 		uint64_t mask = (1ULL << f->width) - 1;
7238 		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7239 		    ((uintmax_t)v >> f->start) & mask);
7240 
7241 		if (line_size + len >= 79) {
7242 			line_size = 8;
7243 			sbuf_printf(sb, "\n        ");
7244 		}
7245 		sbuf_printf(sb, "%s ", buf);
7246 		line_size += len + 1;
7247 		f++;
7248 	}
7249 	sbuf_printf(sb, "\n");
7250 }
7251 
7252 static struct field_desc tp_la0[] = {
7253 	{ "RcfOpCodeOut", 60, 4 },
7254 	{ "State", 56, 4 },
7255 	{ "WcfState", 52, 4 },
7256 	{ "RcfOpcSrcOut", 50, 2 },
7257 	{ "CRxError", 49, 1 },
7258 	{ "ERxError", 48, 1 },
7259 	{ "SanityFailed", 47, 1 },
7260 	{ "SpuriousMsg", 46, 1 },
7261 	{ "FlushInputMsg", 45, 1 },
7262 	{ "FlushInputCpl", 44, 1 },
7263 	{ "RssUpBit", 43, 1 },
7264 	{ "RssFilterHit", 42, 1 },
7265 	{ "Tid", 32, 10 },
7266 	{ "InitTcb", 31, 1 },
7267 	{ "LineNumber", 24, 7 },
7268 	{ "Emsg", 23, 1 },
7269 	{ "EdataOut", 22, 1 },
7270 	{ "Cmsg", 21, 1 },
7271 	{ "CdataOut", 20, 1 },
7272 	{ "EreadPdu", 19, 1 },
7273 	{ "CreadPdu", 18, 1 },
7274 	{ "TunnelPkt", 17, 1 },
7275 	{ "RcfPeerFin", 16, 1 },
7276 	{ "RcfReasonOut", 12, 4 },
7277 	{ "TxCchannel", 10, 2 },
7278 	{ "RcfTxChannel", 8, 2 },
7279 	{ "RxEchannel", 6, 2 },
7280 	{ "RcfRxChannel", 5, 1 },
7281 	{ "RcfDataOutSrdy", 4, 1 },
7282 	{ "RxDvld", 3, 1 },
7283 	{ "RxOoDvld", 2, 1 },
7284 	{ "RxCongestion", 1, 1 },
7285 	{ "TxCongestion", 0, 1 },
7286 	{ NULL }
7287 };
7288 
7289 static struct field_desc tp_la1[] = {
7290 	{ "CplCmdIn", 56, 8 },
7291 	{ "CplCmdOut", 48, 8 },
7292 	{ "ESynOut", 47, 1 },
7293 	{ "EAckOut", 46, 1 },
7294 	{ "EFinOut", 45, 1 },
7295 	{ "ERstOut", 44, 1 },
7296 	{ "SynIn", 43, 1 },
7297 	{ "AckIn", 42, 1 },
7298 	{ "FinIn", 41, 1 },
7299 	{ "RstIn", 40, 1 },
7300 	{ "DataIn", 39, 1 },
7301 	{ "DataInVld", 38, 1 },
7302 	{ "PadIn", 37, 1 },
7303 	{ "RxBufEmpty", 36, 1 },
7304 	{ "RxDdp", 35, 1 },
7305 	{ "RxFbCongestion", 34, 1 },
7306 	{ "TxFbCongestion", 33, 1 },
7307 	{ "TxPktSumSrdy", 32, 1 },
7308 	{ "RcfUlpType", 28, 4 },
7309 	{ "Eread", 27, 1 },
7310 	{ "Ebypass", 26, 1 },
7311 	{ "Esave", 25, 1 },
7312 	{ "Static0", 24, 1 },
7313 	{ "Cread", 23, 1 },
7314 	{ "Cbypass", 22, 1 },
7315 	{ "Csave", 21, 1 },
7316 	{ "CPktOut", 20, 1 },
7317 	{ "RxPagePoolFull", 18, 2 },
7318 	{ "RxLpbkPkt", 17, 1 },
7319 	{ "TxLpbkPkt", 16, 1 },
7320 	{ "RxVfValid", 15, 1 },
7321 	{ "SynLearned", 14, 1 },
7322 	{ "SetDelEntry", 13, 1 },
7323 	{ "SetInvEntry", 12, 1 },
7324 	{ "CpcmdDvld", 11, 1 },
7325 	{ "CpcmdSave", 10, 1 },
7326 	{ "RxPstructsFull", 8, 2 },
7327 	{ "EpcmdDvld", 7, 1 },
7328 	{ "EpcmdFlush", 6, 1 },
7329 	{ "EpcmdTrimPrefix", 5, 1 },
7330 	{ "EpcmdTrimPostfix", 4, 1 },
7331 	{ "ERssIp4Pkt", 3, 1 },
7332 	{ "ERssIp6Pkt", 2, 1 },
7333 	{ "ERssTcpUdpPkt", 1, 1 },
7334 	{ "ERssFceFipPkt", 0, 1 },
7335 	{ NULL }
7336 };
7337 
7338 static struct field_desc tp_la2[] = {
7339 	{ "CplCmdIn", 56, 8 },
7340 	{ "MpsVfVld", 55, 1 },
7341 	{ "MpsPf", 52, 3 },
7342 	{ "MpsVf", 44, 8 },
7343 	{ "SynIn", 43, 1 },
7344 	{ "AckIn", 42, 1 },
7345 	{ "FinIn", 41, 1 },
7346 	{ "RstIn", 40, 1 },
7347 	{ "DataIn", 39, 1 },
7348 	{ "DataInVld", 38, 1 },
7349 	{ "PadIn", 37, 1 },
7350 	{ "RxBufEmpty", 36, 1 },
7351 	{ "RxDdp", 35, 1 },
7352 	{ "RxFbCongestion", 34, 1 },
7353 	{ "TxFbCongestion", 33, 1 },
7354 	{ "TxPktSumSrdy", 32, 1 },
7355 	{ "RcfUlpType", 28, 4 },
7356 	{ "Eread", 27, 1 },
7357 	{ "Ebypass", 26, 1 },
7358 	{ "Esave", 25, 1 },
7359 	{ "Static0", 24, 1 },
7360 	{ "Cread", 23, 1 },
7361 	{ "Cbypass", 22, 1 },
7362 	{ "Csave", 21, 1 },
7363 	{ "CPktOut", 20, 1 },
7364 	{ "RxPagePoolFull", 18, 2 },
7365 	{ "RxLpbkPkt", 17, 1 },
7366 	{ "TxLpbkPkt", 16, 1 },
7367 	{ "RxVfValid", 15, 1 },
7368 	{ "SynLearned", 14, 1 },
7369 	{ "SetDelEntry", 13, 1 },
7370 	{ "SetInvEntry", 12, 1 },
7371 	{ "CpcmdDvld", 11, 1 },
7372 	{ "CpcmdSave", 10, 1 },
7373 	{ "RxPstructsFull", 8, 2 },
7374 	{ "EpcmdDvld", 7, 1 },
7375 	{ "EpcmdFlush", 6, 1 },
7376 	{ "EpcmdTrimPrefix", 5, 1 },
7377 	{ "EpcmdTrimPostfix", 4, 1 },
7378 	{ "ERssIp4Pkt", 3, 1 },
7379 	{ "ERssIp6Pkt", 2, 1 },
7380 	{ "ERssTcpUdpPkt", 1, 1 },
7381 	{ "ERssFceFipPkt", 0, 1 },
7382 	{ NULL }
7383 };
7384 
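/*
 * Display routines for the TP logic analyzer.  The capture mode in
 * TP_DBG_LA_CONFIG determines the layout: the default modes produce one
 * 64-bit word per entry (decoded with tp_la0), while modes 2 and 3
 * produce two words per entry.  In mode 3 the second word is decoded
 * with tp_la2 or tp_la1 depending on the first word's TunnelPkt bit
 * (bit 17).
 */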
7385 static void
7386 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
7387 {
7388 
7389 	field_desc_show(sb, *p, tp_la0);
7390 }
7391 
7392 static void
7393 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
7394 {
7395 
7396 	if (idx)
7397 		sbuf_printf(sb, "\n");
7398 	field_desc_show(sb, p[0], tp_la0);
7399 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7400 		field_desc_show(sb, p[1], tp_la0);
7401 }
7402 
7403 static void
7404 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
7405 {
7406 
7407 	if (idx)
7408 		sbuf_printf(sb, "\n");
7409 	field_desc_show(sb, p[0], tp_la0);
7410 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7411 		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
7412 }
7413 
7414 static int
7415 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
7416 {
7417 	struct adapter *sc = arg1;
7418 	struct sbuf *sb;
7419 	uint64_t *buf, *p;
7420 	int rc;
7421 	u_int i, inc;
7422 	void (*show_func)(struct sbuf *, uint64_t *, int);
7423 
7424 	rc = sysctl_wire_old_buffer(req, 0);
7425 	if (rc != 0)
7426 		return (rc);
7427 
7428 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7429 	if (sb == NULL)
7430 		return (ENOMEM);
7431 
7432 	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
7433 
7434 	t4_tp_read_la(sc, buf, NULL);
7435 	p = buf;
7436 
7437 	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
7438 	case 2:
7439 		inc = 2;
7440 		show_func = tp_la_show2;
7441 		break;
7442 	case 3:
7443 		inc = 2;
7444 		show_func = tp_la_show3;
7445 		break;
7446 	default:
7447 		inc = 1;
7448 		show_func = tp_la_show;
7449 	}
7450 
7451 	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7452 		(*show_func)(sb, p, i);
7453 
7454 	rc = sbuf_finish(sb);
7455 	sbuf_delete(sb);
7456 	free(buf, M_CXGBE);
7457 	return (rc);
7458 }
7459 
7460 static int
7461 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7462 {
7463 	struct adapter *sc = arg1;
7464 	struct sbuf *sb;
7465 	int rc;
7466 	u64 nrate[NCHAN], orate[NCHAN];
7467 
7468 	rc = sysctl_wire_old_buffer(req, 0);
7469 	if (rc != 0)
7470 		return (rc);
7471 
7472 	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7473 	if (sb == NULL)
7474 		return (ENOMEM);
7475 
7476 	t4_get_chan_txrate(sc, nrate, orate);
7477 	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
7478 		 "channel 3\n");
7479 	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
7480 	    nrate[0], nrate[1], nrate[2], nrate[3]);
7481 	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
7482 	    orate[0], orate[1], orate[2], orate[3]);
7483 
7484 	rc = sbuf_finish(sb);
7485 	sbuf_delete(sb);
7486 
7487 	return (rc);
7488 }
7489 
7490 static int
7491 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7492 {
7493 	struct adapter *sc = arg1;
7494 	struct sbuf *sb;
7495 	uint32_t *buf, *p;
7496 	int rc, i;
7497 
7498 	rc = sysctl_wire_old_buffer(req, 0);
7499 	if (rc != 0)
7500 		return (rc);
7501 
7502 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7503 	if (sb == NULL)
7504 		return (ENOMEM);
7505 
7506 	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7507 	    M_ZERO | M_WAITOK);
7508 
7509 	t4_ulprx_read_la(sc, buf);
7510 	p = buf;
7511 
7512 	sbuf_printf(sb, "      Pcmd        Type   Message"
7513 	    "                Data");
7514 	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7515 		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
7516 		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7517 	}
7518 
7519 	rc = sbuf_finish(sb);
7520 	sbuf_delete(sb);
7521 	free(buf, M_CXGBE);
7522 	return (rc);
7523 }
7524 
7525 static int
7526 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7527 {
7528 	struct adapter *sc = arg1;
7529 	struct sbuf *sb;
7530 	int rc, v;
7531 
7532 	rc = sysctl_wire_old_buffer(req, 0);
7533 	if (rc != 0)
7534 		return (rc);
7535 
7536 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7537 	if (sb == NULL)
7538 		return (ENOMEM);
7539 
7540 	v = t4_read_reg(sc, A_SGE_STAT_CFG);
7541 	if (G_STATSOURCE_T5(v) == 7) {
7542 		if (G_STATMODE(v) == 0) {
7543 			sbuf_printf(sb, "total %u, incomplete %u",
7544 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7545 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7546 		} else if (G_STATMODE(v) == 1) {
7547 			sbuf_printf(sb, "total %u, data overflow %u",
7548 			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
7549 			    t4_read_reg(sc, A_SGE_STAT_MATCH));
7550 		}
7551 	}
7552 	rc = sbuf_finish(sb);
7553 	sbuf_delete(sb);
7554 
7555 	return (rc);
7556 }
7557 #endif
7558 
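/*
 * Translate between the hardware's compressed filter tuple configuration
 * (the F_* field-enable bits in TP_VLAN_PRI_MAP) and the driver's
 * T4_FILTER_* filter mode bits.  The IP tuple fields (addresses and
 * ports) are always part of the mode.
 */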
7559 static uint32_t
7560 fconf_to_mode(uint32_t fconf)
7561 {
7562 	uint32_t mode;
7563 
7564 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7565 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7566 
7567 	if (fconf & F_FRAGMENTATION)
7568 		mode |= T4_FILTER_IP_FRAGMENT;
7569 
7570 	if (fconf & F_MPSHITTYPE)
7571 		mode |= T4_FILTER_MPS_HIT_TYPE;
7572 
7573 	if (fconf & F_MACMATCH)
7574 		mode |= T4_FILTER_MAC_IDX;
7575 
7576 	if (fconf & F_ETHERTYPE)
7577 		mode |= T4_FILTER_ETH_TYPE;
7578 
7579 	if (fconf & F_PROTOCOL)
7580 		mode |= T4_FILTER_IP_PROTO;
7581 
7582 	if (fconf & F_TOS)
7583 		mode |= T4_FILTER_IP_TOS;
7584 
7585 	if (fconf & F_VLAN)
7586 		mode |= T4_FILTER_VLAN;
7587 
7588 	if (fconf & F_VNIC_ID)
7589 		mode |= T4_FILTER_VNIC;
7590 
7591 	if (fconf & F_PORT)
7592 		mode |= T4_FILTER_PORT;
7593 
7594 	if (fconf & F_FCOE)
7595 		mode |= T4_FILTER_FCoE;
7596 
7597 	return (mode);
7598 }
7599 
7600 static uint32_t
7601 mode_to_fconf(uint32_t mode)
7602 {
7603 	uint32_t fconf = 0;
7604 
7605 	if (mode & T4_FILTER_IP_FRAGMENT)
7606 		fconf |= F_FRAGMENTATION;
7607 
7608 	if (mode & T4_FILTER_MPS_HIT_TYPE)
7609 		fconf |= F_MPSHITTYPE;
7610 
7611 	if (mode & T4_FILTER_MAC_IDX)
7612 		fconf |= F_MACMATCH;
7613 
7614 	if (mode & T4_FILTER_ETH_TYPE)
7615 		fconf |= F_ETHERTYPE;
7616 
7617 	if (mode & T4_FILTER_IP_PROTO)
7618 		fconf |= F_PROTOCOL;
7619 
7620 	if (mode & T4_FILTER_IP_TOS)
7621 		fconf |= F_TOS;
7622 
7623 	if (mode & T4_FILTER_VLAN)
7624 		fconf |= F_VLAN;
7625 
7626 	if (mode & T4_FILTER_VNIC)
7627 		fconf |= F_VNIC_ID;
7628 
7629 	if (mode & T4_FILTER_PORT)
7630 		fconf |= F_PORT;
7631 
7632 	if (mode & T4_FILTER_FCoE)
7633 		fconf |= F_FCOE;
7634 
7635 	return (fconf);
7636 }
7637 
7638 static uint32_t
7639 fspec_to_fconf(struct t4_filter_specification *fs)
7640 {
7641 	uint32_t fconf = 0;
7642 
7643 	if (fs->val.frag || fs->mask.frag)
7644 		fconf |= F_FRAGMENTATION;
7645 
7646 	if (fs->val.matchtype || fs->mask.matchtype)
7647 		fconf |= F_MPSHITTYPE;
7648 
7649 	if (fs->val.macidx || fs->mask.macidx)
7650 		fconf |= F_MACMATCH;
7651 
7652 	if (fs->val.ethtype || fs->mask.ethtype)
7653 		fconf |= F_ETHERTYPE;
7654 
7655 	if (fs->val.proto || fs->mask.proto)
7656 		fconf |= F_PROTOCOL;
7657 
7658 	if (fs->val.tos || fs->mask.tos)
7659 		fconf |= F_TOS;
7660 
7661 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
7662 		fconf |= F_VLAN;
7663 
7664 	if (fs->val.vnic_vld || fs->mask.vnic_vld)
7665 		fconf |= F_VNIC_ID;
7666 
7667 	if (fs->val.iport || fs->mask.iport)
7668 		fconf |= F_PORT;
7669 
7670 	if (fs->val.fcoe || fs->mask.fcoe)
7671 		fconf |= F_FCOE;
7672 
7673 	return (fconf);
7674 }
7675 
7676 static int
7677 get_filter_mode(struct adapter *sc, uint32_t *mode)
7678 {
7679 	int rc;
7680 	uint32_t fconf;
7681 
7682 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7683 	    "t4getfm");
7684 	if (rc)
7685 		return (rc);
7686 
7687 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
7688 	    A_TP_VLAN_PRI_MAP);
7689 
7690 	if (sc->params.tp.vlan_pri_map != fconf) {
7691 		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
7692 		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
7693 		    fconf);
7694 	}
7695 
7696 	*mode = fconf_to_mode(fconf);
7697 
7698 	end_synchronized_op(sc, LOCK_HELD);
7699 	return (0);
7700 }
7701 
7702 static int
7703 set_filter_mode(struct adapter *sc, uint32_t mode)
7704 {
7705 	uint32_t fconf;
7706 	int rc;
7707 
7708 	fconf = mode_to_fconf(mode);
7709 
7710 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7711 	    "t4setfm");
7712 	if (rc)
7713 		return (rc);
7714 
7715 	if (sc->tids.ftids_in_use > 0) {
7716 		rc = EBUSY;
7717 		goto done;
7718 	}
7719 
7720 #ifdef TCP_OFFLOAD
7721 	if (uld_active(sc, ULD_TOM)) {
7722 		rc = EBUSY;
7723 		goto done;
7724 	}
7725 #endif
7726 
7727 	rc = -t4_set_filter_mode(sc, fconf);
7728 done:
7729 	end_synchronized_op(sc, LOCK_HELD);
7730 	return (rc);
7731 }
7732 
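/*
 * Read a filter's hit count straight out of its TCB in adapter memory
 * via memory window 0.  The count lives at a chip-specific offset
 * within the TCB (the TCB layouts of T4 and T5 differ).
 */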
7733 static inline uint64_t
7734 get_filter_hits(struct adapter *sc, uint32_t fid)
7735 {
7736 	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7737 	uint64_t hits;
7738 
7739 	memwin_info(sc, 0, &mw_base, NULL);
7740 	off = position_memwin(sc, 0,
7741 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
7742 	if (is_t4(sc)) {
7743 		hits = t4_read_reg64(sc, mw_base + off + 16);
7744 		hits = be64toh(hits);
7745 	} else {
7746 		hits = t4_read_reg(sc, mw_base + off + 24);
7747 		hits = be32toh(hits);
7748 	}
7749 
7750 	return (hits);
7751 }
7752 
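/*
 * Retrieve the first valid filter at or after t->idx, together with its
 * specification and hit count.  t->idx is set to 0xffffffff if no valid
 * filter is found.
 */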
7753 static int
7754 get_filter(struct adapter *sc, struct t4_filter *t)
7755 {
7756 	int i, rc, nfilters = sc->tids.nftids;
7757 	struct filter_entry *f;
7758 
7759 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7760 	    "t4getf");
7761 	if (rc)
7762 		return (rc);
7763 
7764 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
7765 	    t->idx >= nfilters) {
7766 		t->idx = 0xffffffff;
7767 		goto done;
7768 	}
7769 
7770 	f = &sc->tids.ftid_tab[t->idx];
7771 	for (i = t->idx; i < nfilters; i++, f++) {
7772 		if (f->valid) {
7773 			t->idx = i;
7774 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
7775 			t->smtidx = f->smtidx;
7776 			if (f->fs.hitcnts)
7777 				t->hits = get_filter_hits(sc, t->idx);
7778 			else
7779 				t->hits = UINT64_MAX;
7780 			t->fs = f->fs;
7781 
7782 			goto done;
7783 		}
7784 	}
7785 
7786 	t->idx = 0xffffffff;
7787 done:
7788 	end_synchronized_op(sc, LOCK_HELD);
7789 	return (0);
7790 }
7791 
7792 static int
7793 set_filter(struct adapter *sc, struct t4_filter *t)
7794 {
7795 	unsigned int nfilters, nports;
7796 	struct filter_entry *f;
7797 	int i, rc;
7798 
7799 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
7800 	if (rc)
7801 		return (rc);
7802 
7803 	nfilters = sc->tids.nftids;
7804 	nports = sc->params.nports;
7805 
7806 	if (nfilters == 0) {
7807 		rc = ENOTSUP;
7808 		goto done;
7809 	}
7810 
7811 	if (!(sc->flags & FULL_INIT_DONE)) {
7812 		rc = EAGAIN;
7813 		goto done;
7814 	}
7815 
7816 	if (t->idx >= nfilters) {
7817 		rc = EINVAL;
7818 		goto done;
7819 	}
7820 
7821 	/* Validate against the global filter mode */
7822 	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
7823 	    sc->params.tp.vlan_pri_map) {
7824 		rc = E2BIG;
7825 		goto done;
7826 	}
7827 
7828 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
7829 		rc = EINVAL;
7830 		goto done;
7831 	}
7832 
7833 	if (t->fs.val.iport >= nports) {
7834 		rc = EINVAL;
7835 		goto done;
7836 	}
7837 
7838 	/* Can't specify an iq if not steering to it */
7839 	if (!t->fs.dirsteer && t->fs.iq) {
7840 		rc = EINVAL;
7841 		goto done;
7842 	}
7843 
7844 	/* An IPv6 filter's idx must be 4-aligned */
7845 	if (t->fs.type == 1 &&
7846 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
7847 		rc = EINVAL;
7848 		goto done;
7849 	}
7850 
7851 	if (sc->tids.ftid_tab == NULL) {
7852 		KASSERT(sc->tids.ftids_in_use == 0,
7853 		    ("%s: no memory allocated but filters_in_use > 0",
7854 		    __func__));
7855 
7856 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
7857 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
7858 		if (sc->tids.ftid_tab == NULL) {
7859 			rc = ENOMEM;
7860 			goto done;
7861 		}
7862 		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
7863 	}
7864 
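	/*
	 * An IPv4 filter occupies a single slot; an IPv6 filter occupies
	 * four consecutive slots.  Check that every slot the filter needs
	 * is free (t->fs.type is 1 for IPv6, 0 for IPv4).
	 */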
7865 	for (i = 0; i < 4; i++) {
7866 		f = &sc->tids.ftid_tab[t->idx + i];
7867 
7868 		if (f->pending || f->valid) {
7869 			rc = EBUSY;
7870 			goto done;
7871 		}
7872 		if (f->locked) {
7873 			rc = EPERM;
7874 			goto done;
7875 		}
7876 
7877 		if (t->fs.type == 0)
7878 			break;
7879 	}
7880 
7881 	f = &sc->tids.ftid_tab[t->idx];
7882 	f->fs = t->fs;
7883 
7884 	rc = set_filter_wr(sc, t->idx);
7885 done:
7886 	end_synchronized_op(sc, 0);
7887 
7888 	if (rc == 0) {
7889 		mtx_lock(&sc->tids.ftid_lock);
7890 		for (;;) {
7891 			if (f->pending == 0) {
7892 				rc = f->valid ? 0 : EIO;
7893 				break;
7894 			}
7895 
7896 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7897 			    PCATCH, "t4setfw", 0)) {
7898 				rc = EINPROGRESS;
7899 				break;
7900 			}
7901 		}
7902 		mtx_unlock(&sc->tids.ftid_lock);
7903 	}
7904 	return (rc);
7905 }
7906 
7907 static int
7908 del_filter(struct adapter *sc, struct t4_filter *t)
7909 {
7910 	unsigned int nfilters;
7911 	struct filter_entry *f;
7912 	int rc;
7913 
7914 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
7915 	if (rc)
7916 		return (rc);
7917 
7918 	nfilters = sc->tids.nftids;
7919 
7920 	if (nfilters == 0) {
7921 		rc = ENOTSUP;
7922 		goto done;
7923 	}
7924 
7925 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
7926 	    t->idx >= nfilters) {
7927 		rc = EINVAL;
7928 		goto done;
7929 	}
7930 
7931 	if (!(sc->flags & FULL_INIT_DONE)) {
7932 		rc = EAGAIN;
7933 		goto done;
7934 	}
7935 
7936 	f = &sc->tids.ftid_tab[t->idx];
7937 
7938 	if (f->pending) {
7939 		rc = EBUSY;
7940 		goto done;
7941 	}
7942 	if (f->locked) {
7943 		rc = EPERM;
7944 		goto done;
7945 	}
7946 
7947 	if (f->valid) {
7948 		t->fs = f->fs;	/* extra info for the caller */
7949 		rc = del_filter_wr(sc, t->idx);
7950 	}
7951 
7952 done:
7953 	end_synchronized_op(sc, 0);
7954 
7955 	if (rc == 0) {
7956 		mtx_lock(&sc->tids.ftid_lock);
7957 		for (;;) {
7958 			if (f->pending == 0) {
7959 				rc = f->valid ? EIO : 0;
7960 				break;
7961 			}
7962 
7963 			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7964 			    PCATCH, "t4delfw", 0)) {
7965 				rc = EINPROGRESS;
7966 				break;
7967 			}
7968 		}
7969 		mtx_unlock(&sc->tids.ftid_lock);
7970 	}
7971 
7972 	return (rc);
7973 }
7974 
7975 static void
7976 clear_filter(struct filter_entry *f)
7977 {
7978 	if (f->l2t)
7979 		t4_l2t_release(f->l2t);
7980 
7981 	bzero(f, sizeof (*f));
7982 }
7983 
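/*
 * Construct a FW_FILTER work request for the filter at 'fidx' and send
 * it to the firmware on the management queue.  Completion is
 * asynchronous: the reply is handled by t4_filter_rpl(), which clears
 * f->pending and marks the filter valid (or tears it down on error).
 */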
7984 static int
7985 set_filter_wr(struct adapter *sc, int fidx)
7986 {
7987 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7988 	struct fw_filter_wr *fwr;
7989 	unsigned int ftid;
7990 	struct wrq_cookie cookie;
7991 
7992 	ASSERT_SYNCHRONIZED_OP(sc);
7993 
7994 	if (f->fs.newdmac || f->fs.newvlan) {
7995 		/* This filter needs an L2T entry; allocate one. */
7996 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7997 		if (f->l2t == NULL)
7998 			return (EAGAIN);
7999 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
8000 		    f->fs.dmac)) {
8001 			t4_l2t_release(f->l2t);
8002 			f->l2t = NULL;
8003 			return (ENOMEM);
8004 		}
8005 	}
8006 
8007 	ftid = sc->tids.ftid_base + fidx;
8008 
8009 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8010 	if (fwr == NULL)
8011 		return (ENOMEM);
8012 	bzero(fwr, sizeof(*fwr));
8013 
8014 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
8015 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
8016 	fwr->tid_to_iq =
8017 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
8018 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
8019 		V_FW_FILTER_WR_NOREPLY(0) |
8020 		V_FW_FILTER_WR_IQ(f->fs.iq));
8021 	fwr->del_filter_to_l2tix =
8022 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
8023 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
8024 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
8025 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
8026 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
8027 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
8028 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
8029 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
8030 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
8031 		    f->fs.newvlan == VLAN_REWRITE) |
8032 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
8033 		    f->fs.newvlan == VLAN_REWRITE) |
8034 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
8035 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
8036 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
8037 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
8038 	fwr->ethtype = htobe16(f->fs.val.ethtype);
8039 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
8040 	fwr->frag_to_ovlan_vldm =
8041 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
8042 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
8043 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
8044 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
8045 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
8046 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
8047 	fwr->smac_sel = 0;
8048 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
8049 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
8050 	fwr->maci_to_matchtypem =
8051 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
8052 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
8053 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
8054 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
8055 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
8056 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
8057 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
8058 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
8059 	fwr->ptcl = f->fs.val.proto;
8060 	fwr->ptclm = f->fs.mask.proto;
8061 	fwr->ttyp = f->fs.val.tos;
8062 	fwr->ttypm = f->fs.mask.tos;
8063 	fwr->ivlan = htobe16(f->fs.val.vlan);
8064 	fwr->ivlanm = htobe16(f->fs.mask.vlan);
8065 	fwr->ovlan = htobe16(f->fs.val.vnic);
8066 	fwr->ovlanm = htobe16(f->fs.mask.vnic);
8067 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
8068 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
8069 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
8070 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
8071 	fwr->lp = htobe16(f->fs.val.dport);
8072 	fwr->lpm = htobe16(f->fs.mask.dport);
8073 	fwr->fp = htobe16(f->fs.val.sport);
8074 	fwr->fpm = htobe16(f->fs.mask.sport);
8075 	if (f->fs.newsmac)
8076 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
8077 
8078 	f->pending = 1;
8079 	sc->tids.ftids_in_use++;
8080 
8081 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8082 	return (0);
8083 }
8084 
8085 static int
8086 del_filter_wr(struct adapter *sc, int fidx)
8087 {
8088 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8089 	struct fw_filter_wr *fwr;
8090 	unsigned int ftid;
8091 	struct wrq_cookie cookie;
8092 
8093 	ftid = sc->tids.ftid_base + fidx;
8094 
8095 	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8096 	if (fwr == NULL)
8097 		return (ENOMEM);
8098 	bzero(fwr, sizeof (*fwr));
8099 
8100 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
8101 
8102 	f->pending = 1;
8103 	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8104 	return (0);
8105 }
8106 
8107 int
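/*
 * Process the firmware's reply to a filter work request.  A successful
 * add marks the filter valid; a delete, or any failure, clears the
 * filter entry.  Either way, threads sleeping in set_filter() and
 * del_filter() are woken up.
 */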
8108 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8109 {
8110 	struct adapter *sc = iq->adapter;
8111 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
8112 	unsigned int idx = GET_TID(rpl);
8113 	unsigned int rc;
8114 	struct filter_entry *f;
8115 
8116 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
8117 	    rss->opcode));
8118 
8119 	if (is_ftid(sc, idx)) {
8120 
8121 		idx -= sc->tids.ftid_base;
8122 		f = &sc->tids.ftid_tab[idx];
8123 		rc = G_COOKIE(rpl->cookie);
8124 
8125 		mtx_lock(&sc->tids.ftid_lock);
8126 		if (rc == FW_FILTER_WR_FLT_ADDED) {
8127 			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
8128 			    __func__, idx));
8129 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
8130 			f->pending = 0;  /* asynchronous setup completed */
8131 			f->valid = 1;
8132 		} else {
8133 			if (rc != FW_FILTER_WR_FLT_DELETED) {
8134 				/* Add or delete failed, display an error */
8135 				log(LOG_ERR,
8136 				    "filter %u setup failed with error %u\n",
8137 				    idx, rc);
8138 			}
8139 
8140 			clear_filter(f);
8141 			sc->tids.ftids_in_use--;
8142 		}
8143 		wakeup(&sc->tids.ftid_tab);
8144 		mtx_unlock(&sc->tids.ftid_lock);
8145 	}
8146 
8147 	return (0);
8148 }
8149 
8150 static int
8151 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8152 {
8153 	int rc;
8154 
8155 	if (cntxt->cid > M_CTXTQID)
8156 		return (EINVAL);
8157 
8158 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8159 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8160 		return (EINVAL);
8161 
8162 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8163 	if (rc)
8164 		return (rc);
8165 
8166 	if (sc->flags & FW_OK) {
8167 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8168 		    &cntxt->data[0]);
8169 		if (rc == 0)
8170 			goto done;
8171 	}
8172 
8173 	/*
8174 	 * Read via firmware failed or wasn't even attempted.  Read directly via
8175 	 * the backdoor.
8176 	 */
8177 	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8178 done:
8179 	end_synchronized_op(sc, 0);
8180 	return (rc);
8181 }
8182 
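/*
 * Write a firmware image provided by userland to the card's flash.
 * Refused once the adapter has been fully initialized (EBUSY).
 */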
8183 static int
8184 load_fw(struct adapter *sc, struct t4_data *fw)
8185 {
8186 	int rc;
8187 	uint8_t *fw_data;
8188 
8189 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8190 	if (rc)
8191 		return (rc);
8192 
8193 	if (sc->flags & FULL_INIT_DONE) {
8194 		rc = EBUSY;
8195 		goto done;
8196 	}
8197 
8198 	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
8199 	if (fw_data == NULL) {
8200 		rc = ENOMEM;
8201 		goto done;
8202 	}
8203 
8204 	rc = copyin(fw->data, fw_data, fw->len);
8205 	if (rc == 0)
8206 		rc = -t4_load_fw(sc, fw_data, fw->len);
8207 
8208 	free(fw_data, M_CXGBE);
8209 done:
8210 	end_synchronized_op(sc, 0);
8211 	return (rc);
8212 }
8213 
8214 static int
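/*
 * Copy a range of adapter memory out to a user buffer by positioning
 * PCIe memory window 'win' over the range and draining it one aperture
 * at a time.  The range is validated with validate_mem_range() first.
 */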
8215 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
8216 {
8217 	uint32_t addr, off, remaining, i, n;
8218 	uint32_t *buf, *b;
8219 	uint32_t mw_base, mw_aperture;
8220 	int rc;
8221 	uint8_t *dst;
8222 
8223 	rc = validate_mem_range(sc, mr->addr, mr->len);
8224 	if (rc != 0)
8225 		return (rc);
8226 
8227 	memwin_info(sc, win, &mw_base, &mw_aperture);
8228 	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
8229 	addr = mr->addr;
8230 	remaining = mr->len;
8231 	dst = (void *)mr->data;
8232 
8233 	while (remaining) {
8234 		off = position_memwin(sc, win, addr);
8235 
8236 		/* number of bytes that we'll copy in the inner loop */
8237 		n = min(remaining, mw_aperture - off);
8238 		for (i = 0; i < n; i += 4)
8239 			*b++ = t4_read_reg(sc, mw_base + off + i);
8240 
8241 		rc = copyout(buf, dst, n);
8242 		if (rc != 0)
8243 			break;
8244 
8245 		b = buf;
8246 		dst += n;
8247 		remaining -= n;
8248 		addr += n;
8249 	}
8250 
8251 	free(buf, M_CXGBE);
8252 	return (rc);
8253 }
8254 
8255 static int
8256 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
8257 {
8258 	int rc;
8259 
8260 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
8261 		return (EINVAL);
8262 
8263 	if (i2cd->len > sizeof(i2cd->data))
8264 		return (EFBIG);
8265 
8266 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
8267 	if (rc)
8268 		return (rc);
8269 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
8270 	    i2cd->offset, i2cd->len, &i2cd->data[0]);
8271 	end_synchronized_op(sc, 0);
8272 
8273 	return (rc);
8274 }
8275 
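/*
 * Returns true if val lies within [lo, hi].  A negative val denotes a
 * parameter the caller left unset and is accepted as well.
 */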
8276 static int
8277 in_range(int val, int lo, int hi)
8278 {
8279 
8280 	return (val < 0 || (val <= hi && val >= lo));
8281 }
8282 
8283 static int
8284 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
8285 {
8286 	int fw_subcmd, fw_type, rc;
8287 
8288 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
8289 	if (rc)
8290 		return (rc);
8291 
8292 	if (!(sc->flags & FULL_INIT_DONE)) {
8293 		rc = EAGAIN;
8294 		goto done;
8295 	}
8296 
8297 	/*
8298 	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
8299 	 * sub-command and type are in common locations.)
8300 	 */
8301 	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
8302 		fw_subcmd = FW_SCHED_SC_CONFIG;
8303 	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
8304 		fw_subcmd = FW_SCHED_SC_PARAMS;
8305 	else {
8306 		rc = EINVAL;
8307 		goto done;
8308 	}
8309 	if (p->type == SCHED_CLASS_TYPE_PACKET)
8310 		fw_type = FW_SCHED_TYPE_PKTSCHED;
8311 	else {
8312 		rc = EINVAL;
8313 		goto done;
8314 	}
8315 
8316 	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
8317 		/* Vet our parameters ... */
8318 		if (p->u.config.minmax < 0) {
8319 			rc = EINVAL;
8320 			goto done;
8321 		}
8322 
8323 		/* And pass the request to the firmware ... */
8324 		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
8325 		goto done;
8326 	}
8327 
8328 	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
8329 		int fw_level;
8330 		int fw_mode;
8331 		int fw_rateunit;
8332 		int fw_ratemode;
8333 
8334 		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
8335 			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
8336 		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
8337 			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
8338 		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
8339 			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
8340 		else {
8341 			rc = EINVAL;
8342 			goto done;
8343 		}
8344 
8345 		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
8346 			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
8347 		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
8348 			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
8349 		else {
8350 			rc = EINVAL;
8351 			goto done;
8352 		}
8353 
8354 		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
8355 			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
8356 		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
8357 			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
8358 		else {
8359 			rc = EINVAL;
8360 			goto done;
8361 		}
8362 
8363 		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
8364 			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
8365 		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
8366 			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
8367 		else {
8368 			rc = EINVAL;
8369 			goto done;
8370 		}
8371 
8372 		/* Vet our parameters ... */
8373 		if (!in_range(p->u.params.channel, 0, 3) ||
8374 		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
8375 		    !in_range(p->u.params.minrate, 0, 10000000) ||
8376 		    !in_range(p->u.params.maxrate, 0, 10000000) ||
8377 		    !in_range(p->u.params.weight, 0, 100)) {
8378 			rc = ERANGE;
8379 			goto done;
8380 		}
8381 
8382 		/*
8383 		 * Translate any unset parameters into the firmware's
8384 		 * defaults, or fail the call outright if a required
8385 		 * parameter was left unset ...
8386 		 */
8387 		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
8388 		    p->u.params.channel < 0 || p->u.params.cl < 0) {
8389 			rc = EINVAL;
8390 			goto done;
8391 		}
8392 		if (p->u.params.minrate < 0)
8393 			p->u.params.minrate = 0;
8394 		if (p->u.params.maxrate < 0) {
8395 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
8396 			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
8397 				rc = EINVAL;
8398 				goto done;
8399 			} else
8400 				p->u.params.maxrate = 0;
8401 		}
8402 		if (p->u.params.weight < 0) {
8403 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
8404 				rc = EINVAL;
8405 				goto done;
8406 			} else
8407 				p->u.params.weight = 0;
8408 		}
8409 		if (p->u.params.pktsize < 0) {
8410 			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
8411 			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
8412 				rc = EINVAL;
8413 				goto done;
8414 			} else
8415 				p->u.params.pktsize = 0;
8416 		}
8417 
8418 		/* See what the firmware thinks of the request ... */
8419 		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
8420 		    fw_rateunit, fw_ratemode, p->u.params.channel,
8421 		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
8422 		    p->u.params.weight, p->u.params.pktsize, 1);
8423 		goto done;
8424 	}
8425 
8426 	rc = EINVAL;
8427 done:
8428 	end_synchronized_op(sc, 0);
8429 	return (rc);
8430 }
8431 
8432 static int
8433 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
8434 {
8435 	struct port_info *pi = NULL;
8436 	struct vi_info *vi;
8437 	struct sge_txq *txq;
8438 	uint32_t fw_mnem, fw_queue, fw_class;
8439 	int i, rc;
8440 
8441 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
8442 	if (rc)
8443 		return (rc);
8444 
8445 	if (!(sc->flags & FULL_INIT_DONE)) {
8446 		rc = EAGAIN;
8447 		goto done;
8448 	}
8449 
8450 	if (p->port >= sc->params.nports) {
8451 		rc = EINVAL;
8452 		goto done;
8453 	}
8454 
8455 	/* XXX: Only supported for the main VI. */
8456 	pi = sc->port[p->port];
8457 	vi = &pi->vi[0];
8458 	if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
8459 		rc = EINVAL;
8460 		goto done;
8461 	}
8462 
8463 	/*
8464 	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
8465 	 * Scheduling Class in this case).
8466 	 */
8467 	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
8468 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
8469 	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
8470 
8471 	/*
8472 	 * If p->queue is non-negative, then we're only changing the scheduling
8473 	 * on a single specified TX queue.
8474 	 */
8475 	if (p->queue >= 0) {
8476 		txq = &sc->sge.txq[vi->first_txq + p->queue];
8477 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8478 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8479 		    &fw_class);
8480 		goto done;
8481 	}
8482 
8483 	/*
8484 	 * Change the scheduling on all the TX queues for the
8485 	 * interface.
8486 	 */
8487 	for_each_txq(vi, i, txq) {
8488 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8489 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8490 		    &fw_class);
8491 		if (rc)
8492 			goto done;
8493 	}
8494 
8495 	rc = 0;
8496 done:
8497 	end_synchronized_op(sc, 0);
8498 	return (rc);
8499 }
8500 
8501 int
8502 t4_os_find_pci_capability(struct adapter *sc, int cap)
8503 {
8504 	int i;
8505 
8506 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
8507 }
8508 
8509 int
8510 t4_os_pci_save_state(struct adapter *sc)
8511 {
8512 	device_t dev;
8513 	struct pci_devinfo *dinfo;
8514 
8515 	dev = sc->dev;
8516 	dinfo = device_get_ivars(dev);
8517 
8518 	pci_cfg_save(dev, dinfo, 0);
8519 	return (0);
8520 }
8521 
8522 int
8523 t4_os_pci_restore_state(struct adapter *sc)
8524 {
8525 	device_t dev;
8526 	struct pci_devinfo *dinfo;
8527 
8528 	dev = sc->dev;
8529 	dinfo = device_get_ivars(dev);
8530 
8531 	pci_cfg_restore(dev, dinfo);
8532 	return (0);
8533 }
8534 
8535 void
8536 t4_os_portmod_changed(const struct adapter *sc, int idx)
8537 {
8538 	struct port_info *pi = sc->port[idx];
8539 	struct vi_info *vi;
8540 	struct ifnet *ifp;
8541 	int v;
8542 	static const char *mod_str[] = {
8543 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
8544 	};
8545 
8546 	for_each_vi(pi, v, vi) {
8547 		build_medialist(pi, &vi->media);
8548 	}
8549 
8550 	ifp = pi->vi[0].ifp;
8551 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
8552 		if_printf(ifp, "transceiver unplugged.\n");
8553 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
8554 		if_printf(ifp, "unknown transceiver inserted.\n");
8555 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
8556 		if_printf(ifp, "unsupported transceiver inserted.\n");
8557 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
8558 		if_printf(ifp, "%s transceiver inserted.\n",
8559 		    mod_str[pi->mod_type]);
8560 	} else {
8561 		if_printf(ifp, "transceiver (type %d) inserted.\n",
8562 		    pi->mod_type);
8563 	}
8564 }
8565 
8566 void
8567 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
8568 {
8569 	struct port_info *pi = sc->port[idx];
8570 	struct vi_info *vi;
8571 	struct ifnet *ifp;
8572 	int v;
8573 
8574 	if (link_stat)
8575 		pi->linkdnrc = -1;
8576 	else {
8577 		if (reason >= 0)
8578 			pi->linkdnrc = reason;
8579 	}
8580 	for_each_vi(pi, v, vi) {
8581 		ifp = vi->ifp;
8582 		if (ifp == NULL)
8583 			continue;
8584 
8585 		if (link_stat) {
8586 			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
8587 			if_link_state_change(ifp, LINK_STATE_UP);
8588 		} else {
8589 			if_link_state_change(ifp, LINK_STATE_DOWN);
8590 		}
8591 	}
8592 }
8593 
8594 void
8595 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
8596 {
8597 	struct adapter *sc;
8598 
8599 	sx_slock(&t4_list_lock);
8600 	SLIST_FOREACH(sc, &t4_list, link) {
8601 		/*
8602 		 * func should not make any assumptions about what state sc is
8603 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
8604 		 */
8605 		func(sc, arg);
8606 	}
8607 	sx_sunlock(&t4_list_lock);
8608 }
8609 
8610 static int
8611 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
8612 {
8613 	return (0);
8614 }
8615 
8616 static int
8617 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
8618 {
8619 	return (0);
8620 }
8621 
8622 static int
8623 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
8624     struct thread *td)
8625 {
8626 	int rc;
8627 	struct adapter *sc = dev->si_drv1;
8628 
8629 	rc = priv_check(td, PRIV_DRIVER);
8630 	if (rc != 0)
8631 		return (rc);
8632 
8633 	switch (cmd) {
8634 	case CHELSIO_T4_GETREG: {
8635 		struct t4_reg *edata = (struct t4_reg *)data;
8636 
8637 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8638 			return (EFAULT);
8639 
8640 		if (edata->size == 4)
8641 			edata->val = t4_read_reg(sc, edata->addr);
8642 		else if (edata->size == 8)
8643 			edata->val = t4_read_reg64(sc, edata->addr);
8644 		else
8645 			return (EINVAL);
8646 
8647 		break;
8648 	}
8649 	case CHELSIO_T4_SETREG: {
8650 		struct t4_reg *edata = (struct t4_reg *)data;
8651 
8652 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8653 			return (EFAULT);
8654 
8655 		if (edata->size == 4) {
8656 			if (edata->val & 0xffffffff00000000)
8657 				return (EINVAL);
8658 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
8659 		} else if (edata->size == 8)
8660 			t4_write_reg64(sc, edata->addr, edata->val);
8661 		else
8662 			return (EINVAL);
8663 		break;
8664 	}
8665 	case CHELSIO_T4_REGDUMP: {
8666 		struct t4_regdump *regs = (struct t4_regdump *)data;
8667 		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
8668 		uint8_t *buf;
8669 
8670 		if (regs->len < reglen) {
8671 			regs->len = reglen; /* hint to the caller */
8672 			return (ENOBUFS);
8673 		}
8674 
8675 		regs->len = reglen;
8676 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
8677 		t4_get_regs(sc, regs, buf);
8678 		rc = copyout(buf, regs->data, reglen);
8679 		free(buf, M_CXGBE);
8680 		break;
8681 	}
8682 	case CHELSIO_T4_GET_FILTER_MODE:
8683 		rc = get_filter_mode(sc, (uint32_t *)data);
8684 		break;
8685 	case CHELSIO_T4_SET_FILTER_MODE:
8686 		rc = set_filter_mode(sc, *(uint32_t *)data);
8687 		break;
8688 	case CHELSIO_T4_GET_FILTER:
8689 		rc = get_filter(sc, (struct t4_filter *)data);
8690 		break;
8691 	case CHELSIO_T4_SET_FILTER:
8692 		rc = set_filter(sc, (struct t4_filter *)data);
8693 		break;
8694 	case CHELSIO_T4_DEL_FILTER:
8695 		rc = del_filter(sc, (struct t4_filter *)data);
8696 		break;
8697 	case CHELSIO_T4_GET_SGE_CONTEXT:
8698 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
8699 		break;
8700 	case CHELSIO_T4_LOAD_FW:
8701 		rc = load_fw(sc, (struct t4_data *)data);
8702 		break;
8703 	case CHELSIO_T4_GET_MEM:
8704 		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
8705 		break;
8706 	case CHELSIO_T4_GET_I2C:
8707 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
8708 		break;
8709 	case CHELSIO_T4_CLEAR_STATS: {
8710 		int i, v;
8711 		u_int port_id = *(uint32_t *)data;
8712 		struct port_info *pi;
8713 		struct vi_info *vi;
8714 
8715 		if (port_id >= sc->params.nports)
8716 			return (EINVAL);
8717 		pi = sc->port[port_id];
8718 
8719 		/* MAC stats */
8720 		t4_clr_port_stats(sc, pi->tx_chan);
8721 		pi->tx_parse_error = 0;
8722 		mtx_lock(&sc->regwin_lock);
8723 		for_each_vi(pi, v, vi) {
8724 			if (vi->flags & VI_INIT_DONE)
8725 				t4_clr_vi_stats(sc, vi->viid);
8726 		}
8727 		mtx_unlock(&sc->regwin_lock);
8728 
8729 		/*
8730 		 * Since this command accepts a port, clear stats for
8731 		 * all VIs on this port.
8732 		 */
8733 		for_each_vi(pi, v, vi) {
8734 			if (vi->flags & VI_INIT_DONE) {
8735 				struct sge_rxq *rxq;
8736 				struct sge_txq *txq;
8737 				struct sge_wrq *wrq;
8738 
8739 				if (vi->flags & VI_NETMAP)
8740 					continue;
8741 
8742 				for_each_rxq(vi, i, rxq) {
8743 #if defined(INET) || defined(INET6)
8744 					rxq->lro.lro_queued = 0;
8745 					rxq->lro.lro_flushed = 0;
8746 #endif
8747 					rxq->rxcsum = 0;
8748 					rxq->vlan_extraction = 0;
8749 				}
8750 
8751 				for_each_txq(vi, i, txq) {
8752 					txq->txcsum = 0;
8753 					txq->tso_wrs = 0;
8754 					txq->vlan_insertion = 0;
8755 					txq->imm_wrs = 0;
8756 					txq->sgl_wrs = 0;
8757 					txq->txpkt_wrs = 0;
8758 					txq->txpkts0_wrs = 0;
8759 					txq->txpkts1_wrs = 0;
8760 					txq->txpkts0_pkts = 0;
8761 					txq->txpkts1_pkts = 0;
8762 					mp_ring_reset_stats(txq->r);
8763 				}
8764 
8765 #ifdef TCP_OFFLOAD
8766 				/* nothing to clear for each ofld_rxq */
8767 
8768 				for_each_ofld_txq(vi, i, wrq) {
8769 					wrq->tx_wrs_direct = 0;
8770 					wrq->tx_wrs_copied = 0;
8771 				}
8772 #endif
8773 
8774 				if (IS_MAIN_VI(vi)) {
8775 					wrq = &sc->sge.ctrlq[pi->port_id];
8776 					wrq->tx_wrs_direct = 0;
8777 					wrq->tx_wrs_copied = 0;
8778 				}
8779 			}
8780 		}
8781 		break;
8782 	}
8783 	case CHELSIO_T4_SCHED_CLASS:
8784 		rc = set_sched_class(sc, (struct t4_sched_params *)data);
8785 		break;
8786 	case CHELSIO_T4_SCHED_QUEUE:
8787 		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
8788 		break;
8789 	case CHELSIO_T4_GET_TRACER:
8790 		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
8791 		break;
8792 	case CHELSIO_T4_SET_TRACER:
8793 		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
8794 		break;
8795 	default:
8796 		rc = EINVAL;
8797 	}
8798 
8799 	return (rc);
8800 }
8801 
8802 #ifdef TCP_OFFLOAD
8803 void
8804 t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
8805     const unsigned int *pgsz_order)
8806 {
8807 	struct vi_info *vi = ifp->if_softc;
8808 	struct adapter *sc = vi->pi->adapter;
8809 
8810 	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
8811 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
8812 		V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
8813 		V_HPZ3(pgsz_order[3]));
8814 }
8815 
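/*
 * Enable or disable TOE on a virtual interface.  The first VI on a port
 * to enable TOE activates the TOM ULD (and, opportunistically, iWARP
 * and iSCSI); sc->offload_map tracks, per port, whether any VI still
 * has TOE enabled.
 */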
8816 static int
8817 toe_capability(struct vi_info *vi, int enable)
8818 {
8819 	int rc;
8820 	struct port_info *pi = vi->pi;
8821 	struct adapter *sc = pi->adapter;
8822 
8823 	ASSERT_SYNCHRONIZED_OP(sc);
8824 
8825 	if (!is_offload(sc))
8826 		return (ENODEV);
8827 
8828 	if (enable) {
8829 		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
8830 			/* TOE is already enabled. */
8831 			return (0);
8832 		}
8833 
8834 		/*
8835 		 * We need the port's queues around so that we're able to send
8836 		 * and receive CPLs to/from the TOE even if the ifnet for this
8837 		 * port has never been UP'd administratively.
8838 		 */
8839 		if (!(vi->flags & VI_INIT_DONE)) {
8840 			rc = cxgbe_init_synchronized(vi);
8841 			if (rc)
8842 				return (rc);
8843 		}
8844 		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
8845 			rc = cxgbe_init_synchronized(&pi->vi[0]);
8846 			if (rc)
8847 				return (rc);
8848 		}
8849 
8850 		if (isset(&sc->offload_map, pi->port_id)) {
8851 			/* TOE is enabled on another VI of this port. */
8852 			pi->uld_vis++;
8853 			return (0);
8854 		}
8855 
8856 		if (!uld_active(sc, ULD_TOM)) {
8857 			rc = t4_activate_uld(sc, ULD_TOM);
8858 			if (rc == EAGAIN) {
8859 				log(LOG_WARNING,
8860 				    "You must kldload t4_tom.ko before trying "
8861 				    "to enable TOE on a cxgbe interface.\n");
8862 			}
8863 			if (rc != 0)
8864 				return (rc);
8865 			KASSERT(sc->tom_softc != NULL,
8866 			    ("%s: TOM activated but softc NULL", __func__));
8867 			KASSERT(uld_active(sc, ULD_TOM),
8868 			    ("%s: TOM activated but flag not set", __func__));
8869 		}
8870 
8871 		/* Activate iWARP and iSCSI too, if the modules are loaded. */
8872 		if (!uld_active(sc, ULD_IWARP))
8873 			(void) t4_activate_uld(sc, ULD_IWARP);
8874 		if (!uld_active(sc, ULD_ISCSI))
8875 			(void) t4_activate_uld(sc, ULD_ISCSI);
8876 
8877 		pi->uld_vis++;
8878 		setbit(&sc->offload_map, pi->port_id);
8879 	} else {
8880 		pi->uld_vis--;
8881 
8882 		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
8883 			return (0);
8884 
8885 		KASSERT(uld_active(sc, ULD_TOM),
8886 		    ("%s: TOM never initialized?", __func__));
8887 		clrbit(&sc->offload_map, pi->port_id);
8888 	}
8889 
8890 	return (0);
8891 }
8892 
8893 /*
8894  * Add an upper layer driver to the global list.
8895  */
8896 int
8897 t4_register_uld(struct uld_info *ui)
8898 {
8899 	int rc = 0;
8900 	struct uld_info *u;
8901 
8902 	sx_xlock(&t4_uld_list_lock);
8903 	SLIST_FOREACH(u, &t4_uld_list, link) {
8904 		if (u->uld_id == ui->uld_id) {
8905 			rc = EEXIST;
8906 			goto done;
8907 		}
8908 	}
8909 
8910 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
8911 	ui->refcount = 0;
8912 done:
8913 	sx_xunlock(&t4_uld_list_lock);
8914 	return (rc);
8915 }
8916 
8917 int
8918 t4_unregister_uld(struct uld_info *ui)
8919 {
8920 	int rc = EINVAL;
8921 	struct uld_info *u;
8922 
8923 	sx_xlock(&t4_uld_list_lock);
8924 
8925 	SLIST_FOREACH(u, &t4_uld_list, link) {
8926 		if (u == ui) {
8927 			if (ui->refcount > 0) {
8928 				rc = EBUSY;
8929 				goto done;
8930 			}
8931 
8932 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
8933 			rc = 0;
8934 			goto done;
8935 		}
8936 	}
8937 done:
8938 	sx_xunlock(&t4_uld_list_lock);
8939 	return (rc);
8940 }
8941 
8942 int
8943 t4_activate_uld(struct adapter *sc, int id)
8944 {
8945 	int rc;
8946 	struct uld_info *ui;
8947 
8948 	ASSERT_SYNCHRONIZED_OP(sc);
8949 
8950 	if (id < 0 || id > ULD_MAX)
8951 		return (EINVAL);
8952 	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
8953 
8954 	sx_slock(&t4_uld_list_lock);
8955 
8956 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8957 		if (ui->uld_id == id) {
8958 			if (!(sc->flags & FULL_INIT_DONE)) {
8959 				rc = adapter_full_init(sc);
8960 				if (rc != 0)
8961 					break;
8962 			}
8963 
8964 			rc = ui->activate(sc);
8965 			if (rc == 0) {
8966 				setbit(&sc->active_ulds, id);
8967 				ui->refcount++;
8968 			}
8969 			break;
8970 		}
8971 	}
8972 
8973 	sx_sunlock(&t4_uld_list_lock);
8974 
8975 	return (rc);
8976 }
8977 
8978 int
8979 t4_deactivate_uld(struct adapter *sc, int id)
8980 {
8981 	int rc;
8982 	struct uld_info *ui;
8983 
8984 	ASSERT_SYNCHRONIZED_OP(sc);
8985 
8986 	if (id < 0 || id > ULD_MAX)
8987 		return (EINVAL);
8988 	rc = ENXIO;
8989 
8990 	sx_slock(&t4_uld_list_lock);
8991 
8992 	SLIST_FOREACH(ui, &t4_uld_list, link) {
8993 		if (ui->uld_id == id) {
8994 			rc = ui->deactivate(sc);
8995 			if (rc == 0) {
8996 				clrbit(&sc->active_ulds, id);
8997 				ui->refcount--;
8998 			}
8999 			break;
9000 		}
9001 	}
9002 
9003 	sx_sunlock(&t4_uld_list_lock);
9004 
9005 	return (rc);
9006 }
9007 
9008 int
9009 uld_active(struct adapter *sc, int uld_id)
9010 {
9011 
9012 	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9013 
9014 	return (isset(&sc->active_ulds, uld_id));
9015 }
9016 #endif
9017 
9018 /*
9019  * Come up with reasonable defaults for some of the tunables, provided they're
9020  * not set by the user (in which case we'll use the values as is).
9021  */
9022 static void
9023 tweak_tunables(void)
9024 {
9025 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
9026 
9027 	if (t4_ntxq10g < 1) {
9028 #ifdef RSS
9029 		t4_ntxq10g = rss_getnumbuckets();
9030 #else
9031 		t4_ntxq10g = min(nc, NTXQ_10G);
9032 #endif
9033 	}
9034 
9035 	if (t4_ntxq1g < 1) {
9036 #ifdef RSS
9037 		/* XXX: way too many for 1GbE? */
9038 		t4_ntxq1g = rss_getnumbuckets();
9039 #else
9040 		t4_ntxq1g = min(nc, NTXQ_1G);
9041 #endif
9042 	}
9043 
9044 	if (t4_nrxq10g < 1) {
9045 #ifdef RSS
9046 		t4_nrxq10g = rss_getnumbuckets();
9047 #else
9048 		t4_nrxq10g = min(nc, NRXQ_10G);
9049 #endif
9050 	}
9051 
9052 	if (t4_nrxq1g < 1) {
9053 #ifdef RSS
9054 		/* XXX: way too many for 1GbE? */
9055 		t4_nrxq1g = rss_getnumbuckets();
9056 #else
9057 		t4_nrxq1g = min(nc, NRXQ_1G);
9058 #endif
9059 	}
9060 
9061 #ifdef TCP_OFFLOAD
9062 	if (t4_nofldtxq10g < 1)
9063 		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
9064 
9065 	if (t4_nofldtxq1g < 1)
9066 		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
9067 
9068 	if (t4_nofldrxq10g < 1)
9069 		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
9070 
9071 	if (t4_nofldrxq1g < 1)
9072 		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
9073 
9074 	if (t4_toecaps_allowed == -1)
9075 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9076 #else
9077 	if (t4_toecaps_allowed == -1)
9078 		t4_toecaps_allowed = 0;
9079 #endif
9080 
9081 #ifdef DEV_NETMAP
9082 	if (t4_nnmtxq10g < 1)
9083 		t4_nnmtxq10g = min(nc, NNMTXQ_10G);
9084 
9085 	if (t4_nnmtxq1g < 1)
9086 		t4_nnmtxq1g = min(nc, NNMTXQ_1G);
9087 
9088 	if (t4_nnmrxq10g < 1)
9089 		t4_nnmrxq10g = min(nc, NNMRXQ_10G);
9090 
9091 	if (t4_nnmrxq1g < 1)
9092 		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
9093 #endif
9094 
9095 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
9096 		t4_tmr_idx_10g = TMR_IDX_10G;
9097 
9098 	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
9099 		t4_pktc_idx_10g = PKTC_IDX_10G;
9100 
9101 	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
9102 		t4_tmr_idx_1g = TMR_IDX_1G;
9103 
9104 	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
9105 		t4_pktc_idx_1g = PKTC_IDX_1G;
9106 
9107 	if (t4_qsize_txq < 128)
9108 		t4_qsize_txq = 128;
9109 
9110 	if (t4_qsize_rxq < 128)
9111 		t4_qsize_rxq = 128;
9112 	while (t4_qsize_rxq & 7)
9113 		t4_qsize_rxq++;
9114 
9115 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
9116 }
9117 
9118 static struct sx mlu;	/* mod load unload */
9119 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
9120 
9121 static int
9122 mod_event(module_t mod, int cmd, void *arg)
9123 {
9124 	int rc = 0;
9125 	static int loaded = 0;
9126 
9127 	switch (cmd) {
9128 	case MOD_LOAD:
9129 		sx_xlock(&mlu);
9130 		if (loaded++ == 0) {
9131 			t4_sge_modload();
9132 			sx_init(&t4_list_lock, "T4/T5 adapters");
9133 			SLIST_INIT(&t4_list);
9134 #ifdef TCP_OFFLOAD
9135 			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
9136 			SLIST_INIT(&t4_uld_list);
9137 #endif
9138 			t4_tracer_modload();
9139 			tweak_tunables();
9140 		}
9141 		sx_xunlock(&mlu);
9142 		break;
9143 
9144 	case MOD_UNLOAD:
9145 		sx_xlock(&mlu);
9146 		if (--loaded == 0) {
9147 			int tries;
9148 
9149 			sx_slock(&t4_list_lock);
9150 			if (!SLIST_EMPTY(&t4_list)) {
9151 				rc = EBUSY;
9152 				sx_sunlock(&t4_list_lock);
9153 				goto done_unload;
9154 			}
9155 #ifdef TCP_OFFLOAD
9156 			sx_slock(&t4_uld_list_lock);
9157 			if (!SLIST_EMPTY(&t4_uld_list)) {
9158 				rc = EBUSY;
9159 				sx_sunlock(&t4_uld_list_lock);
9160 				sx_sunlock(&t4_list_lock);
9161 				goto done_unload;
9162 			}
9163 #endif
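			/*
			 * Clusters handed up with the driver's custom free
			 * routine must all be returned before the SGE code
			 * can be unloaded safely.  Give them a few seconds
			 * to drain before giving up with EBUSY.
			 */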
9164 			tries = 0;
9165 			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
9166 				uprintf("%ju clusters with custom free routine "
9167 				    "still in use.\n", t4_sge_extfree_refs());
9168 				pause("t4unload", 2 * hz);
9169 			}
9170 #ifdef TCP_OFFLOAD
9171 			sx_sunlock(&t4_uld_list_lock);
9172 #endif
9173 			sx_sunlock(&t4_list_lock);
9174 
9175 			if (t4_sge_extfree_refs() == 0) {
9176 				t4_tracer_modunload();
9177 #ifdef TCP_OFFLOAD
9178 				sx_destroy(&t4_uld_list_lock);
9179 #endif
9180 				sx_destroy(&t4_list_lock);
9181 				t4_sge_modunload();
9182 				loaded = 0;
9183 			} else {
9184 				rc = EBUSY;
9185 				loaded++;	/* undo earlier decrement */
9186 			}
9187 		}
9188 done_unload:
9189 		sx_xunlock(&mlu);
9190 		break;
9191 	}
9192 
9193 	return (rc);
9194 }
9195 
9196 static devclass_t t4_devclass, t5_devclass;
9197 static devclass_t cxgbe_devclass, cxl_devclass;
9198 static devclass_t vcxgbe_devclass, vcxl_devclass;
9199 
9200 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
9201 MODULE_VERSION(t4nex, 1);
9202 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
9203 #ifdef DEV_NETMAP
9204 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
9205 #endif /* DEV_NETMAP */
9206 
9207 
9208 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
9209 MODULE_VERSION(t5nex, 1);
9210 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
9211 #ifdef DEV_NETMAP
9212 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
9213 #endif /* DEV_NETMAP */
9214 
9215 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
9216 MODULE_VERSION(cxgbe, 1);
9217 
9218 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
9219 MODULE_VERSION(cxl, 1);
9220 
9221 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
9222 MODULE_VERSION(vcxgbe, 1);
9223 
9224 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
9225 MODULE_VERSION(vcxl, 1);
9226