xref: /freebsd/sys/dev/cxgbe/t4_main.c (revision bb15ca603fa442c72dde3f3cb8b46db6970e3950)
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>

#include "common/t4_hw.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4fw_interface.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};

/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static void cxgbe_start(struct ifnet *);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");

/*
 * Tunables.
 */
static SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0,
    "cxgbe driver parameters");

static int force_firmware_install = 0;
TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
    &force_firmware_install, 0, "install firmware on every attach.");

/*
 * Holdoff timer and packet counter values.
 */
static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
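/*
 * Note: the timer values above are in microseconds (they are clamped to
 * 200us in t4_attach before being handed to the SGE) and the packet
 * counts are clamped to the hardware maximum of 63 (M_THRESHOLD_0).
 */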

/*
 * Max # of tx and rx queues to use for each 10G and 1G port.
 */
static unsigned int max_ntxq_10g = 8;
TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
    &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");

static unsigned int max_nrxq_10g = 8;
TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
    &max_nrxq_10g, 0, "maximum number of rx queues per 10G port.");

static unsigned int max_ntxq_1g = 2;
TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
    &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");

static unsigned int max_nrxq_1g = 2;
TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
    &max_nrxq_1g, 0, "maximum number of rx queues per 1G port.");

/*
 * Holdoff parameters for 10G and 1G ports.
 */
static unsigned int tmr_idx_10g = 1;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
    &tmr_idx_10g, 0,
    "default timer index for interrupt holdoff (10G ports).");

static int pktc_idx_10g = 2;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
    &pktc_idx_10g, 0,
    "default pkt counter index for interrupt holdoff (10G ports).");

static unsigned int tmr_idx_1g = 1;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
    &tmr_idx_1g, 0,
    "default timer index for interrupt holdoff (1G ports).");

static int pktc_idx_1g = 2;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
    &pktc_idx_1g, 0,
    "default pkt counter index for interrupt holdoff (1G ports).");

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
    &qsize_txq, 0, "default queue size of NIC tx queues.");

static unsigned int qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
    &qsize_rxq, 0, "default queue size of NIC rx queues.");

/*
 * Interrupt types allowed.
 */
static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
    "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");

/*
 * Force the driver to use the same set of interrupts for all ports.
 */
static int intr_shared = 0;
TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN,
    &intr_shared, 0, "interrupts shared between all ports");

static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
    &filter_mode, 0, "default global filter mode.");
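
/*
 * All of the above are boot-time tunables.  Example /boot/loader.conf
 * settings (the values shown here are purely illustrative):
 *
 *	hw.cxgbe.max_nrxq_10G_port="4"
 *	hw.cxgbe.holdoff_timer_idx_10G="2"
 *	hw.cxgbe.interrupt_types="4"	# bit 2 set = allow MSI-X only
 */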

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_shared;	/* Interrupts shared between all ports */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
};
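
/*
 * Each of the entries above describes a window in BAR0 through which a
 * region of adapter memory can be accessed: BASE is the window's offset
 * within BAR0 and APERTURE is the window's size in bytes (the windows are
 * programmed in setup_memwin below).
 */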

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

static int map_bars(struct adapter *);
static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int get_devlog_params(struct adapter *, struct devlog_params *);
static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
static int get_params(struct adapter *, struct fw_caps_config_cmd *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_locked(struct port_info *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_locked(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int first_port_up(struct adapter *);
static int last_port_down(struct adapter *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    iq_intr_handler_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int t4_mod_event(module_t, int, void *);

struct t4_pciids {
	uint16_t device;
	uint8_t mpf;
	char *desc;
} t4_pciids[] = {
	{0xa000, 0, "Chelsio Terminator 4 FPGA"},
	{0x4400, 4, "Chelsio T440-dbg"},
	{0x4401, 4, "Chelsio T420-CR"},
	{0x4402, 4, "Chelsio T422-CR"},
	{0x4403, 4, "Chelsio T440-CR"},
	{0x4404, 4, "Chelsio T420-BCH"},
	{0x4405, 4, "Chelsio T440-BCH"},
	{0x4406, 4, "Chelsio T440-CH"},
	{0x4407, 4, "Chelsio T420-SO"},
	{0x4408, 4, "Chelsio T420-CX"},
	{0x4409, 4, "Chelsio T420-BT"},
	{0x440a, 4, "Chelsio T404-BT"},
};

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
		if (d == t4_pciids[i].device &&
		    pci_get_function(dev) == t4_pciids[i].mpf) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct fw_caps_config_cmd caps;
	uint32_t p, v;
	struct intrs_and_queues iaq;
	struct sge *s;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pf = pci_get_function(dev);
	sc->mbox = sc->pf;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
		v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

	rc = map_bars(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/* Do this really early */
	sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
	sc->cdev->si_drv1 = sc;

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Read firmware devlog parameters */
	(void) get_devlog_params(sc, &sc->params.devlog);

	/* Get device capabilities and select which ones we'll use */
	rc = get_capabilities(sc, &caps);
	if (rc != 0) {
		device_printf(dev,
		    "failed to initialize adapter capabilities: %d.\n", rc);
		goto done;
	}

	/* Choose the global RSS mode. */
	rc = -t4_config_glbl_rss(sc, sc->mbox,
	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
	    F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
	    F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
	    F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (rc != 0) {
		device_printf(dev,
		    "failed to select global RSS mode: %d.\n", rc);
		goto done;
	}

	/* These are total (sum of all ports) limits for a bus driver */
	rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
	    128,	/* max # of egress queues */
	    64,		/* max # of egress Ethernet or control queues */
	    64,		/* max # of ingress queues with fl/interrupt */
	    0,		/* max # of ingress queues without interrupt */
	    0,		/* PCIe traffic class */
	    4,		/* max # of virtual interfaces */
	    M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
	    FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (rc != 0) {
		device_printf(dev,
		    "failed to configure pf/vf resources: %d.\n", rc);
		goto done;
	}

	/* Need this before sge_init */
	for (i = 0; i < SGE_NTIMERS; i++)
		sc->sge.timer_val[i] = min(intr_timer[i], 200U);
	for (i = 0; i < SGE_NCOUNTERS; i++)
		sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);

	/* Also need the cooked value of cclk before sge_init */
	p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to obtain core clock value: %d.\n", rc);
		goto done;
	}
	sc->params.vpd.cclk = v;

	t4_sge_init(sc);

	t4_set_filter_mode(sc, filter_mode);
	t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
	    V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP),
	    V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP));
	t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR,
	    F_LOOKUPEVERYPKT);

	/* get basic stuff going */
	rc = -t4_early_init(sc, sc->mbox);
	if (rc != 0) {
		device_printf(dev, "early init failed: %d.\n", rc);
		goto done;
	}

	rc = get_params(sc, &caps);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* These are finalized by FW initialization; load their values now */
	v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(v);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
	t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
	    F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);

	setup_memwin(sc);

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		if (is_10G_port(pi)) {
			n10g++;
			pi->tmr_idx = tmr_idx_10g;
			pi->pktc_idx = pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = tmr_idx_1g;
			pi->pktc_idx = pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;

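		/*
		 * The loop below rounds the rx queue size up to the next
		 * multiple of 8, which is what the SGE expects for ingress
		 * queue sizes.
		 */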
		pi->qsize_rxq = max(qsize_rxq, 128);
		while (pi->qsize_rxq & 7)
			pi->qsize_rxq++;
		pi->qsize_txq = max(qsize_txq, 128);

		if (pi->qsize_rxq != qsize_rxq) {
			device_printf(dev,
			    "using %d instead of %d as the rx queue size.\n",
			    pi->qsize_rxq, qsize_rxq);
		}
		if (pi->qsize_txq != qsize_txq) {
			device_printf(dev,
			    "using %d instead of %d as the tx queue size.\n",
			    pi->qsize_txq, qsize_txq);
		}

		pi->dev = device_add_child(dev, "cxgbe", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);

		setbit(&sc->registered_device_map, i);
	}

	if (sc->registered_device_map == 0) {
		device_printf(dev, "no usable ports\n");
		rc = ENXIO;
		goto done;
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports;	/* control queues, 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_shared)
		sc->flags |= INTR_SHARED;
	s->niq += NINTRQ(sc);		/* interrupt queues */

	s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->l2t = t4_init_l2t(M_WAITOK);

	t4_sysctls(sc);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;

		pi->first_txq = tqidx;
		pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

#ifdef INVARIANTS
	device_printf(dev,
	    "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
	    sc, sc->params.nports, sc->params.portvec,
	    sc->intr_type, sc->intr_count);
#endif
	t4_set_desc(sc);

done:
	if (rc != 0)
		t4_detach(dev);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i;

	sc = device_get_softc(dev);

	if (sc->cdev)
		destroy_dev(sc->cdev);

	bus_generic_detach(dev);
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.intrq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	mtx_destroy(&sc->sc_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO)
#define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
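
/*
 * Note that IFCAP_TSO covers both TSO4 and TSO6, so TSO over IPv6 is
 * advertised in if_capabilities but is left out of if_capenable by default.
 */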

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);
	pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pi->tq);
	if (pi->tq == NULL) {
		device_printf(dev, "failed to allocate port task queue\n");
		if_free(pi->ifp);
		return (ENOMEM);
	}
	taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_start = cxgbe_start;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_snd.ifq_drv_maxlen = 1024;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = T4_CAP;
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef INVARIANTS
	device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
#endif

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgbe_uninit_synchronized(pi);
	if (rc != 0)
		device_printf(dev, "port uninit failed: %d.\n", rc);

	taskqueue_free(pi->tq);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK(sc);
	cxgbe_init_locked(pi); /* releases adapter lock */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc) {
fail:
			ADAPTER_UNLOCK(sc);
			return (rc);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			rc = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				t4_update_fl_bufsize(ifp);
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_MTU);
				PORT_UNLOCK(pi);
			}
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(pi)) {
			rc = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						rc = EBUSY;
						goto fail;
					}
					PORT_LOCK(pi);
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
					PORT_UNLOCK(pi);
				}
				ADAPTER_UNLOCK(sc);
			} else
				rc = cxgbe_init_locked(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_locked(pi);
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(pi);
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
			PORT_UNLOCK(pi);
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
				    "tso disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_LRO) {
#ifdef INET
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->flags |= RXQ_LRO_ENABLED;
				else
					rxq->flags &= ~RXQ_LRO_ENABLED;
			}
#endif
		}
#ifndef TCP_OFFLOAD_DISABLE
		if (mask & IFCAP_TOE4) {
			rc = EOPNOTSUPP;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_VLANEX);
				PORT_UNLOCK(pi);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static void
cxgbe_start(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	for_each_txq(pi, i, txq) {
		if (TXQ_TRYLOCK(txq)) {
			txq_start(ifp, txq);
			TXQ_UNLOCK(txq);
		}
	}
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		return (0);
	}

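	/* Use the flow ID (if any) to spread flows across this port's txqs. */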
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		/*
		 * XXX: make sure that this packet really is sent out.  There
		 * is a small race where t4_eth_tx may stop draining the drbr
		 * and go away just before we enqueue this mbuf.
		 */

		return (drbr_enqueue(ifp, br, m));
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !IFF_DRV_RUNNING. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static void
setup_memwin(struct adapter *sc)
{
	u_long bar0;

	bar0 = rman_get_start(sc->regs_res);

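	/*
	 * Each window decodes at bar0 + BASE, and its size is encoded in the
	 * WINDOW field as log2(aperture in bytes) - 10, i.e. in units of 1KB.
	 */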
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nc, nrxq10g, nrxq1g;

	bzero(iaq, sizeof(*iaq));
	nc = mp_ncpus;	/* our snapshot of the number of CPUs */
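
	/*
	 * Walk the interrupt types from most to least preferred (MSI-X,
	 * then MSI, then INTx) and use the first allowed type that works.
	 */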
	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;

		if (navail == 0)
			continue;

		iaq->intr_type = itype;

		iaq->ntxq10g = min(nc, max_ntxq_10g);
		iaq->ntxq1g = min(nc, max_ntxq_1g);

		nrxq10g = min(nc, max_nrxq_10g);
		nrxq1g = min(nc, max_nrxq_1g);

		iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR;
		if (iaq->nirq <= navail && intr_shared == 0) {

			if (itype == INTR_MSI && !powerof2(iaq->nirq))
				goto share;

			/* One for err, one for fwq, and one for each rxq */

			iaq->intr_shared = 0;
			iaq->nrxq10g = nrxq10g;
			iaq->nrxq1g = nrxq1g;

		} else {
share:
			iaq->intr_shared = 1;

			if (navail >= nc + T4_EXTRA_INTR) {
				if (itype == INTR_MSIX)
					navail = nc + T4_EXTRA_INTR;

				/* navail is and must remain a pow2 for MSI */
				if (itype == INTR_MSI) {
					KASSERT(powerof2(navail),
					    ("%d not power of 2", navail));

					while (navail / 2 >= nc + T4_EXTRA_INTR)
						navail /= 2;
				}
			}
			iaq->nirq = navail;	/* total # of interrupts */

			/*
			 * If we have multiple vectors available reserve one
			 * exclusively for errors.  The rest will be shared by
			 * the fwq and data.
			 */
			if (navail > 1)
				navail--;
			iaq->nrxq10g = min(nrxq10g, navail);
			iaq->nrxq1g = min(nrxq1g, navail);
		}

		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			pci_release_msi(sc->dev);
			goto share;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors: type=%d, rc=%d, req=%d, "
		    "rcvd=%d\n", itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));

	return (ENXIO);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
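/*
 * The replacement image is obtained by name (T4_FWNAME) via firmware(9),
 * so the matching firmware module must be available (compiled in or
 * loadable) for an install or upgrade to take place.
 */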
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw;
	int rc;
	enum dev_state state;

	/* Check firmware version and install a different one if necessary */
	rc = t4_check_fw_version(sc);
	if (rc != 0 || force_firmware_install) {
		uint32_t v = 0;

		fw = firmware_get(T4_FWNAME);
		if (fw != NULL) {
			const struct fw_hdr *hdr = (const void *)fw->data;

			v = ntohl(hdr->fw_ver);

			/*
			 * The firmware module will not be used if it isn't the
			 * same major version as what the driver was compiled
			 * with.  This check trumps force_firmware_install.
			 */
			if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
				device_printf(sc->dev,
				    "Found firmware image but version %d "
				    "cannot be used with this driver (%d)\n",
				    G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);

				firmware_put(fw, FIRMWARE_UNLOAD);
				fw = NULL;
			}
		}

		if (fw == NULL && (rc < 0 || force_firmware_install)) {
			device_printf(sc->dev, "No usable firmware. "
			    "card has %d.%d.%d, driver compiled with %d.%d.%d, "
			    "force_firmware_install%s set.\n",
			    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
			    FW_VERSION_MAJOR, FW_VERSION_MINOR,
			    FW_VERSION_MICRO,
			    force_firmware_install ? "" : " not");
			return (EAGAIN);
		}

		/*
		 * Always upgrade, even for minor/micro/build mismatches.
		 * Downgrade only for a major version mismatch or if
		 * force_firmware_install was specified.
		 */
		if (fw != NULL && (rc < 0 || force_firmware_install ||
		    v > sc->params.fw_vers)) {
			device_printf(sc->dev,
			    "installing firmware %d.%d.%d.%d on card.\n",
			    G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
			    G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));

			rc = -t4_load_fw(sc, fw->data, fw->datasize);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to install firmware: %d\n", rc);
				firmware_put(fw, FIRMWARE_UNLOAD);
				return (rc);
			} else {
				/* refresh */
				(void) t4_check_fw_version(sc);
			}
		}

		if (fw != NULL)
			firmware_put(fw, FIRMWARE_UNLOAD);
	}

	/* Contact firmware, request master */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
	if (rc < 0) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d.\n", rc);
		return (rc);
	}

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		return (rc);
	}

	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	sc->flags |= FW_OK;

	return (0);
}

static int
get_devlog_params(struct adapter *sc, struct devlog_params *dlog)
{
	struct fw_devlog_cmd devlog_cmd;
	uint32_t meminfo;
	int rc;

	bzero(&devlog_cmd, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd),
	    &devlog_cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		return (rc);
	}

	meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo);
	dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4;
	dlog->size = be32toh(devlog_cmd.memsize_devlog);

	return (0);
}

static int
get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
{
	int rc;

	bzero(caps, sizeof(*caps));
	caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps->retval_len16 = htobe32(FW_LEN16(*caps));

	rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
	if (rc != 0)
		return (rc);

	if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
		caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);

	caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);

	return (rc);
}

static int
get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
{
	int rc;
	uint32_t params[7], val[7];

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(IQFLINT_START);
	params[2] = FW_PARAM_PFVF(EQ_START);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters: %d.\n", rc);
		goto done;
	}

	sc->params.portvec = val[0];
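	/* nports = number of bits set in portvec (Kernighan's popcount). */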
	sc->params.nports = 0;
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->sge.iq_start = val[1];
	sc->sge.eq_start = val[2];
	sc->tids.ftid_base = val[3];
	sc->tids.nftids = val[4] - val[3] + 1;

	if (caps->toecaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			goto done;
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps->rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (caps->iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

done:
	return (rc);
}

static void
t4_set_desc(struct adapter *sc)
{
	char buf[128];
	struct adapter_params *p = &sc->params;

	snprintf(buf, sizeof(buf),
	    "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
	    p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
	    p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);

	device_set_desc_copy(sc->dev, buf);
}

static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_KR:
	default:
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}

/*
 * Program the port's XGMAC based on parameters in ifnet.  The caller also
 * indicates which parameters should be programmed (the rest are left alone).
 */
static int
update_mac_settings(struct port_info *pi, int flags)
{
	int rc;
	struct ifnet *ifp = pi->ifp;
	struct adapter *sc = pi->adapter;
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

	PORT_LOCK_ASSERT_OWNED(pi);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
	    vlanex, false);
	if (rc) {
		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
		return (rc);
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
		    ucaddr, true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			pi->xact_addr_filt = rc;
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr;
		int del = 1;
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;

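		/*
		 * Note: the first call below runs with del == 1, which also
		 * clears any previously programmed mc filters; the remaining
		 * iterations (del == 0) append to the new list.
		 */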
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
			    &mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				if_printf(ifp, "failed to add mc address"
				    " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
				    mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
				    mcaddr[4], mcaddr[5], rc);
				goto mcfail;
			}
			del = 0;
		}

		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d\n",
			    rc);
1759 mcfail:
1760 		if_maddr_runlock(ifp);
1761 	}
1762 
1763 	return (rc);
1764 }
1765 
1766 static int
1767 cxgbe_init_locked(struct port_info *pi)
1768 {
1769 	struct adapter *sc = pi->adapter;
1770 	int rc = 0;
1771 
1772 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1773 
1774 	while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1775 		if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
1776 			rc = EINTR;
1777 			goto done;
1778 		}
1779 	}
1780 	if (IS_DOOMED(pi)) {
1781 		rc = ENXIO;
1782 		goto done;
1783 	}
1784 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1785 
1786 	/* Give up the adapter lock, port init code can sleep. */
1787 	SET_BUSY(sc);
1788 	ADAPTER_UNLOCK(sc);
1789 
1790 	rc = cxgbe_init_synchronized(pi);
1791 
1792 done:
1793 	ADAPTER_LOCK(sc);
1794 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1795 	CLR_BUSY(sc);
1796 	wakeup_one(&sc->flags);
1797 	ADAPTER_UNLOCK(sc);
1798 	return (rc);
1799 }
1800 
1801 static int
1802 cxgbe_init_synchronized(struct port_info *pi)
1803 {
1804 	struct adapter *sc = pi->adapter;
1805 	struct ifnet *ifp = pi->ifp;
1806 	int rc = 0, i;
1807 	uint16_t *rss;
1808 	struct sge_rxq *rxq;
1809 
1810 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1811 
1812 	if (isset(&sc->open_device_map, pi->port_id)) {
1813 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
1814 		    ("mismatch between open_device_map and if_drv_flags"));
1815 		return (0);	/* already running */
1816 	}
1817 
1818 	if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
1819 		return (rc);	/* error message displayed already */
1820 
1821 	/*
1822 	 * Allocate tx/rx/fl queues for this port.
1823 	 */
1824 	rc = t4_setup_eth_queues(pi);
1825 	if (rc != 0)
1826 		goto done;	/* error message displayed already */
1827 
1828 	/*
1829 	 * Setup RSS for this port.
1830 	 */
1831 	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
1832 	for_each_rxq(pi, i, rxq) {
1833 		rss[i] = rxq->iq.abs_id;
1834 	}
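	/*
	 * Note: the indirection table written below has pi->rss_size
	 * slots but only pi->nrxq distinct entries; t4_config_rss_range
	 * cycles through the supplied array, so a 128-slot table over 4
	 * rx queues ends up as q0 q1 q2 q3 q0 q1 ... and spreads flows
	 * across all queues.
	 */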
1835 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
1836 	    pi->nrxq);
1837 	free(rss, M_CXGBE);
1838 	if (rc != 0) {
1839 		if_printf(ifp, "rss_config failed: %d\n", rc);
1840 		goto done;
1841 	}
1842 
1843 	PORT_LOCK(pi);
1844 	rc = update_mac_settings(pi, XGMAC_ALL);
1845 	PORT_UNLOCK(pi);
1846 	if (rc)
1847 		goto done;	/* error message displayed already */
1848 
1849 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
1850 	if (rc != 0) {
1851 		if_printf(ifp, "start_link failed: %d\n", rc);
1852 		goto done;
1853 	}
1854 
1855 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
1856 	if (rc != 0) {
1857 		if_printf(ifp, "enable_vi failed: %d\n", rc);
1858 		goto done;
1859 	}
1860 	pi->flags |= VI_ENABLED;
1861 
1862 	/* all ok */
1863 	setbit(&sc->open_device_map, pi->port_id);
1864 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1865 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1866 
1867 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
1868 done:
1869 	if (rc != 0)
1870 		cxgbe_uninit_synchronized(pi);
1871 
1872 	return (rc);
1873 }
1874 
1875 static int
1876 cxgbe_uninit_locked(struct port_info *pi)
1877 {
1878 	struct adapter *sc = pi->adapter;
1879 	int rc;
1880 
1881 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1882 
1883 	while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1884 		if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
1885 			rc = EINTR;
1886 			goto done;
1887 		}
1888 	}
1889 	if (IS_DOOMED(pi)) {
1890 		rc = ENXIO;
1891 		goto done;
1892 	}
1893 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1894 	SET_BUSY(sc);
1895 	ADAPTER_UNLOCK(sc);
1896 
1897 	rc = cxgbe_uninit_synchronized(pi);
1898 
1899 	ADAPTER_LOCK(sc);
1900 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1901 	CLR_BUSY(sc);
1902 	wakeup_one(&sc->flags);
1903 done:
1904 	ADAPTER_UNLOCK(sc);
1905 	return (rc);
1906 }
1907 
1908 /*
1909  * Idempotent.
1910  */
1911 static int
1912 cxgbe_uninit_synchronized(struct port_info *pi)
1913 {
1914 	struct adapter *sc = pi->adapter;
1915 	struct ifnet *ifp = pi->ifp;
1916 	int rc;
1917 
1918 	/*
1919 	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1920 	 */
1921 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1922 
1923 	/*
1924 	 * Clear this port's bit from the open device map, and then drain
1925 	 * tasks and callouts.
1926 	 */
1927 	clrbit(&sc->open_device_map, pi->port_id);
1928 
1929 	PORT_LOCK(pi);
1930 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1931 	callout_stop(&pi->tick);
1932 	PORT_UNLOCK(pi);
1933 	callout_drain(&pi->tick);
1934 
1935 	/*
1936 	 * Stop and then free the queues' resources, including the queues
1937 	 * themselves.
1938 	 *
1939 	 * XXX: we could just stop the queues here (on ifconfig down) and free
1940 	 * them later (on port detach), but having up/down go through the entire
1941 	 * allocate/activate/deactivate/free sequence is a good way to find
1942 	 * leaks and bugs.
1943 	 */
1944 	rc = t4_teardown_eth_queues(pi);
1945 	if (rc != 0)
1946 		if_printf(ifp, "teardown failed: %d\n", rc);
1947 
1948 	if (pi->flags & VI_ENABLED) {
1949 		rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
1950 		if (rc)
1951 			if_printf(ifp, "disable_vi failed: %d\n", rc);
1952 		else
1953 			pi->flags &= ~VI_ENABLED;
1954 	}
1955 
1956 	pi->link_cfg.link_ok = 0;
1957 	pi->link_cfg.speed = 0;
1958 	t4_os_link_changed(sc, pi->port_id, 0);
1959 
1960 	if (sc->open_device_map == 0)
1961 		last_port_down(sc);
1962 
1963 	return (0);
1964 }
1965 
1966 #define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
1967 	rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
1968 	if (rc != 0) \
1969 		goto done; \
1970 } while (0)
1971 static int
1972 first_port_up(struct adapter *sc)
1973 {
1974 	int rc, i, rid, p, q;
1975 	char s[8];
1976 	struct irq *irq;
1977 	struct sge_iq *intrq;
1978 
1979 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1980 
1981 	/*
1982 	 * Set up the queues that belong to the adapter (not any particular port).
1983 	 */
1984 	rc = t4_setup_adapter_queues(sc);
1985 	if (rc != 0)
1986 		goto done;
1987 
1988 	/*
1989 	 * Setup interrupts.
1990 	 */
1991 	irq = &sc->irq[0];
1992 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
1993 	if (sc->intr_count == 1) {
1994 		KASSERT(sc->flags & INTR_SHARED,
1995 		    ("%s: single interrupt but not shared?", __func__));
1996 
1997 		T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
1998 	} else {
1999 		/* Multiple interrupts.  The first one is always error intr */
2000 		T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2001 		irq++;
2002 		rid++;
2003 
2004 		/* Firmware event queue normally has an interrupt of its own */
2005 		if (sc->intr_count > T4_EXTRA_INTR) {
2006 			T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2007 			    "evt");
2008 			irq++;
2009 			rid++;
2010 		}
2011 
2012 		intrq = &sc->sge.intrq[0];
2013 		if (sc->flags & INTR_SHARED) {
2014 
2015 			/* All ports share these interrupt queues */
2016 
2017 			for (i = 0; i < NINTRQ(sc); i++) {
2018 				snprintf(s, sizeof(s), "*.%d", i);
2019 				T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s);
2020 				irq++;
2021 				rid++;
2022 				intrq++;
2023 			}
2024 		} else {
2025 
2026 			/* Each port has its own set of interrupt queues */
2027 
2028 			for (p = 0; p < sc->params.nports; p++) {
2029 				for (q = 0; q < sc->port[p]->nrxq; q++) {
2030 					snprintf(s, sizeof(s), "%d.%d", p, q);
2031 					T4_ALLOC_IRQ(sc, irq, rid, t4_intr,
2032 					    intrq, s);
2033 					irq++;
2034 					rid++;
2035 					intrq++;
2036 				}
2037 			}
2038 		}
2039 	}
2040 
2041 	t4_intr_enable(sc);
2042 	sc->flags |= FULL_INIT_DONE;
2043 
2044 done:
2045 	if (rc != 0)
2046 		last_port_down(sc);
2047 
2048 	return (rc);
2049 }
2050 #undef T4_ALLOC_IRQ
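/*
 * Worked example for the vector layout above (hypothetical MSI-X setup
 * with enough vectors that INTR_SHARED is clear): a 2-port adapter with
 * 4 rx queues per port uses
 *
 *	vector 0:	error interrupt			("err")
 *	vector 1:	firmware event queue		("evt")
 *	vectors 2-5:	port 0 rx queues		("0.0" .. "0.3")
 *	vectors 6-9:	port 1 rx queues		("1.0" .. "1.3")
 *
 * i.e. 2 + nports * nrxq = 10 vectors.  With fewer vectors available
 * the ports share interrupt queues ("*.N") or everything collapses to
 * the single t4_intr_all handler.
 */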
2051 
2052 /*
2053  * Idempotent.
2054  */
2055 static int
2056 last_port_down(struct adapter *sc)
2057 {
2058 	int i;
2059 
2060 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2061 
2062 	t4_intr_disable(sc);
2063 
2064 	t4_teardown_adapter_queues(sc);
2065 
2066 	for (i = 0; i < sc->intr_count; i++)
2067 		t4_free_irq(sc, &sc->irq[i]);
2068 
2069 	sc->flags &= ~FULL_INIT_DONE;
2070 
2071 	return (0);
2072 }
2073 
2074 static int
2075 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2076     iq_intr_handler_t *handler, void *arg, char *name)
2077 {
2078 	int rc;
2079 
2080 	irq->rid = rid;
2081 	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2082 	    RF_SHAREABLE | RF_ACTIVE);
2083 	if (irq->res == NULL) {
2084 		device_printf(sc->dev,
2085 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
2086 		return (ENOMEM);
2087 	}
2088 
2089 	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
2090 	    NULL, handler, arg, &irq->tag);
2091 	if (rc != 0) {
2092 		device_printf(sc->dev,
2093 		    "failed to setup interrupt for rid %d, name %s: %d\n",
2094 		    rid, name, rc);
2095 	} else if (name)
2096 		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
2097 
2098 	return (rc);
2099 }
2100 
2101 static int
2102 t4_free_irq(struct adapter *sc, struct irq *irq)
2103 {
2104 	if (irq->tag)
2105 		bus_teardown_intr(sc->dev, irq->res, irq->tag);
2106 	if (irq->res)
2107 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2108 
2109 	bzero(irq, sizeof(*irq));
2110 
2111 	return (0);
2112 }
2113 
2114 static void
2115 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2116     unsigned int end)
2117 {
2118 	uint32_t *p = (uint32_t *)(buf + start);
2119 
2120 	for ( ; start <= end; start += sizeof(uint32_t))
2121 		*p++ = t4_read_reg(sc, start);
2122 }
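/*
 * Note: the range is inclusive at both ends and the register offset
 * doubles as the offset into the caller's buffer, so for example
 * reg_block_dump(sc, buf, 0x1008, 0x1108) stores the 65 words at
 * 0x1008, 0x100c, ..., 0x1108 into buf[0x1008] through buf[0x110b].
 */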
2123 
2124 static void
2125 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
2126 {
2127 	int i;
2128 	static const unsigned int reg_ranges[] = {
2129 		0x1008, 0x1108,
2130 		0x1180, 0x11b4,
2131 		0x11fc, 0x123c,
2132 		0x1300, 0x173c,
2133 		0x1800, 0x18fc,
2134 		0x3000, 0x30d8,
2135 		0x30e0, 0x5924,
2136 		0x5960, 0x59d4,
2137 		0x5a00, 0x5af8,
2138 		0x6000, 0x6098,
2139 		0x6100, 0x6150,
2140 		0x6200, 0x6208,
2141 		0x6240, 0x6248,
2142 		0x6280, 0x6338,
2143 		0x6370, 0x638c,
2144 		0x6400, 0x643c,
2145 		0x6500, 0x6524,
2146 		0x6a00, 0x6a38,
2147 		0x6a60, 0x6a78,
2148 		0x6b00, 0x6b84,
2149 		0x6bf0, 0x6c84,
2150 		0x6cf0, 0x6d84,
2151 		0x6df0, 0x6e84,
2152 		0x6ef0, 0x6f84,
2153 		0x6ff0, 0x7084,
2154 		0x70f0, 0x7184,
2155 		0x71f0, 0x7284,
2156 		0x72f0, 0x7384,
2157 		0x73f0, 0x7450,
2158 		0x7500, 0x7530,
2159 		0x7600, 0x761c,
2160 		0x7680, 0x76cc,
2161 		0x7700, 0x7798,
2162 		0x77c0, 0x77fc,
2163 		0x7900, 0x79fc,
2164 		0x7b00, 0x7c38,
2165 		0x7d00, 0x7efc,
2166 		0x8dc0, 0x8e1c,
2167 		0x8e30, 0x8e78,
2168 		0x8ea0, 0x8f6c,
2169 		0x8fc0, 0x9074,
2170 		0x90fc, 0x90fc,
2171 		0x9400, 0x9458,
2172 		0x9600, 0x96bc,
2173 		0x9800, 0x9808,
2174 		0x9820, 0x983c,
2175 		0x9850, 0x9864,
2176 		0x9c00, 0x9c6c,
2177 		0x9c80, 0x9cec,
2178 		0x9d00, 0x9d6c,
2179 		0x9d80, 0x9dec,
2180 		0x9e00, 0x9e6c,
2181 		0x9e80, 0x9eec,
2182 		0x9f00, 0x9f6c,
2183 		0x9f80, 0x9fec,
2184 		0xd004, 0xd03c,
2185 		0xdfc0, 0xdfe0,
2186 		0xe000, 0xea7c,
2187 		0xf000, 0x11190,
2188 		0x19040, 0x19124,
2189 		0x19150, 0x191b0,
2190 		0x191d0, 0x191e8,
2191 		0x19238, 0x1924c,
2192 		0x193f8, 0x19474,
2193 		0x19490, 0x194f8,
2194 		0x19800, 0x19f30,
2195 		0x1a000, 0x1a06c,
2196 		0x1a0b0, 0x1a120,
2197 		0x1a128, 0x1a138,
2198 		0x1a190, 0x1a1c4,
2199 		0x1a1fc, 0x1a1fc,
2200 		0x1e040, 0x1e04c,
2201 		0x1e240, 0x1e28c,
2202 		0x1e2c0, 0x1e2c0,
2203 		0x1e2e0, 0x1e2e0,
2204 		0x1e300, 0x1e384,
2205 		0x1e3c0, 0x1e3c8,
2206 		0x1e440, 0x1e44c,
2207 		0x1e640, 0x1e68c,
2208 		0x1e6c0, 0x1e6c0,
2209 		0x1e6e0, 0x1e6e0,
2210 		0x1e700, 0x1e784,
2211 		0x1e7c0, 0x1e7c8,
2212 		0x1e840, 0x1e84c,
2213 		0x1ea40, 0x1ea8c,
2214 		0x1eac0, 0x1eac0,
2215 		0x1eae0, 0x1eae0,
2216 		0x1eb00, 0x1eb84,
2217 		0x1ebc0, 0x1ebc8,
2218 		0x1ec40, 0x1ec4c,
2219 		0x1ee40, 0x1ee8c,
2220 		0x1eec0, 0x1eec0,
2221 		0x1eee0, 0x1eee0,
2222 		0x1ef00, 0x1ef84,
2223 		0x1efc0, 0x1efc8,
2224 		0x1f040, 0x1f04c,
2225 		0x1f240, 0x1f28c,
2226 		0x1f2c0, 0x1f2c0,
2227 		0x1f2e0, 0x1f2e0,
2228 		0x1f300, 0x1f384,
2229 		0x1f3c0, 0x1f3c8,
2230 		0x1f440, 0x1f44c,
2231 		0x1f640, 0x1f68c,
2232 		0x1f6c0, 0x1f6c0,
2233 		0x1f6e0, 0x1f6e0,
2234 		0x1f700, 0x1f784,
2235 		0x1f7c0, 0x1f7c8,
2236 		0x1f840, 0x1f84c,
2237 		0x1fa40, 0x1fa8c,
2238 		0x1fac0, 0x1fac0,
2239 		0x1fae0, 0x1fae0,
2240 		0x1fb00, 0x1fb84,
2241 		0x1fbc0, 0x1fbc8,
2242 		0x1fc40, 0x1fc4c,
2243 		0x1fe40, 0x1fe8c,
2244 		0x1fec0, 0x1fec0,
2245 		0x1fee0, 0x1fee0,
2246 		0x1ff00, 0x1ff84,
2247 		0x1ffc0, 0x1ffc8,
2248 		0x20000, 0x2002c,
2249 		0x20100, 0x2013c,
2250 		0x20190, 0x201c8,
2251 		0x20200, 0x20318,
2252 		0x20400, 0x20528,
2253 		0x20540, 0x20614,
2254 		0x21000, 0x21040,
2255 		0x2104c, 0x21060,
2256 		0x210c0, 0x210ec,
2257 		0x21200, 0x21268,
2258 		0x21270, 0x21284,
2259 		0x212fc, 0x21388,
2260 		0x21400, 0x21404,
2261 		0x21500, 0x21518,
2262 		0x2152c, 0x2153c,
2263 		0x21550, 0x21554,
2264 		0x21600, 0x21600,
2265 		0x21608, 0x21628,
2266 		0x21630, 0x2163c,
2267 		0x21700, 0x2171c,
2268 		0x21780, 0x2178c,
2269 		0x21800, 0x21c38,
2270 		0x21c80, 0x21d7c,
2271 		0x21e00, 0x21e04,
2272 		0x22000, 0x2202c,
2273 		0x22100, 0x2213c,
2274 		0x22190, 0x221c8,
2275 		0x22200, 0x22318,
2276 		0x22400, 0x22528,
2277 		0x22540, 0x22614,
2278 		0x23000, 0x23040,
2279 		0x2304c, 0x23060,
2280 		0x230c0, 0x230ec,
2281 		0x23200, 0x23268,
2282 		0x23270, 0x23284,
2283 		0x232fc, 0x23388,
2284 		0x23400, 0x23404,
2285 		0x23500, 0x23518,
2286 		0x2352c, 0x2353c,
2287 		0x23550, 0x23554,
2288 		0x23600, 0x23600,
2289 		0x23608, 0x23628,
2290 		0x23630, 0x2363c,
2291 		0x23700, 0x2371c,
2292 		0x23780, 0x2378c,
2293 		0x23800, 0x23c38,
2294 		0x23c80, 0x23d7c,
2295 		0x23e00, 0x23e04,
2296 		0x24000, 0x2402c,
2297 		0x24100, 0x2413c,
2298 		0x24190, 0x241c8,
2299 		0x24200, 0x24318,
2300 		0x24400, 0x24528,
2301 		0x24540, 0x24614,
2302 		0x25000, 0x25040,
2303 		0x2504c, 0x25060,
2304 		0x250c0, 0x250ec,
2305 		0x25200, 0x25268,
2306 		0x25270, 0x25284,
2307 		0x252fc, 0x25388,
2308 		0x25400, 0x25404,
2309 		0x25500, 0x25518,
2310 		0x2552c, 0x2553c,
2311 		0x25550, 0x25554,
2312 		0x25600, 0x25600,
2313 		0x25608, 0x25628,
2314 		0x25630, 0x2563c,
2315 		0x25700, 0x2571c,
2316 		0x25780, 0x2578c,
2317 		0x25800, 0x25c38,
2318 		0x25c80, 0x25d7c,
2319 		0x25e00, 0x25e04,
2320 		0x26000, 0x2602c,
2321 		0x26100, 0x2613c,
2322 		0x26190, 0x261c8,
2323 		0x26200, 0x26318,
2324 		0x26400, 0x26528,
2325 		0x26540, 0x26614,
2326 		0x27000, 0x27040,
2327 		0x2704c, 0x27060,
2328 		0x270c0, 0x270ec,
2329 		0x27200, 0x27268,
2330 		0x27270, 0x27284,
2331 		0x272fc, 0x27388,
2332 		0x27400, 0x27404,
2333 		0x27500, 0x27518,
2334 		0x2752c, 0x2753c,
2335 		0x27550, 0x27554,
2336 		0x27600, 0x27600,
2337 		0x27608, 0x27628,
2338 		0x27630, 0x2763c,
2339 		0x27700, 0x2771c,
2340 		0x27780, 0x2778c,
2341 		0x27800, 0x27c38,
2342 		0x27c80, 0x27d7c,
2343 		0x27e00, 0x27e04
2344 	};
2345 
2346 	regs->version = 4 | (sc->params.rev << 10);
2347 	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
2348 		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
2349 }
2350 
2351 static void
2352 cxgbe_tick(void *arg)
2353 {
2354 	struct port_info *pi = arg;
2355 	struct ifnet *ifp = pi->ifp;
2356 	struct sge_txq *txq;
2357 	int i, drops;
2358 	struct port_stats *s = &pi->stats;
2359 
2360 	PORT_LOCK(pi);
2361 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2362 		PORT_UNLOCK(pi);
2363 		return;	/* without scheduling another callout */
2364 	}
2365 
2366 	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2367 
2368 	ifp->if_opackets = s->tx_frames;
2369 	ifp->if_ipackets = s->rx_frames;
2370 	ifp->if_obytes = s->tx_octets;
2371 	ifp->if_ibytes = s->rx_octets;
2372 	ifp->if_omcasts = s->tx_mcast_frames;
2373 	ifp->if_imcasts = s->rx_mcast_frames;
2374 	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2375 	    s->rx_ovflow3;
2376 
2377 	drops = s->tx_drop;
2378 	for_each_txq(pi, i, txq)
2379 		drops += txq->br->br_drops;
2380 	ifp->if_snd.ifq_drops = drops;
2381 
2382 	ifp->if_oerrors = s->tx_error_frames;
2383 	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2384 	    s->rx_fcs_err + s->rx_len_err;
2385 
2386 	callout_schedule(&pi->tick, hz);
2387 	PORT_UNLOCK(pi);
2388 }
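/*
 * Note: the tick callout pairs with the init/uninit paths above.
 * cxgbe_init_synchronized arms it with callout_reset, this handler
 * re-arms itself via callout_schedule while IFF_DRV_RUNNING is set,
 * and cxgbe_uninit_synchronized does callout_stop under the port lock
 * followed by callout_drain outside it, so a tick that is already
 * running has finished before teardown proceeds.
 */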
2389 
2390 static int
2391 t4_sysctls(struct adapter *sc)
2392 {
2393 	struct sysctl_ctx_list *ctx;
2394 	struct sysctl_oid *oid;
2395 	struct sysctl_oid_list *children;
2396 
2397 	ctx = device_get_sysctl_ctx(sc->dev);
2398 	oid = device_get_sysctl_tree(sc->dev);
2399 	children = SYSCTL_CHILDREN(oid);
2400 
2401 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2402 	    &sc->params.nports, 0, "# of ports");
2403 
2404 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2405 	    &sc->params.rev, 0, "chip hardware revision");
2406 
2407 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2408 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2409 
2410 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
2411 	    &sc->params.offload, 0, "hardware is capable of TCP offload");
2412 
2413 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2414 	    &sc->params.vpd.cclk, 0, "core clock frequency (in kHz)");
2415 
2416 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2417 	    CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer),
2418 	    sysctl_int_array, "A", "interrupt holdoff timer values (us)");
2419 
2420 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2421 	    CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
2422 	    sysctl_int_array, "A", "interrupt holdoff packet counter values");
2423 
2424 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
2425 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2426 	    sysctl_devlog, "A", "device log");
2427 
2428 	return (0);
2429 }
2430 
2431 static int
2432 cxgbe_sysctls(struct port_info *pi)
2433 {
2434 	struct sysctl_ctx_list *ctx;
2435 	struct sysctl_oid *oid;
2436 	struct sysctl_oid_list *children;
2437 
2438 	ctx = device_get_sysctl_ctx(pi->dev);
2439 
2440 	/*
2441 	 * dev.cxgbe.X.
2442 	 */
2443 	oid = device_get_sysctl_tree(pi->dev);
2444 	children = SYSCTL_CHILDREN(oid);
2445 
2446 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
2447 	    &pi->nrxq, 0, "# of rx queues");
2448 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
2449 	    &pi->ntxq, 0, "# of tx queues");
2450 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
2451 	    &pi->first_rxq, 0, "index of first rx queue");
2452 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
2453 	    &pi->first_txq, 0, "index of first tx queue");
2454 
2455 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
2456 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
2457 	    "holdoff timer index");
2458 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
2459 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
2460 	    "holdoff packet counter index");
2461 
2462 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
2463 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
2464 	    "rx queue size");
2465 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
2466 	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
2467 	    "tx queue size");
2468 
2469 	/*
2470 	 * dev.cxgbe.X.stats.
2471 	 */
2472 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2473 	    NULL, "port statistics");
2474 	children = SYSCTL_CHILDREN(oid);
2475 
2476 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
2477 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
2478 	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
2479 	    sysctl_handle_t4_reg64, "QU", desc)
2480 
2481 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
2482 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
2483 	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
2484 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
2485 	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
2486 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
2487 	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
2488 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
2489 	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
2490 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
2491 	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
2492 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
2493 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
2494 	    "# of tx frames in this range",
2495 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
2496 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
2497 	    "# of tx frames in this range",
2498 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
2499 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
2500 	    "# of tx frames in this range",
2501 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
2502 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
2503 	    "# of tx frames in this range",
2504 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
2505 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
2506 	    "# of tx frames in this range",
2507 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
2508 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
2509 	    "# of tx frames in this range",
2510 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
2511 	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
2512 	    "# of tx frames in this range",
2513 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
2514 	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
2515 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
2516 	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
2517 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
2518 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
2519 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
2520 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
2521 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
2522 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
2523 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
2524 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
2525 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
2526 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
2527 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
2528 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
2529 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
2530 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
2531 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
2532 	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
2533 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
2534 
2535 	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
2536 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
2537 	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
2538 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
2539 	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
2540 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
2541 	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
2542 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
2543 	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
2544 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
2545 	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
2546 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
2547 	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
2548 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
2549 	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
2550 	    "# of frames received with bad FCS",
2551 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
2552 	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
2553 	    "# of frames received with length error",
2554 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
2555 	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
2556 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
2557 	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
2558 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
2559 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
2560 	    "# of rx frames in this range",
2561 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
2562 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
2563 	    "# of rx frames in this range",
2564 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
2565 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
2566 	    "# of rx frames in this range",
2567 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
2568 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
2569 	    "# of rx frames in this range",
2570 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
2571 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
2572 	    "# of rx frames in this range",
2573 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
2574 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
2575 	    "# of rx frames in this range",
2576 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
2577 	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
2578 	    "# of rx frames in this range",
2579 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
2580 	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
2581 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
2582 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
2583 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
2584 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
2585 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
2586 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
2587 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
2588 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
2589 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
2590 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
2591 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
2592 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
2593 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
2594 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
2595 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
2596 	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
2597 	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
2598 
2599 #undef SYSCTL_ADD_T4_REG64
2600 
2601 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
2602 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
2603 	    &pi->stats.name, desc)
2604 
2605 	/* We get these from port_stats and they may be stale by up to 1s */
2606 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
2607 	    "# drops due to buffer-group 0 overflows");
2608 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
2609 	    "# drops due to buffer-group 1 overflows");
2610 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
2611 	    "# drops due to buffer-group 2 overflows");
2612 	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
2613 	    "# drops due to buffer-group 3 overflows");
2614 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
2615 	    "# of buffer-group 0 truncated packets");
2616 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
2617 	    "# of buffer-group 1 truncated packets");
2618 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
2619 	    "# of buffer-group 2 truncated packets");
2620 	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
2621 	    "# of buffer-group 3 truncated packets");
2622 
2623 #undef SYSCTL_ADD_T4_PORTSTAT
2624 
2625 	return (0);
2626 }
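/*
 * Example (illustrative sysctl(8) invocations; instance numbers depend
 * on the system):
 *
 *	# sysctl -n dev.t4nex.0.firmware_version
 *	# sysctl dev.cxgbe.0.stats.tx_frames
 *	# sysctl dev.cxgbe.0.qsize_txq=2048	(only while the port is down)
 */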
2627 
2628 static int
2629 sysctl_int_array(SYSCTL_HANDLER_ARGS)
2630 {
2631 	int rc, *i;
2632 	struct sbuf sb;
2633 
2634 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
2635 	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
2636 		sbuf_printf(&sb, "%d ", *i);
2637 	sbuf_trim(&sb);
2638 	sbuf_finish(&sb);
2639 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2640 	sbuf_delete(&sb);
2641 	return (rc);
2642 }
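/*
 * Worked example: with arg1 pointing at {5, 10, 20} and arg2 equal to
 * 3 * sizeof(int), the loop above emits "5 10 20 ", sbuf_trim drops the
 * trailing blank, and the user reads "5 10 20".  This is how the
 * holdoff_timers and holdoff_pkt_counts nodes render their arrays.
 */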
2643 
2644 static int
2645 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
2646 {
2647 	struct port_info *pi = arg1;
2648 	struct adapter *sc = pi->adapter;
2649 	struct sge_rxq *rxq;
2650 	int idx, rc, i;
2651 
2652 	idx = pi->tmr_idx;
2653 
2654 	rc = sysctl_handle_int(oidp, &idx, 0, req);
2655 	if (rc != 0 || req->newptr == NULL)
2656 		return (rc);
2657 
2658 	if (idx < 0 || idx >= SGE_NTIMERS)
2659 		return (EINVAL);
2660 
2661 	ADAPTER_LOCK(sc);
2662 	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2663 	if (rc == 0) {
2664 		for_each_rxq(pi, i, rxq) {
2665 			rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
2666 			    V_QINTR_CNT_EN(pi->pktc_idx != -1);
2667 		}
2668 		pi->tmr_idx = idx;
2669 	}
2670 
2671 	ADAPTER_UNLOCK(sc);
2672 	return (rc);
2673 }
2674 
2675 static int
2676 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
2677 {
2678 	struct port_info *pi = arg1;
2679 	struct adapter *sc = pi->adapter;
2680 	int idx, rc;
2681 
2682 	idx = pi->pktc_idx;
2683 
2684 	rc = sysctl_handle_int(oidp, &idx, 0, req);
2685 	if (rc != 0 || req->newptr == NULL)
2686 		return (rc);
2687 
2688 	if (idx < -1 || idx >= SGE_NCOUNTERS)
2689 		return (EINVAL);
2690 
2691 	ADAPTER_LOCK(sc);
2692 	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2693 	if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2694 		rc = EBUSY; /* can be changed only when port is down */
2695 
2696 	if (rc == 0)
2697 		pi->pktc_idx = idx;
2698 
2699 	ADAPTER_UNLOCK(sc);
2700 	return (rc);
2701 }
2702 
2703 static int
2704 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
2705 {
2706 	struct port_info *pi = arg1;
2707 	struct adapter *sc = pi->adapter;
2708 	int qsize, rc;
2709 
2710 	qsize = pi->qsize_rxq;
2711 
2712 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
2713 	if (rc != 0 || req->newptr == NULL)
2714 		return (rc);
2715 
2716 	if (qsize < 128 || (qsize & 7))
2717 		return (EINVAL);
2718 
2719 	ADAPTER_LOCK(sc);
2720 	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2721 	if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2722 		rc = EBUSY; /* can be changed only when port is down */
2723 
2724 	if (rc == 0)
2725 		pi->qsize_rxq = qsize;
2726 
2727 	ADAPTER_UNLOCK(sc);
2728 	return (rc);
2729 }
2730 
2731 static int
2732 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
2733 {
2734 	struct port_info *pi = arg1;
2735 	struct adapter *sc = pi->adapter;
2736 	int qsize, rc;
2737 
2738 	qsize = pi->qsize_txq;
2739 
2740 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
2741 	if (rc != 0 || req->newptr == NULL)
2742 		return (rc);
2743 
2744 	if (qsize < 128)
2745 		return (EINVAL);
2746 
2747 	ADAPTER_LOCK(sc);
2748 	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2749 	if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2750 		rc = EBUSY; /* can be changed only when port is down */
2751 
2752 	if (rc == 0)
2753 		pi->qsize_txq = qsize;
2754 
2755 	ADAPTER_UNLOCK(sc);
2756 	return (rc);
2757 }
2758 
2759 static int
2760 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
2761 {
2762 	struct adapter *sc = arg1;
2763 	int reg = arg2;
2764 	uint64_t val;
2765 
2766 	val = t4_read_reg64(sc, reg);
2767 
2768 	return (sysctl_handle_64(oidp, &val, 0, req));
2769 }
2770 
2771 const char *devlog_level_strings[] = {
2772 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
2773 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
2774 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
2775 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
2776 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
2777 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
2778 };
2779 
2780 const char *devlog_facility_strings[] = {
2781 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
2782 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
2783 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
2784 	[FW_DEVLOG_FACILITY_RES]	= "RES",
2785 	[FW_DEVLOG_FACILITY_HW]		= "HW",
2786 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
2787 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
2788 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
2789 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
2790 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
2791 	[FW_DEVLOG_FACILITY_VI]		= "VI",
2792 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
2793 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
2794 	[FW_DEVLOG_FACILITY_TM]		= "TM",
2795 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
2796 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
2797 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
2798 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
2799 	[FW_DEVLOG_FACILITY_RI]		= "RI",
2800 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
2801 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
2802 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
2803 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
2804 };
2805 
2806 static int
2807 sysctl_devlog(SYSCTL_HANDLER_ARGS)
2808 {
2809 	struct adapter *sc = arg1;
2810 	struct devlog_params *dparams = &sc->params.devlog;
2811 	struct fw_devlog_e *buf, *e;
2812 	int i, j, rc, nentries, first = 0;
2813 	struct sbuf *sb;
2814 	uint64_t ftstamp = UINT64_MAX;
2815 
2816 	if (dparams->start == 0)
2817 		return (ENXIO);
2818 
2819 	nentries = dparams->size / sizeof(struct fw_devlog_e);
2820 
2821 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
2822 	if (buf == NULL)
2823 		return (ENOMEM);
2824 
2825 	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
2826 	    (void *)buf);
2827 	if (rc != 0)
2828 		goto done;
2829 
2830 	for (i = 0; i < nentries; i++) {
2831 		e = &buf[i];
2832 
2833 		if (e->timestamp == 0)
2834 			break;	/* end */
2835 
2836 		e->timestamp = be64toh(e->timestamp);
2837 		e->seqno = be32toh(e->seqno);
2838 		for (j = 0; j < 8; j++)
2839 			e->params[j] = be32toh(e->params[j]);
2840 
2841 		if (e->timestamp < ftstamp) {
2842 			ftstamp = e->timestamp;
2843 			first = i;
2844 		}
2845 	}
2846 
2847 	if (buf[first].timestamp == 0)
2848 		goto done;	/* nothing in the log */
2849 
2850 	rc = sysctl_wire_old_buffer(req, 0);
2851 	if (rc != 0)
2852 		goto done;
2853 
2854 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
2855 	sbuf_printf(sb, "\n%10s  %15s  %8s  %8s  %s\n",
2856 	    "Seq#", "Tstamp", "Level", "Facility", "Message");
2857 
2858 	i = first;
2859 	do {
2860 		e = &buf[i];
2861 		if (e->timestamp == 0)
2862 			break;	/* end */
2863 
2864 		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
2865 		    e->seqno, e->timestamp,
2866 		    (e->level < ARRAY_SIZE(devlog_level_strings) ?
2867 			devlog_level_strings[e->level] : "UNKNOWN"),
2868 		    (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
2869 			devlog_facility_strings[e->facility] : "UNKNOWN"));
2870 		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
2871 		    e->params[2], e->params[3], e->params[4],
2872 		    e->params[5], e->params[6], e->params[7]);
2873 
2874 		if (++i == nentries)
2875 			i = 0;
2876 	} while (i != first);
2877 
2878 	rc = sbuf_finish(sb);
2879 	sbuf_delete(sb);
2880 done:
2881 	free(buf, M_CXGBE);
2882 	return (rc);
2883 }
2884 
2885 static inline void
2886 txq_start(struct ifnet *ifp, struct sge_txq *txq)
2887 {
2888 	struct buf_ring *br;
2889 	struct mbuf *m;
2890 
2891 	TXQ_LOCK_ASSERT_OWNED(txq);
2892 
2893 	br = txq->br;
2894 	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
2895 	if (m)
2896 		t4_eth_tx(ifp, txq, m);
2897 }
2898 
2899 void
2900 cxgbe_txq_start(void *arg, int count)
2901 {
2902 	struct sge_txq *txq = arg;
2903 
2904 	TXQ_LOCK(txq);
2905 	if (txq->eq.flags & EQ_CRFLUSHED) {
2906 		txq->eq.flags &= ~EQ_CRFLUSHED;
2907 		txq_start(txq->ifp, txq);
2908 	} else
2909 		wakeup_one(txq);	/* txq is going away, wakeup free_txq */
2910 	TXQ_UNLOCK(txq);
2911 }
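/*
 * Note: EQ_CRFLUSHED is set while a credit-flush request to the
 * hardware is outstanding and this task runs once the flush completes.
 * If the flag is still set the queue is alive and transmission resumes;
 * otherwise the queue is being torn down and free_txq is sleeping on
 * the txq pointer, waiting for this final wakeup.
 */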
2912 
2913 static uint32_t
2914 fconf_to_mode(uint32_t fconf)
2915 {
2916 	uint32_t mode;
2917 
2918 	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
2919 	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
2920 
2921 	if (fconf & F_FRAGMENTATION)
2922 		mode |= T4_FILTER_IP_FRAGMENT;
2923 
2924 	if (fconf & F_MPSHITTYPE)
2925 		mode |= T4_FILTER_MPS_HIT_TYPE;
2926 
2927 	if (fconf & F_MACMATCH)
2928 		mode |= T4_FILTER_MAC_IDX;
2929 
2930 	if (fconf & F_ETHERTYPE)
2931 		mode |= T4_FILTER_ETH_TYPE;
2932 
2933 	if (fconf & F_PROTOCOL)
2934 		mode |= T4_FILTER_IP_PROTO;
2935 
2936 	if (fconf & F_TOS)
2937 		mode |= T4_FILTER_IP_TOS;
2938 
2939 	if (fconf & F_VLAN)
2940 		mode |= T4_FILTER_IVLAN;
2941 
2942 	if (fconf & F_VNIC_ID)
2943 		mode |= T4_FILTER_OVLAN;
2944 
2945 	if (fconf & F_PORT)
2946 		mode |= T4_FILTER_PORT;
2947 
2948 	if (fconf & F_FCOE)
2949 		mode |= T4_FILTER_FCoE;
2950 
2951 	return (mode);
2952 }
2953 
2954 static uint32_t
2955 mode_to_fconf(uint32_t mode)
2956 {
2957 	uint32_t fconf = 0;
2958 
2959 	if (mode & T4_FILTER_IP_FRAGMENT)
2960 		fconf |= F_FRAGMENTATION;
2961 
2962 	if (mode & T4_FILTER_MPS_HIT_TYPE)
2963 		fconf |= F_MPSHITTYPE;
2964 
2965 	if (mode & T4_FILTER_MAC_IDX)
2966 		fconf |= F_MACMATCH;
2967 
2968 	if (mode & T4_FILTER_ETH_TYPE)
2969 		fconf |= F_ETHERTYPE;
2970 
2971 	if (mode & T4_FILTER_IP_PROTO)
2972 		fconf |= F_PROTOCOL;
2973 
2974 	if (mode & T4_FILTER_IP_TOS)
2975 		fconf |= F_TOS;
2976 
2977 	if (mode & T4_FILTER_IVLAN)
2978 		fconf |= F_VLAN;
2979 
2980 	if (mode & T4_FILTER_OVLAN)
2981 		fconf |= F_VNIC_ID;
2982 
2983 	if (mode & T4_FILTER_PORT)
2984 		fconf |= F_PORT;
2985 
2986 	if (mode & T4_FILTER_FCoE)
2987 		fconf |= F_FCOE;
2988 
2989 	return (fconf);
2990 }
2991 
2992 static uint32_t
2993 fspec_to_fconf(struct t4_filter_specification *fs)
2994 {
2995 	uint32_t fconf = 0;
2996 
2997 	if (fs->val.frag || fs->mask.frag)
2998 		fconf |= F_FRAGMENTATION;
2999 
3000 	if (fs->val.matchtype || fs->mask.matchtype)
3001 		fconf |= F_MPSHITTYPE;
3002 
3003 	if (fs->val.macidx || fs->mask.macidx)
3004 		fconf |= F_MACMATCH;
3005 
3006 	if (fs->val.ethtype || fs->mask.ethtype)
3007 		fconf |= F_ETHERTYPE;
3008 
3009 	if (fs->val.proto || fs->mask.proto)
3010 		fconf |= F_PROTOCOL;
3011 
3012 	if (fs->val.tos || fs->mask.tos)
3013 		fconf |= F_TOS;
3014 
3015 	if (fs->val.ivlan_vld || fs->mask.ivlan_vld)
3016 		fconf |= F_VLAN;
3017 
3018 	if (fs->val.ovlan_vld || fs->mask.ovlan_vld)
3019 		fconf |= F_VNIC_ID;
3020 
3021 	if (fs->val.iport || fs->mask.iport)
3022 		fconf |= F_PORT;
3023 
3024 	if (fs->val.fcoe || fs->mask.fcoe)
3025 		fconf |= F_FCOE;
3026 
3027 	return (fconf);
3028 }
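/*
 * Note on the three translators above: T4_FILTER_* is the ioctl-visible
 * filter mode, the F_* constants are bits in TP_VLAN_PRI_MAP (the
 * chip's compressed filter tuple), and fspec_to_fconf derives the bits
 * a particular filter specification actually needs.  set_filter accepts
 * a filter only if its required bits are a subset of the configured
 * mode, i.e.
 *
 *	(fconf | fspec_to_fconf(&t->fs)) == fconf
 *
 * so a filter matching on iport, say, is rejected with E2BIG unless the
 * global mode already includes T4_FILTER_PORT.
 */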
3029 
3030 static int
3031 get_filter_mode(struct adapter *sc, uint32_t *mode)
3032 {
3033 	uint32_t fconf;
3034 
3035 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3036 	    A_TP_VLAN_PRI_MAP);
3037 
3038 	*mode = fconf_to_mode(fconf);
3039 
3040 	return (0);
3041 }
3042 
3043 static int
3044 set_filter_mode(struct adapter *sc, uint32_t mode)
3045 {
3046 	uint32_t fconf;
3047 	int rc;
3048 
3049 	fconf = mode_to_fconf(mode);
3050 
3051 	ADAPTER_LOCK(sc);
3052 	if (IS_BUSY(sc)) {
3053 		rc = EAGAIN;
3054 		goto done;
3055 	}
3056 
3057 	if (sc->tids.ftids_in_use > 0) {
3058 		rc = EBUSY;
3059 		goto done;
3060 	}
3061 
3062 	rc = -t4_set_filter_mode(sc, fconf);
3063 done:
3064 	ADAPTER_UNLOCK(sc);
3065 	return (rc);
3066 }
3067 
3068 static inline uint64_t
3069 get_filter_hits(struct adapter *sc, uint32_t fid)
3070 {
3071 	uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
3072 	uint64_t hits;
3073 
3074 	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
3075 	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
3076 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
3077 	hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
3078 
3079 	return (be64toh(hits));
3080 }
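/*
 * Note: this reads the filter's TCB through PCIe memory window 0.  The
 * first write points the window at the TCB, located at tcb_base +
 * (ftid_base + fid) * TCB_SIZE; the read-back of the same register
 * flushes the write so the window has actually moved, and the 64-bit
 * hit count is then fetched at byte offset 16 within the window.
 */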
3081 
3082 static int
3083 get_filter(struct adapter *sc, struct t4_filter *t)
3084 {
3085 	int i, nfilters = sc->tids.nftids;
3086 	struct filter_entry *f;
3087 
3088 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3089 
3090 	if (IS_BUSY(sc))
3091 		return (EAGAIN);
3092 
3093 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
3094 	    t->idx >= nfilters) {
3095 		t->idx = 0xffffffff;
3096 		return (0);
3097 	}
3098 
3099 	f = &sc->tids.ftid_tab[t->idx];
3100 	for (i = t->idx; i < nfilters; i++, f++) {
3101 		if (f->valid) {
3102 			t->idx = i;
3103 			t->l2tidx = f->l2t ? f->l2t->idx : 0;
3104 			t->smtidx = f->smtidx;
3105 			if (f->fs.hitcnts)
3106 				t->hits = get_filter_hits(sc, t->idx);
3107 			else
3108 				t->hits = UINT64_MAX;
3109 			t->fs = f->fs;
3110 
3111 			return (0);
3112 		}
3113 	}
3114 
3115 	t->idx = 0xffffffff;
3116 	return (0);
3117 }
3118 
3119 static int
3120 set_filter(struct adapter *sc, struct t4_filter *t)
3121 {
3122 	uint32_t fconf;
3123 	unsigned int nfilters, nports;
3124 	struct filter_entry *f;
3125 	int i;
3126 
3127 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3128 
3129 	nfilters = sc->tids.nftids;
3130 	nports = sc->params.nports;
3131 
3132 	if (nfilters == 0)
3133 		return (ENOTSUP);
3134 
3135 	if (!(sc->flags & FULL_INIT_DONE))
3136 		return (EAGAIN);
3137 
3138 	if (t->idx >= nfilters)
3139 		return (EINVAL);
3140 
3141 	/* Validate against the global filter mode */
3142 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3143 	    A_TP_VLAN_PRI_MAP);
3144 	if ((fconf | fspec_to_fconf(&t->fs)) != fconf)
3145 		return (E2BIG);
3146 
3147 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
3148 		return (EINVAL);
3149 
3150 	if (t->fs.val.iport >= nports)
3151 		return (EINVAL);
3152 
3153 	/* Can't specify an iq if not steering to it */
3154 	if (!t->fs.dirsteer && t->fs.iq)
3155 		return (EINVAL);
3156 
3157 	/* IPv6 filter idx must be 4-aligned */
3158 	if (t->fs.type == 1 &&
3159 	    ((t->idx & 0x3) || t->idx + 4 >= nfilters))
3160 		return (EINVAL);
3161 
3162 	if (sc->tids.ftid_tab == NULL) {
3163 		KASSERT(sc->tids.ftids_in_use == 0,
3164 		    ("%s: no memory allocated but filters_in_use > 0",
3165 		    __func__));
3166 
3167 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
3168 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
3169 		if (sc->tids.ftid_tab == NULL)
3170 			return (ENOMEM);
3171 	}
3172 
3173 	for (i = 0; i < 4; i++) {
3174 		f = &sc->tids.ftid_tab[t->idx + i];
3175 
3176 		if (f->pending || f->valid)
3177 			return (EBUSY);
3178 		if (f->locked)
3179 			return (EPERM);
3180 
3181 		if (t->fs.type == 0)
3182 			break;
3183 	}
3184 
3185 	f = &sc->tids.ftid_tab[t->idx];
3186 	f->fs = t->fs;
3187 
3188 	return set_filter_wr(sc, t->idx);
3189 }
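/*
 * Worked example for the IPv6 check in set_filter above: an IPv6 filter
 * (fs.type == 1) occupies 4 consecutive slots, so its index must be
 * 4-aligned and the whole group must fit.  With nfilters == 496, idx 8
 * is accepted (slots 8-11), idx 6 fails the alignment test, and idx 492
 * fails the range test as written (the check requires idx + 4 to stay
 * below nfilters).
 */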
3190 
3191 static int
3192 del_filter(struct adapter *sc, struct t4_filter *t)
3193 {
3194 	unsigned int nfilters;
3195 	struct filter_entry *f;
3196 
3197 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3198 
3199 	if (IS_BUSY(sc))
3200 		return (EAGAIN);
3201 
3202 	nfilters = sc->tids.nftids;
3203 
3204 	if (nfilters == 0)
3205 		return (ENOTSUP);
3206 
3207 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
3208 	    t->idx >= nfilters)
3209 		return (EINVAL);
3210 
3211 	if (!(sc->flags & FULL_INIT_DONE))
3212 		return (EAGAIN);
3213 
3214 	f = &sc->tids.ftid_tab[t->idx];
3215 
3216 	if (f->pending)
3217 		return (EBUSY);
3218 	if (f->locked)
3219 		return (EPERM);
3220 
3221 	if (f->valid) {
3222 		t->fs = f->fs;	/* extra info for the caller */
3223 		return del_filter_wr(sc, t->idx);
3224 	}
3225 
3226 	return (0);
3227 }
3228 
3229 static void
3230 clear_filter(struct filter_entry *f)
3231 {
3232 	if (f->l2t)
3233 		t4_l2t_release(f->l2t);
3234 
3235 	bzero(f, sizeof (*f));
3236 }
3237 
3238 static int
3239 set_filter_wr(struct adapter *sc, int fidx)
3240 {
3241 	int rc;
3242 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3243 	struct mbuf *m;
3244 	struct fw_filter_wr *fwr;
3245 	unsigned int ftid;
3246 
3247 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3248 
3249 	if (f->fs.newdmac || f->fs.newvlan) {
3250 		/* This filter needs an L2T entry; allocate one. */
3251 		f->l2t = t4_l2t_alloc_switching(sc->l2t);
3252 		if (f->l2t == NULL)
3253 			return (EAGAIN);
3254 		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
3255 		    f->fs.dmac)) {
3256 			t4_l2t_release(f->l2t);
3257 			f->l2t = NULL;
3258 			return (ENOMEM);
3259 		}
3260 	}
3261 
3262 	ftid = sc->tids.ftid_base + fidx;
3263 
3264 	m = m_gethdr(M_NOWAIT, MT_DATA);
3265 	if (m == NULL)
3266 		return (ENOMEM);
3267 
3268 	fwr = mtod(m, struct fw_filter_wr *);
3269 	m->m_len = m->m_pkthdr.len = sizeof(*fwr);
3270 	bzero(fwr, sizeof (*fwr));
3271 
3272 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
3273 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
3274 	fwr->tid_to_iq =
3275 	    htobe32(V_FW_FILTER_WR_TID(ftid) |
3276 		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
3277 		V_FW_FILTER_WR_NOREPLY(0) |
3278 		V_FW_FILTER_WR_IQ(f->fs.iq));
3279 	fwr->del_filter_to_l2tix =
3280 	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
3281 		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
3282 		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
3283 		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
3284 		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
3285 		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
3286 		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
3287 		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
3288 		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
3289 		    f->fs.newvlan == VLAN_REWRITE) |
3290 		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
3291 		    f->fs.newvlan == VLAN_REWRITE) |
3292 		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
3293 		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
3294 		V_FW_FILTER_WR_PRIO(f->fs.prio) |
3295 		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
3296 	fwr->ethtype = htobe16(f->fs.val.ethtype);
3297 	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
3298 	fwr->frag_to_ovlan_vldm =
3299 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
3300 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
3301 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
3302 		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
3303 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
3304 		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
3305 	fwr->smac_sel = 0;
3306 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
3307 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id));
3308 	fwr->maci_to_matchtypem =
3309 	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
3310 		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
3311 		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
3312 		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
3313 		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
3314 		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
3315 		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
3316 		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
3317 	fwr->ptcl = f->fs.val.proto;
3318 	fwr->ptclm = f->fs.mask.proto;
3319 	fwr->ttyp = f->fs.val.tos;
3320 	fwr->ttypm = f->fs.mask.tos;
3321 	fwr->ivlan = htobe16(f->fs.val.ivlan);
3322 	fwr->ivlanm = htobe16(f->fs.mask.ivlan);
3323 	fwr->ovlan = htobe16(f->fs.val.ovlan);
3324 	fwr->ovlanm = htobe16(f->fs.mask.ovlan);
3325 	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
3326 	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
3327 	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
3328 	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
3329 	fwr->lp = htobe16(f->fs.val.dport);
3330 	fwr->lpm = htobe16(f->fs.mask.dport);
3331 	fwr->fp = htobe16(f->fs.val.sport);
3332 	fwr->fpm = htobe16(f->fs.mask.sport);
3333 	if (f->fs.newsmac)
3334 		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
3335 
3336 	f->pending = 1;
3337 	sc->tids.ftids_in_use++;
3338 	rc = t4_mgmt_tx(sc, m);
3339 	if (rc != 0) {
3340 		sc->tids.ftids_in_use--;
3341 		m_freem(m);
3342 		clear_filter(f);
3343 	}
3344 	return (rc);
3345 }
3346 
3347 static int
3348 del_filter_wr(struct adapter *sc, int fidx)
3349 {
3350 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3351 	struct mbuf *m;
3352 	struct fw_filter_wr *fwr;
3353 	unsigned int rc, ftid;
3354 
3355 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3356 
3357 	ftid = sc->tids.ftid_base + fidx;
3358 
3359 	m = m_gethdr(M_NOWAIT, MT_DATA);
3360 	if (m == NULL)
3361 		return (ENOMEM);
3362 
3363 	fwr = mtod(m, struct fw_filter_wr *);
3364 	m->m_len = m->m_pkthdr.len = sizeof(*fwr);
3365 	bzero(fwr, sizeof (*fwr));
3366 
3367 	t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id);
3368 
3369 	f->pending = 1;
3370 	rc = t4_mgmt_tx(sc, m);
3371 	if (rc != 0) {
3372 		f->pending = 0;
3373 		m_freem(m);
3374 	}
3375 	return (rc);
3376 }
3377 
3378 /* XXX move intr handlers to main.c and make this static */
3379 void
3380 filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
3381 {
3382 	unsigned int idx = GET_TID(rpl);
3383 
3384 	if (idx >= sc->tids.ftid_base &&
3385 	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
3386 		unsigned int rc = G_COOKIE(rpl->cookie);
3387 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
3388 
3389 		if (rc == FW_FILTER_WR_FLT_DELETED) {
3390 			/*
3391 			 * Clear the filter when we get confirmation from the
3392 			 * hardware that the filter has been deleted.
3393 			 */
3394 			clear_filter(f);
3395 			sc->tids.ftids_in_use--;
3396 		} else if (rc == FW_FILTER_WR_SMT_TBL_FULL) {
3397 			device_printf(sc->dev,
3398 			    "filter %u setup failed due to full SMT\n", idx);
3399 			clear_filter(f);
3400 			sc->tids.ftids_in_use--;
3401 		} else if (rc == FW_FILTER_WR_FLT_ADDED) {
3402 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
3403 			f->pending = 0;  /* asynchronous setup completed */
3404 			f->valid = 1;
3405 		} else {
3406 			/*
3407 			 * Something went wrong.  Issue a warning about the
3408 			 * problem and clear everything out.
3409 			 */
3410 			device_printf(sc->dev,
3411 			    "filter %u setup failed with error %u\n", idx, rc);
3412 			clear_filter(f);
3413 			sc->tids.ftids_in_use--;
3414 		}
3415 	}
3416 }
3417 
3418 static int
3419 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
3420 {
3421 	int rc = EINVAL;
3422 
3423 	if (cntxt->cid > M_CTXTQID)
3424 		return (rc);
3425 
3426 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
3427 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
3428 		return (rc);
3429 
3430 	if (sc->flags & FW_OK) {
3431 		ADAPTER_LOCK(sc);	/* Avoid parallel t4_wr_mbox */
3432 		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
3433 		    &cntxt->data[0]);
3434 		ADAPTER_UNLOCK(sc);
3435 	}
3436 
3437 	if (rc != 0) {
3438 		/* Read via firmware failed or wasn't even attempted */
3439 
3440 		rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
3441 		    &cntxt->data[0]);
3442 	}
3443 
3444 	return (rc);
3445 }
3446 
3447 int
3448 t4_os_find_pci_capability(struct adapter *sc, int cap)
3449 {
3450 	int i;
3451 
3452 	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
3453 }
3454 
3455 int
3456 t4_os_pci_save_state(struct adapter *sc)
3457 {
3458 	device_t dev;
3459 	struct pci_devinfo *dinfo;
3460 
3461 	dev = sc->dev;
3462 	dinfo = device_get_ivars(dev);
3463 
3464 	pci_cfg_save(dev, dinfo, 0);
3465 	return (0);
3466 }
3467 
3468 int
3469 t4_os_pci_restore_state(struct adapter *sc)
3470 {
3471 	device_t dev;
3472 	struct pci_devinfo *dinfo;
3473 
3474 	dev = sc->dev;
3475 	dinfo = device_get_ivars(dev);
3476 
3477 	pci_cfg_restore(dev, dinfo);
3478 	return (0);
3479 }
3480 
3481 void
3482 t4_os_portmod_changed(const struct adapter *sc, int idx)
3483 {
3484 	struct port_info *pi = sc->port[idx];
3485 	static const char *mod_str[] = {
3486 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
3487 	};
3488 
3489 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
3490 		if_printf(pi->ifp, "transceiver unplugged.\n");
3491 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
3492 		if_printf(pi->ifp, "unknown transceiver inserted.\n");
3493 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
3494 		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
3495 	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
3496 		if_printf(pi->ifp, "%s transceiver inserted.\n",
3497 		    mod_str[pi->mod_type]);
3498 	} else {
3499 		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
3500 		    pi->mod_type);
3501 	}
3502 }
3503 
3504 void
3505 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
3506 {
3507 	struct port_info *pi = sc->port[idx];
3508 	struct ifnet *ifp = pi->ifp;
3509 
3510 	if (link_stat) {
3511 		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
3512 		if_link_state_change(ifp, LINK_STATE_UP);
3513 	} else
3514 		if_link_state_change(ifp, LINK_STATE_DOWN);
3515 }
3516 
3517 static int
3518 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
3519 {
3520 	return (0);
3521 }
3522 
3523 static int
3524 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
3525 {
3526 	return (0);
3527 }
3528 
3529 static int
3530 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
3531     struct thread *td)
3532 {
3533 	int rc;
3534 	struct adapter *sc = dev->si_drv1;
3535 
3536 	rc = priv_check(td, PRIV_DRIVER);
3537 	if (rc != 0)
3538 		return (rc);
3539 
3540 	switch (cmd) {
3541 	case CHELSIO_T4_GETREG: {
3542 		struct t4_reg *edata = (struct t4_reg *)data;
3543 
3544 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
3545 			return (EFAULT);
3546 
3547 		if (edata->size == 4)
3548 			edata->val = t4_read_reg(sc, edata->addr);
3549 		else if (edata->size == 8)
3550 			edata->val = t4_read_reg64(sc, edata->addr);
3551 		else
3552 			return (EINVAL);
3553 
3554 		break;
3555 	}
3556 	case CHELSIO_T4_SETREG: {
3557 		struct t4_reg *edata = (struct t4_reg *)data;
3558 
3559 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
3560 			return (EFAULT);
3561 
3562 		if (edata->size == 4) {
3563 			if (edata->val & 0xffffffff00000000)
3564 				return (EINVAL);
3565 			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
3566 		} else if (edata->size == 8)
3567 			t4_write_reg64(sc, edata->addr, edata->val);
3568 		else
3569 			return (EINVAL);
3570 		break;
3571 	}
3572 	case CHELSIO_T4_REGDUMP: {
3573 		struct t4_regdump *regs = (struct t4_regdump *)data;
3574 		int reglen = T4_REGDUMP_SIZE;
3575 		uint8_t *buf;
3576 
3577 		if (regs->len < reglen) {
3578 			regs->len = reglen; /* hint to the caller */
3579 			return (ENOBUFS);
3580 		}
3581 
3582 		regs->len = reglen;
3583 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
3584 		t4_get_regs(sc, regs, buf);
3585 		rc = copyout(buf, regs->data, reglen);
3586 		free(buf, M_CXGBE);
3587 		break;
3588 	}
3589 	case CHELSIO_T4_GET_FILTER_MODE:
3590 		rc = get_filter_mode(sc, (uint32_t *)data);
3591 		break;
3592 	case CHELSIO_T4_SET_FILTER_MODE:
3593 		rc = set_filter_mode(sc, *(uint32_t *)data);
3594 		break;
3595 	case CHELSIO_T4_GET_FILTER:
3596 		ADAPTER_LOCK(sc);
3597 		rc = get_filter(sc, (struct t4_filter *)data);
3598 		ADAPTER_UNLOCK(sc);
3599 		break;
3600 	case CHELSIO_T4_SET_FILTER:
3601 		ADAPTER_LOCK(sc);
3602 		rc = set_filter(sc, (struct t4_filter *)data);
3603 		ADAPTER_UNLOCK(sc);
3604 		break;
3605 	case CHELSIO_T4_DEL_FILTER:
3606 		ADAPTER_LOCK(sc);
3607 		rc = del_filter(sc, (struct t4_filter *)data);
3608 		ADAPTER_UNLOCK(sc);
3609 		break;
3610 	case CHELSIO_T4_GET_SGE_CONTEXT:
3611 		rc = get_sge_context(sc, (struct t4_sge_context *)data);
3612 		break;
3613 	default:
3614 		rc = EINVAL;
3615 	}
3616 
3617 	return (rc);
3618 }
3619 
3620 static int
3621 t4_mod_event(module_t mod, int cmd, void *arg)
3622 {
3623 
3624 	if (cmd == MOD_LOAD)
3625 		t4_sge_modload();
3626 
3627 	return (0);
3628 }
3629 
3630 static devclass_t t4_devclass;
3631 static devclass_t cxgbe_devclass;
3632 
3633 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
3634 MODULE_VERSION(t4nex, 1);
3635 
3636 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
3637 MODULE_VERSION(cxgbe, 1);
3638
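/*
 * Note: DRIVER_MODULE registers t4nex on the pci bus and cxgbe as a
 * child of t4nex, so attaching the nexus creates one cxgbe ifnet per
 * port.  The driver is typically loaded with "kldload if_cxgbe" or
 * if_cxgbe_load="YES" in loader.conf (module name per cxgbe(4)).
 */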