xref: /freebsd/sys/dev/cxgb/cxgb_main.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_inet.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/pciio.h>
41 #include <sys/conf.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/ktr.h>
45 #include <sys/rman.h>
46 #include <sys/ioccom.h>
47 #include <sys/mbuf.h>
48 #include <sys/linker.h>
49 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/smp.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/queue.h>
56 #include <sys/taskqueue.h>
57 #include <sys/proc.h>
58 
59 #include <net/bpf.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68 
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76 
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pci_private.h>
80 
81 #include <cxgb_include.h>
82 
83 #ifdef PRIV_SUPPORTED
84 #include <sys/priv.h>
85 #endif
86 
87 static int cxgb_setup_interrupts(adapter_t *);
88 static void cxgb_teardown_interrupts(adapter_t *);
89 static void cxgb_init(void *);
90 static int cxgb_init_locked(struct port_info *);
91 static int cxgb_uninit_locked(struct port_info *);
92 static int cxgb_uninit_synchronized(struct port_info *);
93 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
94 static int cxgb_media_change(struct ifnet *);
95 static int cxgb_ifm_type(int);
96 static void cxgb_build_medialist(struct port_info *);
97 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
98 static uint64_t cxgb_get_counter(struct ifnet *, ift_counter);
99 static int setup_sge_qsets(adapter_t *);
100 static void cxgb_async_intr(void *);
101 static void cxgb_tick_handler(void *, int);
102 static void cxgb_tick(void *);
103 static void link_check_callout(void *);
104 static void check_link_status(void *, int);
105 static void setup_rss(adapter_t *sc);
106 static int alloc_filters(struct adapter *);
107 static int setup_hw_filters(struct adapter *);
108 static int set_filter(struct adapter *, int, const struct filter_info *);
109 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
110     unsigned int, u64, u64);
111 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
112     unsigned int, u64, u64);
113 #ifdef TCP_OFFLOAD
114 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
115 #endif
116 
117 /* Attachment glue for the PCI controller end of the device.  Each port of
118  * the device is attached separately, as defined later.
119  */
120 static int cxgb_controller_probe(device_t);
121 static int cxgb_controller_attach(device_t);
122 static int cxgb_controller_detach(device_t);
123 static void cxgb_free(struct adapter *);
124 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
125     unsigned int end);
126 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
127 static int cxgb_get_regs_len(void);
128 static void touch_bars(device_t dev);
129 static void cxgb_update_mac_settings(struct port_info *p);
130 #ifdef TCP_OFFLOAD
131 static int toe_capability(struct port_info *, int);
132 #endif
133 
134 static device_method_t cxgb_controller_methods[] = {
135 	DEVMETHOD(device_probe,		cxgb_controller_probe),
136 	DEVMETHOD(device_attach,	cxgb_controller_attach),
137 	DEVMETHOD(device_detach,	cxgb_controller_detach),
138 
139 	DEVMETHOD_END
140 };
141 
142 static driver_t cxgb_controller_driver = {
143 	"cxgbc",
144 	cxgb_controller_methods,
145 	sizeof(struct adapter)
146 };
147 
148 static int cxgbc_mod_event(module_t, int, void *);
149 static devclass_t	cxgb_controller_devclass;
150 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass,
151     cxgbc_mod_event, 0);
152 MODULE_VERSION(cxgbc, 1);
153 MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
154 
155 /*
156  * Attachment glue for the ports.  Attachment is done directly to the
157  * controller device.
158  */
159 static int cxgb_port_probe(device_t);
160 static int cxgb_port_attach(device_t);
161 static int cxgb_port_detach(device_t);
162 
163 static device_method_t cxgb_port_methods[] = {
164 	DEVMETHOD(device_probe,		cxgb_port_probe),
165 	DEVMETHOD(device_attach,	cxgb_port_attach),
166 	DEVMETHOD(device_detach,	cxgb_port_detach),
167 	{ 0, 0 }
168 };
169 
170 static driver_t cxgb_port_driver = {
171 	"cxgb",
172 	cxgb_port_methods,
173 	0
174 };
175 
176 static d_ioctl_t cxgb_extension_ioctl;
177 static d_open_t cxgb_extension_open;
178 static d_close_t cxgb_extension_close;
179 
180 static struct cdevsw cxgb_cdevsw = {
181        .d_version =    D_VERSION,
182        .d_flags =      0,
183        .d_open =       cxgb_extension_open,
184        .d_close =      cxgb_extension_close,
185        .d_ioctl =      cxgb_extension_ioctl,
186        .d_name =       "cxgb",
187 };
188 
189 static devclass_t	cxgb_port_devclass;
190 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
191 MODULE_VERSION(cxgb, 1);
192 
193 static struct mtx t3_list_lock;
194 static SLIST_HEAD(, adapter) t3_list;
195 #ifdef TCP_OFFLOAD
196 static struct mtx t3_uld_list_lock;
197 static SLIST_HEAD(, uld_info) t3_uld_list;
198 #endif
199 
200 /*
201  * The driver uses the best interrupt scheme available on a platform in the
202  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
203  * of these schemes the driver may consider as follows:
204  *
205  * msi = 2: choose from among all three options
206  * msi = 1 : only consider MSI and pin interrupts
207  * msi = 0: force pin interrupts
208  */
209 static int msi_allowed = 2;
210 
211 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
212 SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
213     "MSI-X, MSI, INTx selector");
214 
215 /*
216  * The driver uses an auto-queue algorithm by default.
217  * To disable it and force a single queue-set per port, use multiq = 0
218  */
219 static int multiq = 1;
220 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
221     "use min(ncpus/ports, 8) queue-sets per port");
222 
223 /*
224  * By default the driver will not update the firmware unless
225  * it was compiled against a newer version
226  *
227  */
228 static int force_fw_update = 0;
229 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
230     "update firmware even if up to date");
231 
232 int cxgb_use_16k_clusters = -1;
233 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
234     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
235 
236 static int nfilters = -1;
237 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
238     &nfilters, 0, "max number of entries in the filter table");
239 
240 enum {
241 	MAX_TXQ_ENTRIES      = 16384,
242 	MAX_CTRL_TXQ_ENTRIES = 1024,
243 	MAX_RSPQ_ENTRIES     = 16384,
244 	MAX_RX_BUFFERS       = 16384,
245 	MAX_RX_JUMBO_BUFFERS = 16384,
246 	MIN_TXQ_ENTRIES      = 4,
247 	MIN_CTRL_TXQ_ENTRIES = 4,
248 	MIN_RSPQ_ENTRIES     = 32,
249 	MIN_FL_ENTRIES       = 32,
250 	MIN_FL_JUMBO_ENTRIES = 32
251 };
252 
253 struct filter_info {
254 	u32 sip;
255 	u32 sip_mask;
256 	u32 dip;
257 	u16 sport;
258 	u16 dport;
259 	u32 vlan:12;
260 	u32 vlan_prio:3;
261 	u32 mac_hit:1;
262 	u32 mac_idx:4;
263 	u32 mac_vld:1;
264 	u32 pkt_type:2;
265 	u32 report_filter_id:1;
266 	u32 pass:1;
267 	u32 rss:1;
268 	u32 qset:3;
269 	u32 locked:1;
270 	u32 valid:1;
271 };
272 
273 enum { FILTER_NO_VLAN_PRI = 7 };
274 
275 #define EEPROM_MAGIC 0x38E2F10C
276 
277 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
278 
279 /* Table for probing the cards.  The desc field isn't actually used */
280 struct cxgb_ident {
281 	uint16_t	vendor;
282 	uint16_t	device;
283 	int		index;
284 	char		*desc;
285 } cxgb_identifiers[] = {
286 	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
287 	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
288 	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
289 	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
290 	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
291 	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
292 	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
293 	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
294 	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
295 	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
296 	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
297 	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
298 	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
299 	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
300 	{0, 0, 0, NULL}
301 };
302 
303 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
304 
305 
306 static __inline char
307 t3rev2char(struct adapter *adapter)
308 {
309 	char rev = 'z';
310 
311 	switch(adapter->params.rev) {
312 	case T3_REV_A:
313 		rev = 'a';
314 		break;
315 	case T3_REV_B:
316 	case T3_REV_B2:
317 		rev = 'b';
318 		break;
319 	case T3_REV_C:
320 		rev = 'c';
321 		break;
322 	}
323 	return rev;
324 }
325 
326 static struct cxgb_ident *
327 cxgb_get_ident(device_t dev)
328 {
329 	struct cxgb_ident *id;
330 
331 	for (id = cxgb_identifiers; id->desc != NULL; id++) {
332 		if ((id->vendor == pci_get_vendor(dev)) &&
333 		    (id->device == pci_get_device(dev))) {
334 			return (id);
335 		}
336 	}
337 	return (NULL);
338 }
339 
340 static const struct adapter_info *
341 cxgb_get_adapter_info(device_t dev)
342 {
343 	struct cxgb_ident *id;
344 	const struct adapter_info *ai;
345 
346 	id = cxgb_get_ident(dev);
347 	if (id == NULL)
348 		return (NULL);
349 
350 	ai = t3_get_adapter_info(id->index);
351 
352 	return (ai);
353 }
354 
355 static int
356 cxgb_controller_probe(device_t dev)
357 {
358 	const struct adapter_info *ai;
359 	char *ports, buf[80];
360 	int nports;
361 
362 	ai = cxgb_get_adapter_info(dev);
363 	if (ai == NULL)
364 		return (ENXIO);
365 
366 	nports = ai->nports0 + ai->nports1;
367 	if (nports == 1)
368 		ports = "port";
369 	else
370 		ports = "ports";
371 
372 	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
373 	device_set_desc_copy(dev, buf);
374 	return (BUS_PROBE_DEFAULT);
375 }
376 
377 #define FW_FNAME "cxgb_t3fw"
378 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
379 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
380 
381 static int
382 upgrade_fw(adapter_t *sc)
383 {
384 	const struct firmware *fw;
385 	int status;
386 	u32 vers;
387 
388 	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
389 		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
390 		return (ENOENT);
391 	} else
392 		device_printf(sc->dev, "installing firmware on card\n");
393 	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
394 
395 	if (status != 0) {
396 		device_printf(sc->dev, "failed to install firmware: %d\n",
397 		    status);
398 	} else {
399 		t3_get_fw_version(sc, &vers);
400 		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
401 		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
402 		    G_FW_VERSION_MICRO(vers));
403 	}
404 
405 	firmware_put(fw, FIRMWARE_UNLOAD);
406 
407 	return (status);
408 }
409 
410 /*
411  * The cxgb_controller_attach function is responsible for the initial
412  * bringup of the device.  Its responsibilities include:
413  *
414  *  1. Determine if the device supports MSI or MSI-X.
415  *  2. Allocate bus resources so that we can access the Base Address Register
416  *  3. Create and initialize mutexes for the controller and its control
417  *     logic such as SGE and MDIO.
418  *  4. Call hardware specific setup routine for the adapter as a whole.
419  *  5. Allocate the BAR for doing MSI-X.
420  *  6. Setup the line interrupt iff MSI-X is not supported.
421  *  7. Create the driver's taskq.
422  *  8. Start one task queue service thread.
423  *  9. Check if the firmware and SRAM are up-to-date.  They will be
424  *     auto-updated later (before FULL_INIT_DONE), if required.
425  * 10. Create a child device for each MAC (port)
426  * 11. Initialize T3 private state.
427  * 12. Trigger the LED
428  * 13. Setup offload iff supported.
429  * 14. Reset/restart the tick callout.
430  * 15. Attach sysctls
431  *
432  * NOTE: Any modification or deviation from this list MUST be reflected in
433  * the above comment.  Failure to do so will result in problems on various
434  * error conditions including link flapping.
435  */
436 static int
437 cxgb_controller_attach(device_t dev)
438 {
439 	device_t child;
440 	const struct adapter_info *ai;
441 	struct adapter *sc;
442 	int i, error = 0;
443 	uint32_t vers;
444 	int port_qsets = 1;
445 	int msi_needed, reg;
446 	char buf[80];
447 
448 	sc = device_get_softc(dev);
449 	sc->dev = dev;
450 	sc->msi_count = 0;
451 	ai = cxgb_get_adapter_info(dev);
452 
453 	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
454 	    device_get_unit(dev));
455 	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
456 
457 	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
458 	    device_get_unit(dev));
459 	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
460 	    device_get_unit(dev));
461 	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
462 	    device_get_unit(dev));
463 
464 	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
465 	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
466 	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
467 
468 	mtx_lock(&t3_list_lock);
469 	SLIST_INSERT_HEAD(&t3_list, sc, link);
470 	mtx_unlock(&t3_list_lock);
471 
472 	/* find the PCIe link width and set max read request to 4KB*/
473 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
474 		uint16_t lnk;
475 
476 		lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
477 		sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
478 		if (sc->link_width < 8 &&
479 		    (ai->caps & SUPPORTED_10000baseT_Full)) {
480 			device_printf(sc->dev,
481 			    "PCIe x%d Link, expect reduced performance\n",
482 			    sc->link_width);
483 		}
484 
485 		pci_set_max_read_req(dev, 4096);
486 	}
487 
488 	touch_bars(dev);
489 	pci_enable_busmaster(dev);
490 	/*
491 	 * Allocate the registers and make them available to the driver.
492 	 * The registers that we care about for NIC mode are in BAR 0
493 	 */
494 	sc->regs_rid = PCIR_BAR(0);
495 	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
496 	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
497 		device_printf(dev, "Cannot allocate BAR region 0\n");
498 		error = ENXIO;
499 		goto out;
500 	}
501 
502 	sc->bt = rman_get_bustag(sc->regs_res);
503 	sc->bh = rman_get_bushandle(sc->regs_res);
504 	sc->mmio_len = rman_get_size(sc->regs_res);
505 
506 	for (i = 0; i < MAX_NPORTS; i++)
507 		sc->port[i].adapter = sc;
508 
509 	if (t3_prep_adapter(sc, ai, 1) < 0) {
510 		printf("prep adapter failed\n");
511 		error = ENODEV;
512 		goto out;
513 	}
514 
515 	sc->udbs_rid = PCIR_BAR(2);
516 	sc->udbs_res = NULL;
517 	if (is_offload(sc) &&
518 	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
519 		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
520 		device_printf(dev, "Cannot allocate BAR region 1\n");
521 		error = ENXIO;
522 		goto out;
523 	}
524 
525         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
526 	 * enough messages for the queue sets.  If that fails, try falling
527 	 * back to MSI.  If that fails, then try falling back to the legacy
528 	 * interrupt pin model.
529 	 */
530 	sc->msix_regs_rid = 0x20;
531 	if ((msi_allowed >= 2) &&
532 	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
533 	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
534 
535 		if (multiq)
536 			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
537 		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
538 
539 		if (pci_msix_count(dev) == 0 ||
540 		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
541 		    sc->msi_count != msi_needed) {
542 			device_printf(dev, "alloc msix failed - "
543 				      "msi_count=%d, msi_needed=%d, err=%d; "
544 				      "will try MSI\n", sc->msi_count,
545 				      msi_needed, error);
546 			sc->msi_count = 0;
547 			port_qsets = 1;
548 			pci_release_msi(dev);
549 			bus_release_resource(dev, SYS_RES_MEMORY,
550 			    sc->msix_regs_rid, sc->msix_regs_res);
551 			sc->msix_regs_res = NULL;
552 		} else {
553 			sc->flags |= USING_MSIX;
554 			sc->cxgb_intr = cxgb_async_intr;
555 			device_printf(dev,
556 				      "using MSI-X interrupts (%u vectors)\n",
557 				      sc->msi_count);
558 		}
559 	}
560 
561 	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
562 		sc->msi_count = 1;
563 		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
564 			device_printf(dev, "alloc msi failed - "
565 				      "err=%d; will try INTx\n", error);
566 			sc->msi_count = 0;
567 			port_qsets = 1;
568 			pci_release_msi(dev);
569 		} else {
570 			sc->flags |= USING_MSI;
571 			sc->cxgb_intr = t3_intr_msi;
572 			device_printf(dev, "using MSI interrupts\n");
573 		}
574 	}
575 	if (sc->msi_count == 0) {
576 		device_printf(dev, "using line interrupts\n");
577 		sc->cxgb_intr = t3b_intr;
578 	}
579 
580 	/* Create a private taskqueue thread for handling driver events */
581 	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
582 	    taskqueue_thread_enqueue, &sc->tq);
583 	if (sc->tq == NULL) {
584 		device_printf(dev, "failed to allocate controller task queue\n");
585 		goto out;
586 	}
587 
588 	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
589 	    device_get_nameunit(dev));
590 	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
591 
592 
593 	/* Create a periodic callout for checking adapter status */
594 	callout_init(&sc->cxgb_tick_ch, 1);
595 
596 	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
597 		/*
598 		 * Warn user that a firmware update will be attempted in init.
599 		 */
600 		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
601 		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
602 		sc->flags &= ~FW_UPTODATE;
603 	} else {
604 		sc->flags |= FW_UPTODATE;
605 	}
606 
607 	if (t3_check_tpsram_version(sc) < 0) {
608 		/*
609 		 * Warn user that a firmware update will be attempted in init.
610 		 */
611 		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
612 		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
613 		sc->flags &= ~TPS_UPTODATE;
614 	} else {
615 		sc->flags |= TPS_UPTODATE;
616 	}
617 
618 	/*
619 	 * Create a child device for each MAC.  The ethernet attachment
620 	 * will be done in these children.
621 	 */
622 	for (i = 0; i < (sc)->params.nports; i++) {
623 		struct port_info *pi;
624 
625 		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
626 			device_printf(dev, "failed to add child port\n");
627 			error = EINVAL;
628 			goto out;
629 		}
630 		pi = &sc->port[i];
631 		pi->adapter = sc;
632 		pi->nqsets = port_qsets;
633 		pi->first_qset = i*port_qsets;
634 		pi->port_id = i;
635 		pi->tx_chan = i >= ai->nports0;
636 		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
637 		sc->rxpkt_map[pi->txpkt_intf] = i;
638 		sc->port[i].tx_chan = i >= ai->nports0;
639 		sc->portdev[i] = child;
640 		device_set_softc(child, pi);
641 	}
642 	if ((error = bus_generic_attach(dev)) != 0)
643 		goto out;
644 
645 	/* initialize sge private state */
646 	t3_sge_init_adapter(sc);
647 
648 	t3_led_ready(sc);
649 
650 	error = t3_get_fw_version(sc, &vers);
651 	if (error)
652 		goto out;
653 
654 	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
655 	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
656 	    G_FW_VERSION_MICRO(vers));
657 
658 	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
659 		 ai->desc, is_offload(sc) ? "R" : "",
660 		 sc->params.vpd.ec, sc->params.vpd.sn);
661 	device_set_desc_copy(dev, buf);
662 
663 	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
664 		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
665 		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
666 
667 	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
668 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
669 	t3_add_attach_sysctls(sc);
670 
671 #ifdef TCP_OFFLOAD
672 	for (i = 0; i < NUM_CPL_HANDLERS; i++)
673 		sc->cpl_handler[i] = cpl_not_handled;
674 #endif
675 
676 	t3_intr_clear(sc);
677 	error = cxgb_setup_interrupts(sc);
678 out:
679 	if (error)
680 		cxgb_free(sc);
681 
682 	return (error);
683 }
684 
685 /*
686  * The cxgb_controller_detach routine is called with the device is
687  * unloaded from the system.
688  */
689 
690 static int
691 cxgb_controller_detach(device_t dev)
692 {
693 	struct adapter *sc;
694 
695 	sc = device_get_softc(dev);
696 
697 	cxgb_free(sc);
698 
699 	return (0);
700 }
701 
702 /*
703  * The cxgb_free() is called by the cxgb_controller_detach() routine
704  * to tear down the structures that were built up in
705  * cxgb_controller_attach(), and should be the final piece of work
706  * done when fully unloading the driver.
707  *
708  *
709  *  1. Shutting down the threads started by the cxgb_controller_attach()
710  *     routine.
711  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
712  *  3. Detaching all of the port devices created during the
713  *     cxgb_controller_attach() routine.
714  *  4. Removing the device children created via cxgb_controller_attach().
715  *  5. Releasing PCI resources associated with the device.
716  *  6. Turning off the offload support, iff it was turned on.
717  *  7. Destroying the mutexes created in cxgb_controller_attach().
718  *
719  */
720 static void
721 cxgb_free(struct adapter *sc)
722 {
723 	int i, nqsets = 0;
724 
725 	ADAPTER_LOCK(sc);
726 	sc->flags |= CXGB_SHUTDOWN;
727 	ADAPTER_UNLOCK(sc);
728 
729 	/*
730 	 * Make sure all child devices are gone.
731 	 */
732 	bus_generic_detach(sc->dev);
733 	for (i = 0; i < (sc)->params.nports; i++) {
734 		if (sc->portdev[i] &&
735 		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
736 			device_printf(sc->dev, "failed to delete child port\n");
737 		nqsets += sc->port[i].nqsets;
738 	}
739 
740 	/*
741 	 * At this point, it is as if cxgb_port_detach has run on all ports, and
742 	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
743 	 * all open devices have been closed.
744 	 */
745 	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
746 					   __func__, sc->open_device_map));
747 	for (i = 0; i < sc->params.nports; i++) {
748 		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
749 						  __func__, i));
750 	}
751 
752 	/*
753 	 * Finish off the adapter's callouts.
754 	 */
755 	callout_drain(&sc->cxgb_tick_ch);
756 	callout_drain(&sc->sge_timer_ch);
757 
758 	/*
759 	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
760 	 * sysctls are cleaned up by the kernel linker.
761 	 */
762 	if (sc->flags & FULL_INIT_DONE) {
763  		t3_free_sge_resources(sc, nqsets);
764  		sc->flags &= ~FULL_INIT_DONE;
765  	}
766 
767 	/*
768 	 * Release all interrupt resources.
769 	 */
770 	cxgb_teardown_interrupts(sc);
771 	if (sc->flags & (USING_MSI | USING_MSIX)) {
772 		device_printf(sc->dev, "releasing msi message(s)\n");
773 		pci_release_msi(sc->dev);
774 	} else {
775 		device_printf(sc->dev, "no msi message to release\n");
776 	}
777 
778 	if (sc->msix_regs_res != NULL) {
779 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
780 		    sc->msix_regs_res);
781 	}
782 
783 	/*
784 	 * Free the adapter's taskqueue.
785 	 */
786 	if (sc->tq != NULL) {
787 		taskqueue_free(sc->tq);
788 		sc->tq = NULL;
789 	}
790 
791 	free(sc->filters, M_DEVBUF);
792 	t3_sge_free(sc);
793 
794 	if (sc->udbs_res != NULL)
795 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
796 		    sc->udbs_res);
797 
798 	if (sc->regs_res != NULL)
799 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
800 		    sc->regs_res);
801 
802 	MTX_DESTROY(&sc->mdio_lock);
803 	MTX_DESTROY(&sc->sge.reg_lock);
804 	MTX_DESTROY(&sc->elmer_lock);
805 	mtx_lock(&t3_list_lock);
806 	SLIST_REMOVE(&t3_list, sc, adapter, link);
807 	mtx_unlock(&t3_list_lock);
808 	ADAPTER_LOCK_DEINIT(sc);
809 }
810 
811 /**
812  *	setup_sge_qsets - configure SGE Tx/Rx/response queues
813  *	@sc: the controller softc
814  *
815  *	Determines how many sets of SGE queues to use and initializes them.
816  *	We support multiple queue sets per port if we have MSI-X, otherwise
817  *	just one queue set per port.
818  */
819 static int
820 setup_sge_qsets(adapter_t *sc)
821 {
822 	int i, j, err, irq_idx = 0, qset_idx = 0;
823 	u_int ntxq = SGE_TXQ_PER_SET;
824 
825 	if ((err = t3_sge_alloc(sc)) != 0) {
826 		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
827 		return (err);
828 	}
829 
830 	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
831 		irq_idx = -1;
832 
833 	for (i = 0; i < (sc)->params.nports; i++) {
834 		struct port_info *pi = &sc->port[i];
835 
836 		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
837 			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
838 			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
839 			    &sc->params.sge.qset[qset_idx], ntxq, pi);
840 			if (err) {
841 				t3_free_sge_resources(sc, qset_idx);
842 				device_printf(sc->dev,
843 				    "t3_sge_alloc_qset failed with %d\n", err);
844 				return (err);
845 			}
846 		}
847 	}
848 
849 	return (0);
850 }
851 
852 static void
853 cxgb_teardown_interrupts(adapter_t *sc)
854 {
855 	int i;
856 
857 	for (i = 0; i < SGE_QSETS; i++) {
858 		if (sc->msix_intr_tag[i] == NULL) {
859 
860 			/* Should have been setup fully or not at all */
861 			KASSERT(sc->msix_irq_res[i] == NULL &&
862 				sc->msix_irq_rid[i] == 0,
863 				("%s: half-done interrupt (%d).", __func__, i));
864 
865 			continue;
866 		}
867 
868 		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
869 				  sc->msix_intr_tag[i]);
870 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
871 				     sc->msix_irq_res[i]);
872 
873 		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
874 		sc->msix_irq_rid[i] = 0;
875 	}
876 
877 	if (sc->intr_tag) {
878 		KASSERT(sc->irq_res != NULL,
879 			("%s: half-done interrupt.", __func__));
880 
881 		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
882 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
883 				     sc->irq_res);
884 
885 		sc->irq_res = sc->intr_tag = NULL;
886 		sc->irq_rid = 0;
887 	}
888 }
889 
890 static int
891 cxgb_setup_interrupts(adapter_t *sc)
892 {
893 	struct resource *res;
894 	void *tag;
895 	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
896 
897 	sc->irq_rid = intr_flag ? 1 : 0;
898 	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
899 					     RF_SHAREABLE | RF_ACTIVE);
900 	if (sc->irq_res == NULL) {
901 		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
902 			      intr_flag, sc->irq_rid);
903 		err = EINVAL;
904 		sc->irq_rid = 0;
905 	} else {
906 		err = bus_setup_intr(sc->dev, sc->irq_res,
907 		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
908 		    sc->cxgb_intr, sc, &sc->intr_tag);
909 
910 		if (err) {
911 			device_printf(sc->dev,
912 				      "Cannot set up interrupt (%x, %u, %d)\n",
913 				      intr_flag, sc->irq_rid, err);
914 			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
915 					     sc->irq_res);
916 			sc->irq_res = sc->intr_tag = NULL;
917 			sc->irq_rid = 0;
918 		}
919 	}
920 
921 	/* That's all for INTx or MSI */
922 	if (!(intr_flag & USING_MSIX) || err)
923 		return (err);
924 
925 	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
926 	for (i = 0; i < sc->msi_count - 1; i++) {
927 		rid = i + 2;
928 		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
929 					     RF_SHAREABLE | RF_ACTIVE);
930 		if (res == NULL) {
931 			device_printf(sc->dev, "Cannot allocate interrupt "
932 				      "for message %d\n", rid);
933 			err = EINVAL;
934 			break;
935 		}
936 
937 		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
938 				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
939 		if (err) {
940 			device_printf(sc->dev, "Cannot set up interrupt "
941 				      "for message %d (%d)\n", rid, err);
942 			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
943 			break;
944 		}
945 
946 		sc->msix_irq_rid[i] = rid;
947 		sc->msix_irq_res[i] = res;
948 		sc->msix_intr_tag[i] = tag;
949 		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
950 	}
951 
952 	if (err)
953 		cxgb_teardown_interrupts(sc);
954 
955 	return (err);
956 }
957 
958 
959 static int
960 cxgb_port_probe(device_t dev)
961 {
962 	struct port_info *p;
963 	char buf[80];
964 	const char *desc;
965 
966 	p = device_get_softc(dev);
967 	desc = p->phy.desc;
968 	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
969 	device_set_desc_copy(dev, buf);
970 	return (0);
971 }
972 
973 
974 static int
975 cxgb_makedev(struct port_info *pi)
976 {
977 
978 	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
979 	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
980 
981 	if (pi->port_cdev == NULL)
982 		return (ENOMEM);
983 
984 	pi->port_cdev->si_drv1 = (void *)pi;
985 
986 	return (0);
987 }
988 
989 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
990     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
991     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
992 #define CXGB_CAP_ENABLE CXGB_CAP
993 
994 static int
995 cxgb_port_attach(device_t dev)
996 {
997 	struct port_info *p;
998 	struct ifnet *ifp;
999 	int err;
1000 	struct adapter *sc;
1001 
1002 	p = device_get_softc(dev);
1003 	sc = p->adapter;
1004 	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1005 	    device_get_unit(device_get_parent(dev)), p->port_id);
1006 	PORT_LOCK_INIT(p, p->lockbuf);
1007 
1008 	callout_init(&p->link_check_ch, 1);
1009 	TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1010 
1011 	/* Allocate an ifnet object and set it up */
1012 	ifp = p->ifp = if_alloc(IFT_ETHER);
1013 	if (ifp == NULL) {
1014 		device_printf(dev, "Cannot allocate ifnet\n");
1015 		return (ENOMEM);
1016 	}
1017 
1018 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1019 	ifp->if_init = cxgb_init;
1020 	ifp->if_softc = p;
1021 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1022 	ifp->if_ioctl = cxgb_ioctl;
1023 	ifp->if_transmit = cxgb_transmit;
1024 	ifp->if_qflush = cxgb_qflush;
1025 	ifp->if_get_counter = cxgb_get_counter;
1026 
1027 	ifp->if_capabilities = CXGB_CAP;
1028 #ifdef TCP_OFFLOAD
1029 	if (is_offload(sc))
1030 		ifp->if_capabilities |= IFCAP_TOE4;
1031 #endif
1032 	ifp->if_capenable = CXGB_CAP_ENABLE;
1033 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1034 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1035 
1036 	/*
1037 	 * Disable TSO on 4-port - it isn't supported by the firmware.
1038 	 */
1039 	if (sc->params.nports > 2) {
1040 		ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1041 		ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1042 		ifp->if_hwassist &= ~CSUM_TSO;
1043 	}
1044 
1045 	ether_ifattach(ifp, p->hw_addr);
1046 
1047 #ifdef DEFAULT_JUMBO
1048 	if (sc->params.nports <= 2)
1049 		ifp->if_mtu = ETHERMTU_JUMBO;
1050 #endif
1051 	if ((err = cxgb_makedev(p)) != 0) {
1052 		printf("makedev failed %d\n", err);
1053 		return (err);
1054 	}
1055 
1056 	/* Create a list of media supported by this port */
1057 	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1058 	    cxgb_media_status);
1059 	cxgb_build_medialist(p);
1060 
1061 	t3_sge_init_port(p);
1062 
1063 	return (err);
1064 }
1065 
1066 /*
1067  * cxgb_port_detach() is called via the device_detach methods when
1068  * cxgb_free() calls the bus_generic_detach.  It is responsible for
1069  * removing the device from the view of the kernel, i.e. from all
1070  * interfaces lists etc.  This routine is only called when the driver is
1071  * being unloaded, not when the link goes down.
1072  */
1073 static int
1074 cxgb_port_detach(device_t dev)
1075 {
1076 	struct port_info *p;
1077 	struct adapter *sc;
1078 	int i;
1079 
1080 	p = device_get_softc(dev);
1081 	sc = p->adapter;
1082 
1083 	/* Tell cxgb_ioctl and if_init that the port is going away */
1084 	ADAPTER_LOCK(sc);
1085 	SET_DOOMED(p);
1086 	wakeup(&sc->flags);
1087 	while (IS_BUSY(sc))
1088 		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1089 	SET_BUSY(sc);
1090 	ADAPTER_UNLOCK(sc);
1091 
1092 	if (p->port_cdev != NULL)
1093 		destroy_dev(p->port_cdev);
1094 
1095 	cxgb_uninit_synchronized(p);
1096 	ether_ifdetach(p->ifp);
1097 
1098 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1099 		struct sge_qset *qs = &sc->sge.qs[i];
1100 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1101 
1102 		callout_drain(&txq->txq_watchdog);
1103 		callout_drain(&txq->txq_timer);
1104 	}
1105 
1106 	PORT_LOCK_DEINIT(p);
1107 	if_free(p->ifp);
1108 	p->ifp = NULL;
1109 
1110 	ADAPTER_LOCK(sc);
1111 	CLR_BUSY(sc);
1112 	wakeup_one(&sc->flags);
1113 	ADAPTER_UNLOCK(sc);
1114 	return (0);
1115 }
1116 
1117 void
1118 t3_fatal_err(struct adapter *sc)
1119 {
1120 	u_int fw_status[4];
1121 
1122 	if (sc->flags & FULL_INIT_DONE) {
1123 		t3_sge_stop(sc);
1124 		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1125 		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1126 		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1127 		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1128 		t3_intr_disable(sc);
1129 	}
1130 	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1131 	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1132 		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1133 		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1134 }
1135 
1136 int
1137 t3_os_find_pci_capability(adapter_t *sc, int cap)
1138 {
1139 	device_t dev;
1140 	struct pci_devinfo *dinfo;
1141 	pcicfgregs *cfg;
1142 	uint32_t status;
1143 	uint8_t ptr;
1144 
1145 	dev = sc->dev;
1146 	dinfo = device_get_ivars(dev);
1147 	cfg = &dinfo->cfg;
1148 
1149 	status = pci_read_config(dev, PCIR_STATUS, 2);
1150 	if (!(status & PCIM_STATUS_CAPPRESENT))
1151 		return (0);
1152 
1153 	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1154 	case 0:
1155 	case 1:
1156 		ptr = PCIR_CAP_PTR;
1157 		break;
1158 	case 2:
1159 		ptr = PCIR_CAP_PTR_2;
1160 		break;
1161 	default:
1162 		return (0);
1163 		break;
1164 	}
1165 	ptr = pci_read_config(dev, ptr, 1);
1166 
1167 	while (ptr != 0) {
1168 		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1169 			return (ptr);
1170 		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1171 	}
1172 
1173 	return (0);
1174 }
1175 
1176 int
1177 t3_os_pci_save_state(struct adapter *sc)
1178 {
1179 	device_t dev;
1180 	struct pci_devinfo *dinfo;
1181 
1182 	dev = sc->dev;
1183 	dinfo = device_get_ivars(dev);
1184 
1185 	pci_cfg_save(dev, dinfo, 0);
1186 	return (0);
1187 }
1188 
1189 int
1190 t3_os_pci_restore_state(struct adapter *sc)
1191 {
1192 	device_t dev;
1193 	struct pci_devinfo *dinfo;
1194 
1195 	dev = sc->dev;
1196 	dinfo = device_get_ivars(dev);
1197 
1198 	pci_cfg_restore(dev, dinfo);
1199 	return (0);
1200 }
1201 
1202 /**
1203  *	t3_os_link_changed - handle link status changes
1204  *	@sc: the adapter associated with the link change
1205  *	@port_id: the port index whose link status has changed
1206  *	@link_status: the new status of the link
1207  *	@speed: the new speed setting
1208  *	@duplex: the new duplex setting
1209  *	@fc: the new flow-control setting
1210  *
1211  *	This is the OS-dependent handler for link status changes.  The OS
1212  *	neutral handler takes care of most of the processing for these events,
1213  *	then calls this handler for any OS-specific processing.
1214  */
1215 void
1216 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1217      int duplex, int fc, int mac_was_reset)
1218 {
1219 	struct port_info *pi = &adapter->port[port_id];
1220 	struct ifnet *ifp = pi->ifp;
1221 
1222 	/* no race with detach, so ifp should always be good */
1223 	KASSERT(ifp, ("%s: if detached.", __func__));
1224 
1225 	/* Reapply mac settings if they were lost due to a reset */
1226 	if (mac_was_reset) {
1227 		PORT_LOCK(pi);
1228 		cxgb_update_mac_settings(pi);
1229 		PORT_UNLOCK(pi);
1230 	}
1231 
1232 	if (link_status) {
1233 		ifp->if_baudrate = IF_Mbps(speed);
1234 		if_link_state_change(ifp, LINK_STATE_UP);
1235 	} else
1236 		if_link_state_change(ifp, LINK_STATE_DOWN);
1237 }
1238 
1239 /**
1240  *	t3_os_phymod_changed - handle PHY module changes
1241  *	@phy: the PHY reporting the module change
1242  *	@mod_type: new module type
1243  *
1244  *	This is the OS-dependent handler for PHY module changes.  It is
1245  *	invoked when a PHY module is removed or inserted for any OS-specific
1246  *	processing.
1247  */
1248 void t3_os_phymod_changed(struct adapter *adap, int port_id)
1249 {
1250 	static const char *mod_str[] = {
1251 		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1252 	};
1253 	struct port_info *pi = &adap->port[port_id];
1254 	int mod = pi->phy.modtype;
1255 
1256 	if (mod != pi->media.ifm_cur->ifm_data)
1257 		cxgb_build_medialist(pi);
1258 
1259 	if (mod == phy_modtype_none)
1260 		if_printf(pi->ifp, "PHY module unplugged\n");
1261 	else {
1262 		KASSERT(mod < ARRAY_SIZE(mod_str),
1263 			("invalid PHY module type %d", mod));
1264 		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1265 	}
1266 }
1267 
1268 void
1269 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1270 {
1271 
1272 	/*
1273 	 * The ifnet might not be allocated before this gets called,
1274 	 * as this is called early on in attach by t3_prep_adapter
1275 	 * save the address off in the port structure
1276 	 */
1277 	if (cxgb_debug)
1278 		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1279 	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1280 }
1281 
1282 /*
1283  * Programs the XGMAC based on the settings in the ifnet.  These settings
1284  * include MTU, MAC address, mcast addresses, etc.
1285  */
1286 static void
1287 cxgb_update_mac_settings(struct port_info *p)
1288 {
1289 	struct ifnet *ifp = p->ifp;
1290 	struct t3_rx_mode rm;
1291 	struct cmac *mac = &p->mac;
1292 	int mtu, hwtagging;
1293 
1294 	PORT_LOCK_ASSERT_OWNED(p);
1295 
1296 	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
1297 
1298 	mtu = ifp->if_mtu;
1299 	if (ifp->if_capenable & IFCAP_VLAN_MTU)
1300 		mtu += ETHER_VLAN_ENCAP_LEN;
1301 
1302 	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
1303 
1304 	t3_mac_set_mtu(mac, mtu);
1305 	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1306 	t3_mac_set_address(mac, 0, p->hw_addr);
1307 	t3_init_rx_mode(&rm, p);
1308 	t3_mac_set_rx_mode(mac, &rm);
1309 }
1310 
1311 
1312 static int
1313 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1314 			      unsigned long n)
1315 {
1316 	int attempts = 5;
1317 
1318 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1319 		if (!--attempts)
1320 			return (ETIMEDOUT);
1321 		t3_os_sleep(10);
1322 	}
1323 	return 0;
1324 }
1325 
1326 static int
1327 init_tp_parity(struct adapter *adap)
1328 {
1329 	int i;
1330 	struct mbuf *m;
1331 	struct cpl_set_tcb_field *greq;
1332 	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1333 
1334 	t3_tp_set_offload_mode(adap, 1);
1335 
1336 	for (i = 0; i < 16; i++) {
1337 		struct cpl_smt_write_req *req;
1338 
1339 		m = m_gethdr(M_WAITOK, MT_DATA);
1340 		req = mtod(m, struct cpl_smt_write_req *);
1341 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1342 		memset(req, 0, sizeof(*req));
1343 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1344 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1345 		req->iff = i;
1346 		t3_mgmt_tx(adap, m);
1347 	}
1348 
1349 	for (i = 0; i < 2048; i++) {
1350 		struct cpl_l2t_write_req *req;
1351 
1352 		m = m_gethdr(M_WAITOK, MT_DATA);
1353 		req = mtod(m, struct cpl_l2t_write_req *);
1354 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1355 		memset(req, 0, sizeof(*req));
1356 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1357 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1358 		req->params = htonl(V_L2T_W_IDX(i));
1359 		t3_mgmt_tx(adap, m);
1360 	}
1361 
1362 	for (i = 0; i < 2048; i++) {
1363 		struct cpl_rte_write_req *req;
1364 
1365 		m = m_gethdr(M_WAITOK, MT_DATA);
1366 		req = mtod(m, struct cpl_rte_write_req *);
1367 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1368 		memset(req, 0, sizeof(*req));
1369 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1370 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1371 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1372 		t3_mgmt_tx(adap, m);
1373 	}
1374 
1375 	m = m_gethdr(M_WAITOK, MT_DATA);
1376 	greq = mtod(m, struct cpl_set_tcb_field *);
1377 	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1378 	memset(greq, 0, sizeof(*greq));
1379 	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1380 	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1381 	greq->mask = htobe64(1);
1382 	t3_mgmt_tx(adap, m);
1383 
1384 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1385 	t3_tp_set_offload_mode(adap, 0);
1386 	return (i);
1387 }
1388 
1389 /**
1390  *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1391  *	@adap: the adapter
1392  *
1393  *	Sets up RSS to distribute packets to multiple receive queues.  We
1394  *	configure the RSS CPU lookup table to distribute to the number of HW
1395  *	receive queues, and the response queue lookup table to narrow that
1396  *	down to the response queues actually configured for each port.
1397  *	We always configure the RSS mapping for two ports since the mapping
1398  *	table has plenty of entries.
1399  */
1400 static void
1401 setup_rss(adapter_t *adap)
1402 {
1403 	int i;
1404 	u_int nq[2];
1405 	uint8_t cpus[SGE_QSETS + 1];
1406 	uint16_t rspq_map[RSS_TABLE_SIZE];
1407 
1408 	for (i = 0; i < SGE_QSETS; ++i)
1409 		cpus[i] = i;
1410 	cpus[SGE_QSETS] = 0xff;
1411 
1412 	nq[0] = nq[1] = 0;
1413 	for_each_port(adap, i) {
1414 		const struct port_info *pi = adap2pinfo(adap, i);
1415 
1416 		nq[pi->tx_chan] += pi->nqsets;
1417 	}
1418 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1419 		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1420 		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1421 	}
1422 
1423 	/* Calculate the reverse RSS map table */
1424 	for (i = 0; i < SGE_QSETS; ++i)
1425 		adap->rrss_map[i] = 0xff;
1426 	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1427 		if (adap->rrss_map[rspq_map[i]] == 0xff)
1428 			adap->rrss_map[rspq_map[i]] = i;
1429 
1430 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1431 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1432 	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1433 	              cpus, rspq_map);
1434 
1435 }
1436 static void
1437 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1438 			      int hi, int port)
1439 {
1440 	struct mbuf *m;
1441 	struct mngt_pktsched_wr *req;
1442 
1443 	m = m_gethdr(M_NOWAIT, MT_DATA);
1444 	if (m) {
1445 		req = mtod(m, struct mngt_pktsched_wr *);
1446 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1447 		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1448 		req->sched = sched;
1449 		req->idx = qidx;
1450 		req->min = lo;
1451 		req->max = hi;
1452 		req->binding = port;
1453 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1454 		t3_mgmt_tx(adap, m);
1455 	}
1456 }
1457 
1458 static void
1459 bind_qsets(adapter_t *sc)
1460 {
1461 	int i, j;
1462 
1463 	for (i = 0; i < (sc)->params.nports; ++i) {
1464 		const struct port_info *pi = adap2pinfo(sc, i);
1465 
1466 		for (j = 0; j < pi->nqsets; ++j) {
1467 			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1468 					  -1, pi->tx_chan);
1469 
1470 		}
1471 	}
1472 }
1473 
1474 static void
1475 update_tpeeprom(struct adapter *adap)
1476 {
1477 	const struct firmware *tpeeprom;
1478 
1479 	uint32_t version;
1480 	unsigned int major, minor;
1481 	int ret, len;
1482 	char rev, name[32];
1483 
1484 	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1485 
1486 	major = G_TP_VERSION_MAJOR(version);
1487 	minor = G_TP_VERSION_MINOR(version);
1488 	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1489 		return;
1490 
1491 	rev = t3rev2char(adap);
1492 	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1493 
1494 	tpeeprom = firmware_get(name);
1495 	if (tpeeprom == NULL) {
1496 		device_printf(adap->dev,
1497 			      "could not load TP EEPROM: unable to load %s\n",
1498 			      name);
1499 		return;
1500 	}
1501 
1502 	len = tpeeprom->datasize - 4;
1503 
1504 	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1505 	if (ret)
1506 		goto release_tpeeprom;
1507 
1508 	if (len != TP_SRAM_LEN) {
1509 		device_printf(adap->dev,
1510 			      "%s length is wrong len=%d expected=%d\n", name,
1511 			      len, TP_SRAM_LEN);
1512 		return;
1513 	}
1514 
1515 	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1516 	    TP_SRAM_OFFSET);
1517 
1518 	if (!ret) {
1519 		device_printf(adap->dev,
1520 			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1521 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1522 	} else
1523 		device_printf(adap->dev,
1524 			      "Protocol SRAM image update in EEPROM failed\n");
1525 
1526 release_tpeeprom:
1527 	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1528 
1529 	return;
1530 }
1531 
1532 static int
1533 update_tpsram(struct adapter *adap)
1534 {
1535 	const struct firmware *tpsram;
1536 	int ret;
1537 	char rev, name[32];
1538 
1539 	rev = t3rev2char(adap);
1540 	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1541 
1542 	update_tpeeprom(adap);
1543 
1544 	tpsram = firmware_get(name);
1545 	if (tpsram == NULL){
1546 		device_printf(adap->dev, "could not load TP SRAM\n");
1547 		return (EINVAL);
1548 	} else
1549 		device_printf(adap->dev, "updating TP SRAM\n");
1550 
1551 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1552 	if (ret)
1553 		goto release_tpsram;
1554 
1555 	ret = t3_set_proto_sram(adap, tpsram->data);
1556 	if (ret)
1557 		device_printf(adap->dev, "loading protocol SRAM failed\n");
1558 
1559 release_tpsram:
1560 	firmware_put(tpsram, FIRMWARE_UNLOAD);
1561 
1562 	return ret;
1563 }
1564 
1565 /**
1566  *	cxgb_up - enable the adapter
1567  *	@adap: adapter being enabled
1568  *
1569  *	Called when the first port is enabled, this function performs the
1570  *	actions necessary to make an adapter operational, such as completing
1571  *	the initialization of HW modules, and enabling interrupts.
1572  */
1573 static int
1574 cxgb_up(struct adapter *sc)
1575 {
1576 	int err = 0;
1577 	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1578 
1579 	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1580 					   __func__, sc->open_device_map));
1581 
1582 	if ((sc->flags & FULL_INIT_DONE) == 0) {
1583 
1584 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1585 
1586 		if ((sc->flags & FW_UPTODATE) == 0)
1587 			if ((err = upgrade_fw(sc)))
1588 				goto out;
1589 
1590 		if ((sc->flags & TPS_UPTODATE) == 0)
1591 			if ((err = update_tpsram(sc)))
1592 				goto out;
1593 
1594 		if (is_offload(sc) && nfilters != 0) {
1595 			sc->params.mc5.nservers = 0;
1596 
1597 			if (nfilters < 0)
1598 				sc->params.mc5.nfilters = mxf;
1599 			else
1600 				sc->params.mc5.nfilters = min(nfilters, mxf);
1601 		}
1602 
1603 		err = t3_init_hw(sc, 0);
1604 		if (err)
1605 			goto out;
1606 
1607 		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1608 		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1609 
1610 		err = setup_sge_qsets(sc);
1611 		if (err)
1612 			goto out;
1613 
1614 		alloc_filters(sc);
1615 		setup_rss(sc);
1616 
1617 		t3_add_configured_sysctls(sc);
1618 		sc->flags |= FULL_INIT_DONE;
1619 	}
1620 
1621 	t3_intr_clear(sc);
1622 	t3_sge_start(sc);
1623 	t3_intr_enable(sc);
1624 
1625 	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1626 	    is_offload(sc) && init_tp_parity(sc) == 0)
1627 		sc->flags |= TP_PARITY_INIT;
1628 
1629 	if (sc->flags & TP_PARITY_INIT) {
1630 		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1631 		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1632 	}
1633 
1634 	if (!(sc->flags & QUEUES_BOUND)) {
1635 		bind_qsets(sc);
1636 		setup_hw_filters(sc);
1637 		sc->flags |= QUEUES_BOUND;
1638 	}
1639 
1640 	t3_sge_reset_adapter(sc);
1641 out:
1642 	return (err);
1643 }
1644 
1645 /*
1646  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
1647  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
1648  * during controller_detach, not here.
1649  */
1650 static void
1651 cxgb_down(struct adapter *sc)
1652 {
1653 	t3_sge_stop(sc);
1654 	t3_intr_disable(sc);
1655 }
1656 
1657 /*
1658  * if_init for cxgb ports.
1659  */
1660 static void
1661 cxgb_init(void *arg)
1662 {
1663 	struct port_info *p = arg;
1664 	struct adapter *sc = p->adapter;
1665 
1666 	ADAPTER_LOCK(sc);
1667 	cxgb_init_locked(p); /* releases adapter lock */
1668 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1669 }
1670 
1671 static int
1672 cxgb_init_locked(struct port_info *p)
1673 {
1674 	struct adapter *sc = p->adapter;
1675 	struct ifnet *ifp = p->ifp;
1676 	struct cmac *mac = &p->mac;
1677 	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;
1678 
1679 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1680 
1681 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1682 		gave_up_lock = 1;
1683 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1684 			rc = EINTR;
1685 			goto done;
1686 		}
1687 	}
1688 	if (IS_DOOMED(p)) {
1689 		rc = ENXIO;
1690 		goto done;
1691 	}
1692 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1693 
1694 	/*
1695 	 * The code that runs during one-time adapter initialization can sleep
1696 	 * so it's important not to hold any locks across it.
1697 	 */
1698 	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1699 
1700 	if (may_sleep) {
1701 		SET_BUSY(sc);
1702 		gave_up_lock = 1;
1703 		ADAPTER_UNLOCK(sc);
1704 	}
1705 
1706 	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1707 			goto done;
1708 
1709 	PORT_LOCK(p);
1710 	if (isset(&sc->open_device_map, p->port_id) &&
1711 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1712 		PORT_UNLOCK(p);
1713 		goto done;
1714 	}
1715 	t3_port_intr_enable(sc, p->port_id);
1716 	if (!mac->multiport)
1717 		t3_mac_init(mac);
1718 	cxgb_update_mac_settings(p);
1719 	t3_link_start(&p->phy, mac, &p->link_config);
1720 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1721 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1722 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1723 	PORT_UNLOCK(p);
1724 
1725 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1726 		struct sge_qset *qs = &sc->sge.qs[i];
1727 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1728 
1729 		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1730 				 txq->txq_watchdog.c_cpu);
1731 	}
1732 
1733 	/* all ok */
1734 	setbit(&sc->open_device_map, p->port_id);
1735 	callout_reset(&p->link_check_ch,
1736 	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
1737 	    link_check_callout, p);
1738 
1739 done:
1740 	if (may_sleep) {
1741 		ADAPTER_LOCK(sc);
1742 		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1743 		CLR_BUSY(sc);
1744 	}
1745 	if (gave_up_lock)
1746 		wakeup_one(&sc->flags);
1747 	ADAPTER_UNLOCK(sc);
1748 	return (rc);
1749 }
1750 
1751 static int
1752 cxgb_uninit_locked(struct port_info *p)
1753 {
1754 	struct adapter *sc = p->adapter;
1755 	int rc;
1756 
1757 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1758 
1759 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1760 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1761 			rc = EINTR;
1762 			goto done;
1763 		}
1764 	}
1765 	if (IS_DOOMED(p)) {
1766 		rc = ENXIO;
1767 		goto done;
1768 	}
1769 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1770 	SET_BUSY(sc);
1771 	ADAPTER_UNLOCK(sc);
1772 
1773 	rc = cxgb_uninit_synchronized(p);
1774 
1775 	ADAPTER_LOCK(sc);
1776 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1777 	CLR_BUSY(sc);
1778 	wakeup_one(&sc->flags);
1779 done:
1780 	ADAPTER_UNLOCK(sc);
1781 	return (rc);
1782 }
1783 
1784 /*
1785  * Called on "ifconfig down", and from port_detach
1786  */
1787 static int
1788 cxgb_uninit_synchronized(struct port_info *pi)
1789 {
1790 	struct adapter *sc = pi->adapter;
1791 	struct ifnet *ifp = pi->ifp;
1792 
1793 	/*
1794 	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1795 	 */
1796 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1797 
1798 	/*
1799 	 * Clear this port's bit from the open device map, and then drain all
1800 	 * the tasks that can access/manipulate this port's port_info or ifp.
1801 	 * We disable this port's interrupts here and so the slow/ext
1802 	 * interrupt tasks won't be enqueued.  The tick task will continue to
1803 	 * be enqueued every second but the runs after this drain will not see
1804 	 * this port in the open device map.
1805 	 *
1806 	 * A well behaved task must take open_device_map into account and ignore
1807 	 * ports that are not open.
1808 	 */
1809 	clrbit(&sc->open_device_map, pi->port_id);
1810 	t3_port_intr_disable(sc, pi->port_id);
1811 	taskqueue_drain(sc->tq, &sc->slow_intr_task);
1812 	taskqueue_drain(sc->tq, &sc->tick_task);
1813 
1814 	callout_drain(&pi->link_check_ch);
1815 	taskqueue_drain(sc->tq, &pi->link_check_task);
1816 
1817 	PORT_LOCK(pi);
1818 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1819 
1820 	/* disable pause frames */
1821 	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1822 
1823 	/* Reset RX FIFO HWM */
1824 	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
1825 			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1826 
1827 	DELAY(100 * 1000);
1828 
1829 	/* Wait for TXFIFO empty */
1830 	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1831 			F_TXFIFO_EMPTY, 1, 20, 5);
1832 
1833 	DELAY(100 * 1000);
1834 	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1835 
1836 	pi->phy.ops->power_down(&pi->phy, 1);
1837 
1838 	PORT_UNLOCK(pi);
1839 
1840 	pi->link_config.link_ok = 0;
1841 	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1842 
1843 	if (sc->open_device_map == 0)
1844 		cxgb_down(pi->adapter);
1845 
1846 	return (0);
1847 }
1848 
1849 /*
1850  * Mark lro enabled or disabled in all qsets for this port
1851  */
1852 static int
1853 cxgb_set_lro(struct port_info *p, int enabled)
1854 {
1855 	int i;
1856 	struct adapter *adp = p->adapter;
1857 	struct sge_qset *q;
1858 
1859 	for (i = 0; i < p->nqsets; i++) {
1860 		q = &adp->sge.qs[p->first_qset + i];
1861 		q->lro.enabled = (enabled != 0);
1862 	}
1863 	return (0);
1864 }
1865 
1866 static int
1867 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1868 {
1869 	struct port_info *p = ifp->if_softc;
1870 	struct adapter *sc = p->adapter;
1871 	struct ifreq *ifr = (struct ifreq *)data;
1872 	int flags, error = 0, mtu;
1873 	uint32_t mask;
1874 
1875 	switch (command) {
1876 	case SIOCSIFMTU:
1877 		ADAPTER_LOCK(sc);
1878 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1879 		if (error) {
1880 fail:
1881 			ADAPTER_UNLOCK(sc);
1882 			return (error);
1883 		}
1884 
1885 		mtu = ifr->ifr_mtu;
1886 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
1887 			error = EINVAL;
1888 		} else {
1889 			ifp->if_mtu = mtu;
1890 			PORT_LOCK(p);
1891 			cxgb_update_mac_settings(p);
1892 			PORT_UNLOCK(p);
1893 		}
1894 		ADAPTER_UNLOCK(sc);
1895 		break;
1896 	case SIOCSIFFLAGS:
1897 		ADAPTER_LOCK(sc);
1898 		if (IS_DOOMED(p)) {
1899 			error = ENXIO;
1900 			goto fail;
1901 		}
1902 		if (ifp->if_flags & IFF_UP) {
1903 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1904 				flags = p->if_flags;
1905 				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1906 				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
1907 					if (IS_BUSY(sc)) {
1908 						error = EBUSY;
1909 						goto fail;
1910 					}
1911 					PORT_LOCK(p);
1912 					cxgb_update_mac_settings(p);
1913 					PORT_UNLOCK(p);
1914 				}
1915 				ADAPTER_UNLOCK(sc);
1916 			} else
1917 				error = cxgb_init_locked(p);
1918 			p->if_flags = ifp->if_flags;
1919 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1920 			error = cxgb_uninit_locked(p);
1921 		else
1922 			ADAPTER_UNLOCK(sc);
1923 
1924 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1925 		break;
1926 	case SIOCADDMULTI:
1927 	case SIOCDELMULTI:
1928 		ADAPTER_LOCK(sc);
1929 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1930 		if (error)
1931 			goto fail;
1932 
1933 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1934 			PORT_LOCK(p);
1935 			cxgb_update_mac_settings(p);
1936 			PORT_UNLOCK(p);
1937 		}
1938 		ADAPTER_UNLOCK(sc);
1939 
1940 		break;
1941 	case SIOCSIFCAP:
1942 		ADAPTER_LOCK(sc);
1943 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1944 		if (error)
1945 			goto fail;
1946 
1947 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1948 		if (mask & IFCAP_TXCSUM) {
1949 			ifp->if_capenable ^= IFCAP_TXCSUM;
1950 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1951 
1952 			if (IFCAP_TSO4 & ifp->if_capenable &&
1953 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1954 				ifp->if_capenable &= ~IFCAP_TSO4;
1955 				if_printf(ifp,
1956 				    "tso4 disabled due to -txcsum.\n");
1957 			}
1958 		}
1959 		if (mask & IFCAP_TXCSUM_IPV6) {
1960 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1961 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1962 
1963 			if (IFCAP_TSO6 & ifp->if_capenable &&
1964 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1965 				ifp->if_capenable &= ~IFCAP_TSO6;
1966 				if_printf(ifp,
1967 				    "tso6 disabled due to -txcsum6.\n");
1968 			}
1969 		}
1970 		if (mask & IFCAP_RXCSUM)
1971 			ifp->if_capenable ^= IFCAP_RXCSUM;
1972 		if (mask & IFCAP_RXCSUM_IPV6)
1973 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1974 
1975 		/*
1976 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1977 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1978 		 * sending a TSO request our way, so it's sufficient to toggle
1979 		 * IFCAP_TSOx only.
1980 		 */
1981 		if (mask & IFCAP_TSO4) {
1982 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1983 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1984 				if_printf(ifp, "enable txcsum first.\n");
1985 				error = EAGAIN;
1986 				goto fail;
1987 			}
1988 			ifp->if_capenable ^= IFCAP_TSO4;
1989 		}
1990 		if (mask & IFCAP_TSO6) {
1991 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1992 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1993 				if_printf(ifp, "enable txcsum6 first.\n");
1994 				error = EAGAIN;
1995 				goto fail;
1996 			}
1997 			ifp->if_capenable ^= IFCAP_TSO6;
1998 		}
1999 		if (mask & IFCAP_LRO) {
2000 			ifp->if_capenable ^= IFCAP_LRO;
2001 
2002 			/* Safe to do this even if cxgb_up not called yet */
2003 			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
2004 		}
2005 #ifdef TCP_OFFLOAD
2006 		if (mask & IFCAP_TOE4) {
2007 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4;
2008 
2009 			error = toe_capability(p, enable);
2010 			if (error == 0)
2011 				ifp->if_capenable ^= mask;
2012 		}
2013 #endif
2014 		if (mask & IFCAP_VLAN_HWTAGGING) {
2015 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2016 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2017 				PORT_LOCK(p);
2018 				cxgb_update_mac_settings(p);
2019 				PORT_UNLOCK(p);
2020 			}
2021 		}
2022 		if (mask & IFCAP_VLAN_MTU) {
2023 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2024 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2025 				PORT_LOCK(p);
2026 				cxgb_update_mac_settings(p);
2027 				PORT_UNLOCK(p);
2028 			}
2029 		}
2030 		if (mask & IFCAP_VLAN_HWTSO)
2031 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2032 		if (mask & IFCAP_VLAN_HWCSUM)
2033 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2034 
2035 #ifdef VLAN_CAPABILITIES
2036 		VLAN_CAPABILITIES(ifp);
2037 #endif
2038 		ADAPTER_UNLOCK(sc);
2039 		break;
2040 	case SIOCSIFMEDIA:
2041 	case SIOCGIFMEDIA:
2042 		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2043 		break;
2044 	default:
2045 		error = ether_ioctl(ifp, command, data);
2046 	}
2047 
2048 	return (error);
2049 }
2050 
2051 static int
2052 cxgb_media_change(struct ifnet *ifp)
2053 {
2054 	return (EOPNOTSUPP);
2055 }
2056 
2057 /*
2058  * Translates phy->modtype to the correct Ethernet media subtype.
2059  */
2060 static int
2061 cxgb_ifm_type(int mod)
2062 {
2063 	switch (mod) {
2064 	case phy_modtype_sr:
2065 		return (IFM_10G_SR);
2066 	case phy_modtype_lr:
2067 		return (IFM_10G_LR);
2068 	case phy_modtype_lrm:
2069 		return (IFM_10G_LRM);
2070 	case phy_modtype_twinax:
2071 		return (IFM_10G_TWINAX);
2072 	case phy_modtype_twinax_long:
2073 		return (IFM_10G_TWINAX_LONG);
2074 	case phy_modtype_none:
2075 		return (IFM_NONE);
2076 	case phy_modtype_unknown:
2077 		return (IFM_UNKNOWN);
2078 	}
2079 
2080 	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2081 	return (IFM_UNKNOWN);
2082 }
2083 
2084 /*
2085  * Rebuilds the ifmedia list for this port, and sets the current media.
2086  */
2087 static void
2088 cxgb_build_medialist(struct port_info *p)
2089 {
2090 	struct cphy *phy = &p->phy;
2091 	struct ifmedia *media = &p->media;
2092 	int mod = phy->modtype;
2093 	int m = IFM_ETHER | IFM_FDX;
2094 
2095 	PORT_LOCK(p);
2096 
2097 	ifmedia_removeall(media);
2098 	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2099 		/* Copper (RJ45) */
2100 
2101 		if (phy->caps & SUPPORTED_10000baseT_Full)
2102 			ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2103 
2104 		if (phy->caps & SUPPORTED_1000baseT_Full)
2105 			ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2106 
2107 		if (phy->caps & SUPPORTED_100baseT_Full)
2108 			ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2109 
2110 		if (phy->caps & SUPPORTED_10baseT_Full)
2111 			ifmedia_add(media, m | IFM_10_T, mod, NULL);
2112 
2113 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2114 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2115 
2116 	} else if (phy->caps & SUPPORTED_TP) {
2117 		/* Copper (CX4) */
2118 
2119 		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2120 			("%s: unexpected cap 0x%x", __func__, phy->caps));
2121 
2122 		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2123 		ifmedia_set(media, m | IFM_10G_CX4);
2124 
2125 	} else if (phy->caps & SUPPORTED_FIBRE &&
2126 		   phy->caps & SUPPORTED_10000baseT_Full) {
2127 		/* 10G optical (but includes SFP+ twinax) */
2128 
2129 		m |= cxgb_ifm_type(mod);
2130 		if (IFM_SUBTYPE(m) == IFM_NONE)
2131 			m &= ~IFM_FDX;
2132 
2133 		ifmedia_add(media, m, mod, NULL);
2134 		ifmedia_set(media, m);
2135 
2136 	} else if (phy->caps & SUPPORTED_FIBRE &&
2137 		   phy->caps & SUPPORTED_1000baseT_Full) {
2138 		/* 1G optical */
2139 
2140 		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
2141 		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2142 		ifmedia_set(media, m | IFM_1000_SX);
2143 
2144 	} else {
2145 		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2146 			    phy->caps));
2147 	}
2148 
2149 	PORT_UNLOCK(p);
2150 }
2151 
2152 static void
2153 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2154 {
2155 	struct port_info *p = ifp->if_softc;
2156 	struct ifmedia_entry *cur = p->media.ifm_cur;
2157 	int speed = p->link_config.speed;
2158 
2159 	if (cur->ifm_data != p->phy.modtype) {
2160 		cxgb_build_medialist(p);
2161 		cur = p->media.ifm_cur;
2162 	}
2163 
2164 	ifmr->ifm_status = IFM_AVALID;
2165 	if (!p->link_config.link_ok)
2166 		return;
2167 
2168 	ifmr->ifm_status |= IFM_ACTIVE;
2169 
2170 	/*
2171 	 * active and current will differ iff current media is autoselect.  That
2172 	 * can happen only for copper RJ45.
2173 	 */
2174 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2175 		return;
2176 	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2177 		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2178 
2179 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2180 	if (speed == SPEED_10000)
2181 		ifmr->ifm_active |= IFM_10G_T;
2182 	else if (speed == SPEED_1000)
2183 		ifmr->ifm_active |= IFM_1000_T;
2184 	else if (speed == SPEED_100)
2185 		ifmr->ifm_active |= IFM_100_TX;
2186 	else if (speed == SPEED_10)
2187 		ifmr->ifm_active |= IFM_10_T;
2188 	else
2189 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2190 			    speed));
2191 }
2192 
2193 static uint64_t
2194 cxgb_get_counter(struct ifnet *ifp, ift_counter c)
2195 {
2196 	struct port_info *pi = ifp->if_softc;
2197 	struct adapter *sc = pi->adapter;
2198 	struct cmac *mac = &pi->mac;
2199 	struct mac_stats *mstats = &mac->stats;
2200 
2201 	cxgb_refresh_stats(pi);
2202 
2203 	switch (c) {
2204 	case IFCOUNTER_IPACKETS:
2205 		return (mstats->rx_frames);
2206 
2207 	case IFCOUNTER_IERRORS:
2208 		return (mstats->rx_jabber + mstats->rx_data_errs +
2209 		    mstats->rx_sequence_errs + mstats->rx_runt +
2210 		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
2211 		    mstats->rx_short + mstats->rx_fcs_errs);
2212 
2213 	case IFCOUNTER_OPACKETS:
2214 		return (mstats->tx_frames);
2215 
2216 	case IFCOUNTER_OERRORS:
2217 		return (mstats->tx_excess_collisions + mstats->tx_underrun +
2218 		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
2219 		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);
2220 
2221 	case IFCOUNTER_COLLISIONS:
2222 		return (mstats->tx_total_collisions);
2223 
2224 	case IFCOUNTER_IBYTES:
2225 		return (mstats->rx_octets);
2226 
2227 	case IFCOUNTER_OBYTES:
2228 		return (mstats->tx_octets);
2229 
2230 	case IFCOUNTER_IMCASTS:
2231 		return (mstats->rx_mcast_frames);
2232 
2233 	case IFCOUNTER_OMCASTS:
2234 		return (mstats->tx_mcast_frames);
2235 
2236 	case IFCOUNTER_IQDROPS:
2237 		return (mstats->rx_cong_drops);
2238 
2239 	case IFCOUNTER_OQDROPS: {
2240 		int i;
2241 		uint64_t drops;
2242 
2243 		drops = 0;
2244 		if (sc->flags & FULL_INIT_DONE) {
2245 			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
2246 				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
2247 		}
2248 
2249 		return (drops);
2250 
2251 	}
2252 
2253 	default:
2254 		return (if_get_counter_default(ifp, c));
2255 	}
2256 }
2257 
2258 static void
2259 cxgb_async_intr(void *data)
2260 {
2261 	adapter_t *sc = data;
2262 
2263 	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2264 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2265 	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2266 }
2267 
2268 static void
2269 link_check_callout(void *arg)
2270 {
2271 	struct port_info *pi = arg;
2272 	struct adapter *sc = pi->adapter;
2273 
2274 	if (!isset(&sc->open_device_map, pi->port_id))
2275 		return;
2276 
2277 	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2278 }
2279 
2280 static void
2281 check_link_status(void *arg, int pending)
2282 {
2283 	struct port_info *pi = arg;
2284 	struct adapter *sc = pi->adapter;
2285 
2286 	if (!isset(&sc->open_device_map, pi->port_id))
2287 		return;
2288 
2289 	t3_link_changed(sc, pi->port_id);
2290 
2291 	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
2292 	    pi->link_config.link_ok == 0)
2293 		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2294 }
2295 
2296 void
2297 t3_os_link_intr(struct port_info *pi)
2298 {
2299 	/*
2300 	 * Schedule a link check in the near future.  If the link is flapping
2301 	 * rapidly we'll keep resetting the callout and delaying the check until
2302 	 * things stabilize a bit.
2303 	 */
2304 	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2305 }
2306 
2307 static void
2308 check_t3b2_mac(struct adapter *sc)
2309 {
2310 	int i;
2311 
2312 	if (sc->flags & CXGB_SHUTDOWN)
2313 		return;
2314 
2315 	for_each_port(sc, i) {
2316 		struct port_info *p = &sc->port[i];
2317 		int status;
2318 #ifdef INVARIANTS
2319 		struct ifnet *ifp = p->ifp;
2320 #endif
2321 
2322 		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2323 		    !p->link_config.link_ok)
2324 			continue;
2325 
2326 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2327 			("%s: state mismatch (drv_flags %x, device_map %x)",
2328 			 __func__, ifp->if_drv_flags, sc->open_device_map));
2329 
2330 		PORT_LOCK(p);
2331 		status = t3b2_mac_watchdog_task(&p->mac);
2332 		if (status == 1)
2333 			p->mac.stats.num_toggled++;
2334 		else if (status == 2) {
2335 			struct cmac *mac = &p->mac;
2336 
2337 			cxgb_update_mac_settings(p);
2338 			t3_link_start(&p->phy, mac, &p->link_config);
2339 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2340 			t3_port_intr_enable(sc, p->port_id);
2341 			p->mac.stats.num_resets++;
2342 		}
2343 		PORT_UNLOCK(p);
2344 	}
2345 }
2346 
2347 static void
2348 cxgb_tick(void *arg)
2349 {
2350 	adapter_t *sc = (adapter_t *)arg;
2351 
2352 	if (sc->flags & CXGB_SHUTDOWN)
2353 		return;
2354 
2355 	taskqueue_enqueue(sc->tq, &sc->tick_task);
2356 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2357 }
2358 
2359 void
2360 cxgb_refresh_stats(struct port_info *pi)
2361 {
2362 	struct timeval tv;
2363 	const struct timeval interval = {0, 250000};    /* 250ms */
2364 
2365 	getmicrotime(&tv);
2366 	timevalsub(&tv, &interval);
2367 	if (timevalcmp(&tv, &pi->last_refreshed, <))
2368 		return;
2369 
2370 	PORT_LOCK(pi);
2371 	t3_mac_update_stats(&pi->mac);
2372 	PORT_UNLOCK(pi);
2373 	getmicrotime(&pi->last_refreshed);
2374 }
2375 
2376 static void
2377 cxgb_tick_handler(void *arg, int count)
2378 {
2379 	adapter_t *sc = (adapter_t *)arg;
2380 	const struct adapter_params *p = &sc->params;
2381 	int i;
2382 	uint32_t cause, reset;
2383 
2384 	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2385 		return;
2386 
2387 	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2388 		check_t3b2_mac(sc);
2389 
2390 	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
2391 	if (cause) {
2392 		struct sge_qset *qs = &sc->sge.qs[0];
2393 		uint32_t mask, v;
2394 
2395 		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
2396 
2397 		mask = 1;
2398 		for (i = 0; i < SGE_QSETS; i++) {
2399 			if (v & mask)
2400 				qs[i].rspq.starved++;
2401 			mask <<= 1;
2402 		}
2403 
2404 		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
2405 
2406 		for (i = 0; i < SGE_QSETS * 2; i++) {
2407 			if (v & mask) {
2408 				qs[i / 2].fl[i % 2].empty++;
2409 			}
2410 			mask <<= 1;
2411 		}
2412 
2413 		/* clear */
2414 		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
2415 		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
2416 	}
2417 
2418 	for (i = 0; i < sc->params.nports; i++) {
2419 		struct port_info *pi = &sc->port[i];
2420 		struct cmac *mac = &pi->mac;
2421 
2422 		if (!isset(&sc->open_device_map, pi->port_id))
2423 			continue;
2424 
2425 		cxgb_refresh_stats(pi);
2426 
2427 		if (mac->multiport)
2428 			continue;
2429 
2430 		/* Count rx fifo overflows, once per second */
2431 		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2432 		reset = 0;
2433 		if (cause & F_RXFIFO_OVERFLOW) {
2434 			mac->stats.rx_fifo_ovfl++;
2435 			reset |= F_RXFIFO_OVERFLOW;
2436 		}
2437 		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2438 	}
2439 }
2440 
2441 static void
2442 touch_bars(device_t dev)
2443 {
2444 	/*
2445 	 * Don't enable yet
2446 	 */
2447 #if !defined(__LP64__) && 0
2448 	u32 v;
2449 
2450 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2451 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2452 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2453 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2454 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2455 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2456 #endif
2457 }
2458 
2459 static int
2460 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2461 {
2462 	uint8_t *buf;
2463 	int err = 0;
2464 	u32 aligned_offset, aligned_len, *p;
2465 	struct adapter *adapter = pi->adapter;
2466 
2467 
2468 	aligned_offset = offset & ~3;
2469 	aligned_len = (len + (offset & 3) + 3) & ~3;
2470 
2471 	if (aligned_offset != offset || aligned_len != len) {
2472 		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2473 		if (!buf)
2474 			return (ENOMEM);
2475 		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2476 		if (!err && aligned_len > 4)
2477 			err = t3_seeprom_read(adapter,
2478 					      aligned_offset + aligned_len - 4,
2479 					      (u32 *)&buf[aligned_len - 4]);
2480 		if (err)
2481 			goto out;
2482 		memcpy(buf + (offset & 3), data, len);
2483 	} else
2484 		buf = (uint8_t *)(uintptr_t)data;
2485 
2486 	err = t3_seeprom_wp(adapter, 0);
2487 	if (err)
2488 		goto out;
2489 
2490 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2491 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2492 		aligned_offset += 4;
2493 	}
2494 
2495 	if (!err)
2496 		err = t3_seeprom_wp(adapter, 1);
2497 out:
2498 	if (buf != data)
2499 		free(buf, M_DEVBUF);
2500 	return err;
2501 }
2502 
2503 
2504 static int
2505 in_range(int val, int lo, int hi)
2506 {
2507 	return val < 0 || (val <= hi && val >= lo);
2508 }
2509 
2510 static int
2511 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
2512 {
2513        return (0);
2514 }
2515 
2516 static int
2517 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2518 {
2519        return (0);
2520 }
2521 
2522 static int
2523 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2524     int fflag, struct thread *td)
2525 {
2526 	int mmd, error = 0;
2527 	struct port_info *pi = dev->si_drv1;
2528 	adapter_t *sc = pi->adapter;
2529 
2530 #ifdef PRIV_SUPPORTED
2531 	if (priv_check(td, PRIV_DRIVER)) {
2532 		if (cxgb_debug)
2533 			printf("user does not have access to privileged ioctls\n");
2534 		return (EPERM);
2535 	}
2536 #else
2537 	if (suser(td)) {
2538 		if (cxgb_debug)
2539 			printf("user does not have access to privileged ioctls\n");
2540 		return (EPERM);
2541 	}
2542 #endif
2543 
2544 	switch (cmd) {
2545 	case CHELSIO_GET_MIIREG: {
2546 		uint32_t val;
2547 		struct cphy *phy = &pi->phy;
2548 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2549 
2550 		if (!phy->mdio_read)
2551 			return (EOPNOTSUPP);
2552 		if (is_10G(sc)) {
2553 			mmd = mid->phy_id >> 8;
2554 			if (!mmd)
2555 				mmd = MDIO_DEV_PCS;
2556 			else if (mmd > MDIO_DEV_VEND2)
2557 				return (EINVAL);
2558 
2559 			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2560 					     mid->reg_num, &val);
2561 		} else
2562 		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2563 					     mid->reg_num & 0x1f, &val);
2564 		if (error == 0)
2565 			mid->val_out = val;
2566 		break;
2567 	}
2568 	case CHELSIO_SET_MIIREG: {
2569 		struct cphy *phy = &pi->phy;
2570 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2571 
2572 		if (!phy->mdio_write)
2573 			return (EOPNOTSUPP);
2574 		if (is_10G(sc)) {
2575 			mmd = mid->phy_id >> 8;
2576 			if (!mmd)
2577 				mmd = MDIO_DEV_PCS;
2578 			else if (mmd > MDIO_DEV_VEND2)
2579 				return (EINVAL);
2580 
2581 			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2582 					      mmd, mid->reg_num, mid->val_in);
2583 		} else
2584 			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2585 					      mid->reg_num & 0x1f,
2586 					      mid->val_in);
2587 		break;
2588 	}
2589 	case CHELSIO_SETREG: {
2590 		struct ch_reg *edata = (struct ch_reg *)data;
2591 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2592 			return (EFAULT);
2593 		t3_write_reg(sc, edata->addr, edata->val);
2594 		break;
2595 	}
2596 	case CHELSIO_GETREG: {
2597 		struct ch_reg *edata = (struct ch_reg *)data;
2598 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2599 			return (EFAULT);
2600 		edata->val = t3_read_reg(sc, edata->addr);
2601 		break;
2602 	}
2603 	case CHELSIO_GET_SGE_CONTEXT: {
2604 		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2605 		mtx_lock_spin(&sc->sge.reg_lock);
2606 		switch (ecntxt->cntxt_type) {
2607 		case CNTXT_TYPE_EGRESS:
2608 			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2609 			    ecntxt->data);
2610 			break;
2611 		case CNTXT_TYPE_FL:
2612 			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2613 			    ecntxt->data);
2614 			break;
2615 		case CNTXT_TYPE_RSP:
2616 			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2617 			    ecntxt->data);
2618 			break;
2619 		case CNTXT_TYPE_CQ:
2620 			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2621 			    ecntxt->data);
2622 			break;
2623 		default:
2624 			error = EINVAL;
2625 			break;
2626 		}
2627 		mtx_unlock_spin(&sc->sge.reg_lock);
2628 		break;
2629 	}
2630 	case CHELSIO_GET_SGE_DESC: {
2631 		struct ch_desc *edesc = (struct ch_desc *)data;
2632 		int ret;
2633 		if (edesc->queue_num >= SGE_QSETS * 6)
2634 			return (EINVAL);
2635 		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2636 		    edesc->queue_num % 6, edesc->idx, edesc->data);
2637 		if (ret < 0)
2638 			return (EINVAL);
2639 		edesc->size = ret;
2640 		break;
2641 	}
2642 	case CHELSIO_GET_QSET_PARAMS: {
2643 		struct qset_params *q;
2644 		struct ch_qset_params *t = (struct ch_qset_params *)data;
2645 		int q1 = pi->first_qset;
2646 		int nqsets = pi->nqsets;
2647 		int i;
2648 
2649 		if (t->qset_idx >= nqsets)
2650 			return EINVAL;
2651 
2652 		i = q1 + t->qset_idx;
2653 		q = &sc->params.sge.qset[i];
2654 		t->rspq_size   = q->rspq_size;
2655 		t->txq_size[0] = q->txq_size[0];
2656 		t->txq_size[1] = q->txq_size[1];
2657 		t->txq_size[2] = q->txq_size[2];
2658 		t->fl_size[0]  = q->fl_size;
2659 		t->fl_size[1]  = q->jumbo_size;
2660 		t->polling     = q->polling;
2661 		t->lro         = q->lro;
2662 		t->intr_lat    = q->coalesce_usecs;
2663 		t->cong_thres  = q->cong_thres;
2664 		t->qnum        = i;
2665 
2666 		if ((sc->flags & FULL_INIT_DONE) == 0)
2667 			t->vector = 0;
2668 		else if (sc->flags & USING_MSIX)
2669 			t->vector = rman_get_start(sc->msix_irq_res[i]);
2670 		else
2671 			t->vector = rman_get_start(sc->irq_res);
2672 
2673 		break;
2674 	}
2675 	case CHELSIO_GET_QSET_NUM: {
2676 		struct ch_reg *edata = (struct ch_reg *)data;
2677 		edata->val = pi->nqsets;
2678 		break;
2679 	}
2680 	case CHELSIO_LOAD_FW: {
2681 		uint8_t *fw_data;
2682 		uint32_t vers;
2683 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2684 
2685 		/*
2686 		 * You're allowed to load a firmware only before FULL_INIT_DONE
2687 		 *
2688 		 * FW_UPTODATE is also set so the rest of the initialization
2689 		 * will not overwrite what was loaded here.  This gives you the
2690 		 * flexibility to load any firmware (and maybe shoot yourself in
2691 		 * the foot).
2692 		 */
2693 
2694 		ADAPTER_LOCK(sc);
2695 		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2696 			ADAPTER_UNLOCK(sc);
2697 			return (EBUSY);
2698 		}
2699 
2700 		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2701 		if (!fw_data)
2702 			error = ENOMEM;
2703 		else
2704 			error = copyin(t->buf, fw_data, t->len);
2705 
2706 		if (!error)
2707 			error = -t3_load_fw(sc, fw_data, t->len);
2708 
2709 		if (t3_get_fw_version(sc, &vers) == 0) {
2710 			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2711 			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2712 			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2713 		}
2714 
2715 		if (!error)
2716 			sc->flags |= FW_UPTODATE;
2717 
2718 		free(fw_data, M_DEVBUF);
2719 		ADAPTER_UNLOCK(sc);
2720 		break;
2721 	}
2722 	case CHELSIO_LOAD_BOOT: {
2723 		uint8_t *boot_data;
2724 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2725 
2726 		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2727 		if (!boot_data)
2728 			return ENOMEM;
2729 
2730 		error = copyin(t->buf, boot_data, t->len);
2731 		if (!error)
2732 			error = -t3_load_boot(sc, boot_data, t->len);
2733 
2734 		free(boot_data, M_DEVBUF);
2735 		break;
2736 	}
2737 	case CHELSIO_GET_PM: {
2738 		struct ch_pm *m = (struct ch_pm *)data;
2739 		struct tp_params *p = &sc->params.tp;
2740 
2741 		if (!is_offload(sc))
2742 			return (EOPNOTSUPP);
2743 
2744 		m->tx_pg_sz = p->tx_pg_size;
2745 		m->tx_num_pg = p->tx_num_pgs;
2746 		m->rx_pg_sz  = p->rx_pg_size;
2747 		m->rx_num_pg = p->rx_num_pgs;
2748 		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2749 
2750 		break;
2751 	}
2752 	case CHELSIO_SET_PM: {
2753 		struct ch_pm *m = (struct ch_pm *)data;
2754 		struct tp_params *p = &sc->params.tp;
2755 
2756 		if (!is_offload(sc))
2757 			return (EOPNOTSUPP);
2758 		if (sc->flags & FULL_INIT_DONE)
2759 			return (EBUSY);
2760 
2761 		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2762 		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2763 			return (EINVAL);	/* not power of 2 */
2764 		if (!(m->rx_pg_sz & 0x14000))
2765 			return (EINVAL);	/* not 16KB or 64KB */
2766 		if (!(m->tx_pg_sz & 0x1554000))
2767 			return (EINVAL);
2768 		if (m->tx_num_pg == -1)
2769 			m->tx_num_pg = p->tx_num_pgs;
2770 		if (m->rx_num_pg == -1)
2771 			m->rx_num_pg = p->rx_num_pgs;
2772 		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2773 			return (EINVAL);
2774 		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2775 		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2776 			return (EINVAL);
2777 
2778 		p->rx_pg_size = m->rx_pg_sz;
2779 		p->tx_pg_size = m->tx_pg_sz;
2780 		p->rx_num_pgs = m->rx_num_pg;
2781 		p->tx_num_pgs = m->tx_num_pg;
2782 		break;
2783 	}
2784 	case CHELSIO_SETMTUTAB: {
2785 		struct ch_mtus *m = (struct ch_mtus *)data;
2786 		int i;
2787 
2788 		if (!is_offload(sc))
2789 			return (EOPNOTSUPP);
2790 		if (offload_running(sc))
2791 			return (EBUSY);
2792 		if (m->nmtus != NMTUS)
2793 			return (EINVAL);
2794 		if (m->mtus[0] < 81)         /* accommodate SACK */
2795 			return (EINVAL);
2796 
2797 		/*
2798 		 * MTUs must be in ascending order
2799 		 */
2800 		for (i = 1; i < NMTUS; ++i)
2801 			if (m->mtus[i] < m->mtus[i - 1])
2802 				return (EINVAL);
2803 
2804 		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2805 		break;
2806 	}
2807 	case CHELSIO_GETMTUTAB: {
2808 		struct ch_mtus *m = (struct ch_mtus *)data;
2809 
2810 		if (!is_offload(sc))
2811 			return (EOPNOTSUPP);
2812 
2813 		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2814 		m->nmtus = NMTUS;
2815 		break;
2816 	}
2817 	case CHELSIO_GET_MEM: {
2818 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2819 		struct mc7 *mem;
2820 		uint8_t *useraddr;
2821 		u64 buf[32];
2822 
2823 		/*
2824 		 * Use these to avoid modifying len/addr in the return
2825 		 * struct
2826 		 */
2827 		uint32_t len = t->len, addr = t->addr;
2828 
2829 		if (!is_offload(sc))
2830 			return (EOPNOTSUPP);
2831 		if (!(sc->flags & FULL_INIT_DONE))
2832 			return (EIO);         /* need the memory controllers */
2833 		if ((addr & 0x7) || (len & 0x7))
2834 			return (EINVAL);
2835 		if (t->mem_id == MEM_CM)
2836 			mem = &sc->cm;
2837 		else if (t->mem_id == MEM_PMRX)
2838 			mem = &sc->pmrx;
2839 		else if (t->mem_id == MEM_PMTX)
2840 			mem = &sc->pmtx;
2841 		else
2842 			return (EINVAL);
2843 
2844 		/*
2845 		 * Version scheme:
2846 		 * bits 0..9: chip version
2847 		 * bits 10..15: chip revision
2848 		 */
2849 		t->version = 3 | (sc->params.rev << 10);
2850 
2851 		/*
2852 		 * Read 256 bytes at a time as len can be large and we don't
2853 		 * want to use huge intermediate buffers.
2854 		 */
2855 		useraddr = (uint8_t *)t->buf;
2856 		while (len) {
2857 			unsigned int chunk = min(len, sizeof(buf));
2858 
2859 			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2860 			if (error)
2861 				return (-error);
2862 			if (copyout(buf, useraddr, chunk))
2863 				return (EFAULT);
2864 			useraddr += chunk;
2865 			addr += chunk;
2866 			len -= chunk;
2867 		}
2868 		break;
2869 	}
2870 	case CHELSIO_READ_TCAM_WORD: {
2871 		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2872 
2873 		if (!is_offload(sc))
2874 			return (EOPNOTSUPP);
2875 		if (!(sc->flags & FULL_INIT_DONE))
2876 			return (EIO);         /* need MC5 */
2877 		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2878 		break;
2879 	}
2880 	case CHELSIO_SET_TRACE_FILTER: {
2881 		struct ch_trace *t = (struct ch_trace *)data;
2882 		const struct trace_params *tp;
2883 
2884 		tp = (const struct trace_params *)&t->sip;
2885 		if (t->config_tx)
2886 			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2887 					       t->trace_tx);
2888 		if (t->config_rx)
2889 			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2890 					       t->trace_rx);
2891 		break;
2892 	}
2893 	case CHELSIO_SET_PKTSCHED: {
2894 		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2895 		if (sc->open_device_map == 0)
2896 			return (EAGAIN);
2897 		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2898 		    p->binding);
2899 		break;
2900 	}
2901 	case CHELSIO_IFCONF_GETREGS: {
2902 		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2903 		int reglen = cxgb_get_regs_len();
2904 		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2905 		if (buf == NULL) {
2906 			return (ENOMEM);
2907 		}
2908 		if (regs->len > reglen)
2909 			regs->len = reglen;
2910 		else if (regs->len < reglen)
2911 			error = ENOBUFS;
2912 
2913 		if (!error) {
2914 			cxgb_get_regs(sc, regs, buf);
2915 			error = copyout(buf, regs->data, reglen);
2916 		}
2917 		free(buf, M_DEVBUF);
2918 
2919 		break;
2920 	}
2921 	case CHELSIO_SET_HW_SCHED: {
2922 		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2923 		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2924 
2925 		if ((sc->flags & FULL_INIT_DONE) == 0)
2926 			return (EAGAIN);       /* need TP to be initialized */
2927 		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2928 		    !in_range(t->channel, 0, 1) ||
2929 		    !in_range(t->kbps, 0, 10000000) ||
2930 		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2931 		    !in_range(t->flow_ipg, 0,
2932 			      dack_ticks_to_usec(sc, 0x7ff)))
2933 			return (EINVAL);
2934 
2935 		if (t->kbps >= 0) {
2936 			error = t3_config_sched(sc, t->kbps, t->sched);
2937 			if (error < 0)
2938 				return (-error);
2939 		}
2940 		if (t->class_ipg >= 0)
2941 			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2942 		if (t->flow_ipg >= 0) {
2943 			t->flow_ipg *= 1000;     /* us -> ns */
2944 			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2945 		}
2946 		if (t->mode >= 0) {
2947 			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2948 
2949 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2950 					 bit, t->mode ? bit : 0);
2951 		}
2952 		if (t->channel >= 0)
2953 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2954 					 1 << t->sched, t->channel << t->sched);
2955 		break;
2956 	}
2957 	case CHELSIO_GET_EEPROM: {
2958 		int i;
2959 		struct ch_eeprom *e = (struct ch_eeprom *)data;
2960 		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2961 
2962 		if (buf == NULL) {
2963 			return (ENOMEM);
2964 		}
2965 		e->magic = EEPROM_MAGIC;
2966 		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2967 			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2968 
2969 		if (!error)
2970 			error = copyout(buf + e->offset, e->data, e->len);
2971 
2972 		free(buf, M_DEVBUF);
2973 		break;
2974 	}
2975 	case CHELSIO_CLEAR_STATS: {
2976 		if (!(sc->flags & FULL_INIT_DONE))
2977 			return EAGAIN;
2978 
2979 		PORT_LOCK(pi);
2980 		t3_mac_update_stats(&pi->mac);
2981 		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2982 		PORT_UNLOCK(pi);
2983 		break;
2984 	}
2985 	case CHELSIO_GET_UP_LA: {
2986 		struct ch_up_la *la = (struct ch_up_la *)data;
2987 		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
2988 		if (buf == NULL) {
2989 			return (ENOMEM);
2990 		}
2991 		if (la->bufsize < LA_BUFSIZE)
2992 			error = ENOBUFS;
2993 
2994 		if (!error)
2995 			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
2996 					      &la->bufsize, buf);
2997 		if (!error)
2998 			error = copyout(buf, la->data, la->bufsize);
2999 
3000 		free(buf, M_DEVBUF);
3001 		break;
3002 	}
3003 	case CHELSIO_GET_UP_IOQS: {
3004 		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3005 		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3006 		uint32_t *v;
3007 
3008 		if (buf == NULL) {
3009 			return (ENOMEM);
3010 		}
3011 		if (ioqs->bufsize < IOQS_BUFSIZE)
3012 			error = ENOBUFS;
3013 
3014 		if (!error)
3015 			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3016 
3017 		if (!error) {
3018 			v = (uint32_t *)buf;
3019 
3020 			ioqs->ioq_rx_enable = *v++;
3021 			ioqs->ioq_tx_enable = *v++;
3022 			ioqs->ioq_rx_status = *v++;
3023 			ioqs->ioq_tx_status = *v++;
3024 
3025 			error = copyout(v, ioqs->data, ioqs->bufsize);
3026 		}
3027 
3028 		free(buf, M_DEVBUF);
3029 		break;
3030 	}
3031 	case CHELSIO_SET_FILTER: {
3032 		struct ch_filter *f = (struct ch_filter *)data;
3033 		struct filter_info *p;
3034 		unsigned int nfilters = sc->params.mc5.nfilters;
3035 
3036 		if (!is_offload(sc))
3037 			return (EOPNOTSUPP);	/* No TCAM */
3038 		if (!(sc->flags & FULL_INIT_DONE))
3039 			return (EAGAIN);	/* mc5 not setup yet */
3040 		if (nfilters == 0)
3041 			return (EBUSY);		/* TOE will use TCAM */
3042 
3043 		/* sanity checks */
3044 		if (f->filter_id >= nfilters ||
3045 		    (f->val.dip && f->mask.dip != 0xffffffff) ||
3046 		    (f->val.sport && f->mask.sport != 0xffff) ||
3047 		    (f->val.dport && f->mask.dport != 0xffff) ||
3048 		    (f->val.vlan && f->mask.vlan != 0xfff) ||
3049 		    (f->val.vlan_prio &&
3050 			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3051 		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3052 		    f->qset >= SGE_QSETS ||
3053 		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3054 			return (EINVAL);
3055 
3056 		/* Was allocated with M_WAITOK */
3057 		KASSERT(sc->filters, ("filter table NULL\n"));
3058 
3059 		p = &sc->filters[f->filter_id];
3060 		if (p->locked)
3061 			return (EPERM);
3062 
3063 		bzero(p, sizeof(*p));
3064 		p->sip = f->val.sip;
3065 		p->sip_mask = f->mask.sip;
3066 		p->dip = f->val.dip;
3067 		p->sport = f->val.sport;
3068 		p->dport = f->val.dport;
3069 		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3070 		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3071 		    FILTER_NO_VLAN_PRI;
3072 		p->mac_hit = f->mac_hit;
3073 		p->mac_vld = f->mac_addr_idx != 0xffff;
3074 		p->mac_idx = f->mac_addr_idx;
3075 		p->pkt_type = f->proto;
3076 		p->report_filter_id = f->want_filter_id;
3077 		p->pass = f->pass;
3078 		p->rss = f->rss;
3079 		p->qset = f->qset;
3080 
3081 		error = set_filter(sc, f->filter_id, p);
3082 		if (error == 0)
3083 			p->valid = 1;
3084 		break;
3085 	}
3086 	case CHELSIO_DEL_FILTER: {
3087 		struct ch_filter *f = (struct ch_filter *)data;
3088 		struct filter_info *p;
3089 		unsigned int nfilters = sc->params.mc5.nfilters;
3090 
3091 		if (!is_offload(sc))
3092 			return (EOPNOTSUPP);
3093 		if (!(sc->flags & FULL_INIT_DONE))
3094 			return (EAGAIN);
3095 		if (nfilters == 0 || sc->filters == NULL)
3096 			return (EINVAL);
3097 		if (f->filter_id >= nfilters)
3098 		       return (EINVAL);
3099 
3100 		p = &sc->filters[f->filter_id];
3101 		if (p->locked)
3102 			return (EPERM);
3103 		if (!p->valid)
3104 			return (EFAULT); /* Read "Bad address" as "Bad index" */
3105 
3106 		bzero(p, sizeof(*p));
3107 		p->sip = p->sip_mask = 0xffffffff;
3108 		p->vlan = 0xfff;
3109 		p->vlan_prio = FILTER_NO_VLAN_PRI;
3110 		p->pkt_type = 1;
3111 		error = set_filter(sc, f->filter_id, p);
3112 		break;
3113 	}
3114 	case CHELSIO_GET_FILTER: {
3115 		struct ch_filter *f = (struct ch_filter *)data;
3116 		struct filter_info *p;
3117 		unsigned int i, nfilters = sc->params.mc5.nfilters;
3118 
3119 		if (!is_offload(sc))
3120 			return (EOPNOTSUPP);
3121 		if (!(sc->flags & FULL_INIT_DONE))
3122 			return (EAGAIN);
3123 		if (nfilters == 0 || sc->filters == NULL)
3124 			return (EINVAL);
3125 
3126 		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3127 		for (; i < nfilters; i++) {
3128 			p = &sc->filters[i];
3129 			if (!p->valid)
3130 				continue;
3131 
3132 			bzero(f, sizeof(*f));
3133 
3134 			f->filter_id = i;
3135 			f->val.sip = p->sip;
3136 			f->mask.sip = p->sip_mask;
3137 			f->val.dip = p->dip;
3138 			f->mask.dip = p->dip ? 0xffffffff : 0;
3139 			f->val.sport = p->sport;
3140 			f->mask.sport = p->sport ? 0xffff : 0;
3141 			f->val.dport = p->dport;
3142 			f->mask.dport = p->dport ? 0xffff : 0;
3143 			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3144 			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3145 			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3146 			    0 : p->vlan_prio;
3147 			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3148 			    0 : FILTER_NO_VLAN_PRI;
3149 			f->mac_hit = p->mac_hit;
3150 			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3151 			f->proto = p->pkt_type;
3152 			f->want_filter_id = p->report_filter_id;
3153 			f->pass = p->pass;
3154 			f->rss = p->rss;
3155 			f->qset = p->qset;
3156 
3157 			break;
3158 		}
3159 
3160 		if (i == nfilters)
3161 			f->filter_id = 0xffffffff;
3162 		break;
3163 	}
3164 	default:
3165 		return (EOPNOTSUPP);
3166 		break;
3167 	}
3168 
3169 	return (error);
3170 }
3171 
3172 static __inline void
3173 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3174     unsigned int end)
3175 {
3176 	uint32_t *p = (uint32_t *)(buf + start);
3177 
3178 	for ( ; start <= end; start += sizeof(uint32_t))
3179 		*p++ = t3_read_reg(ap, start);
3180 }
3181 
3182 #define T3_REGMAP_SIZE (3 * 1024)
3183 static int
3184 cxgb_get_regs_len(void)
3185 {
3186 	return T3_REGMAP_SIZE;
3187 }
3188 
3189 static void
3190 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3191 {
3192 
3193 	/*
3194 	 * Version scheme:
3195 	 * bits 0..9: chip version
3196 	 * bits 10..15: chip revision
3197 	 * bit 31: set for PCIe cards
3198 	 */
3199 	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3200 
3201 	/*
3202 	 * We skip the MAC statistics registers because they are clear-on-read.
3203 	 * Also reading multi-register stats would need to synchronize with the
3204 	 * periodic mac stats accumulation.  Hard to justify the complexity.
3205 	 */
3206 	memset(buf, 0, cxgb_get_regs_len());
3207 	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3208 	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3209 	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3210 	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3211 	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3212 	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3213 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
3214 	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3215 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3216 }
3217 
3218 static int
3219 alloc_filters(struct adapter *sc)
3220 {
3221 	struct filter_info *p;
3222 	unsigned int nfilters = sc->params.mc5.nfilters;
3223 
3224 	if (nfilters == 0)
3225 		return (0);
3226 
3227 	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3228 	sc->filters = p;
3229 
3230 	p = &sc->filters[nfilters - 1];
3231 	p->vlan = 0xfff;
3232 	p->vlan_prio = FILTER_NO_VLAN_PRI;
3233 	p->pass = p->rss = p->valid = p->locked = 1;
3234 
3235 	return (0);
3236 }
3237 
3238 static int
3239 setup_hw_filters(struct adapter *sc)
3240 {
3241 	int i, rc;
3242 	unsigned int nfilters = sc->params.mc5.nfilters;
3243 
3244 	if (!sc->filters)
3245 		return (0);
3246 
3247 	t3_enable_filters(sc);
3248 
3249 	for (i = rc = 0; i < nfilters && !rc; i++) {
3250 		if (sc->filters[i].locked)
3251 			rc = set_filter(sc, i, &sc->filters[i]);
3252 	}
3253 
3254 	return (rc);
3255 }
3256 
3257 static int
3258 set_filter(struct adapter *sc, int id, const struct filter_info *f)
3259 {
3260 	int len;
3261 	struct mbuf *m;
3262 	struct ulp_txpkt *txpkt;
3263 	struct work_request_hdr *wr;
3264 	struct cpl_pass_open_req *oreq;
3265 	struct cpl_set_tcb_field *sreq;
3266 
3267 	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3268 	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3269 
3270 	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3271 	      sc->params.mc5.nfilters;
3272 
3273 	m = m_gethdr(M_WAITOK, MT_DATA);
3274 	m->m_len = m->m_pkthdr.len = len;
3275 	bzero(mtod(m, char *), len);
3276 
3277 	wr = mtod(m, struct work_request_hdr *);
3278 	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3279 
3280 	oreq = (struct cpl_pass_open_req *)(wr + 1);
3281 	txpkt = (struct ulp_txpkt *)oreq;
3282 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3283 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3284 	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3285 	oreq->local_port = htons(f->dport);
3286 	oreq->peer_port = htons(f->sport);
3287 	oreq->local_ip = htonl(f->dip);
3288 	oreq->peer_ip = htonl(f->sip);
3289 	oreq->peer_netmask = htonl(f->sip_mask);
3290 	oreq->opt0h = 0;
3291 	oreq->opt0l = htonl(F_NO_OFFLOAD);
3292 	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3293 			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3294 			 V_VLAN_PRI(f->vlan_prio >> 1) |
3295 			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3296 			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3297 			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3298 
3299 	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3300 	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3301 			  (f->report_filter_id << 15) | (1 << 23) |
3302 			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3303 	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3304 	t3_mgmt_tx(sc, m);
3305 
3306 	if (f->pass && !f->rss) {
3307 		len = sizeof(*sreq);
3308 		m = m_gethdr(M_WAITOK, MT_DATA);
3309 		m->m_len = m->m_pkthdr.len = len;
3310 		bzero(mtod(m, char *), len);
3311 		sreq = mtod(m, struct cpl_set_tcb_field *);
3312 		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3313 		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3314 				 (u64)sc->rrss_map[f->qset] << 19);
3315 		t3_mgmt_tx(sc, m);
3316 	}
3317 	return 0;
3318 }
3319 
3320 static inline void
3321 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3322     unsigned int word, u64 mask, u64 val)
3323 {
3324 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3325 	req->reply = V_NO_REPLY(1);
3326 	req->cpu_idx = 0;
3327 	req->word = htons(word);
3328 	req->mask = htobe64(mask);
3329 	req->val = htobe64(val);
3330 }
3331 
3332 static inline void
3333 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3334     unsigned int word, u64 mask, u64 val)
3335 {
3336 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3337 
3338 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3339 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3340 	mk_set_tcb_field(req, tid, word, mask, val);
3341 }
3342 
3343 void
3344 t3_iterate(void (*func)(struct adapter *, void *), void *arg)
3345 {
3346 	struct adapter *sc;
3347 
3348 	mtx_lock(&t3_list_lock);
3349 	SLIST_FOREACH(sc, &t3_list, link) {
3350 		/*
3351 		 * func should not make any assumptions about what state sc is
3352 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
3353 		 */
3354 		func(sc, arg);
3355 	}
3356 	mtx_unlock(&t3_list_lock);
3357 }
3358 
3359 #ifdef TCP_OFFLOAD
3360 static int
3361 toe_capability(struct port_info *pi, int enable)
3362 {
3363 	int rc;
3364 	struct adapter *sc = pi->adapter;
3365 
3366 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3367 
3368 	if (!is_offload(sc))
3369 		return (ENODEV);
3370 
3371 	if (enable) {
3372 		if (!(sc->flags & FULL_INIT_DONE)) {
3373 			log(LOG_WARNING,
3374 			    "You must enable a cxgb interface first\n");
3375 			return (EAGAIN);
3376 		}
3377 
3378 		if (isset(&sc->offload_map, pi->port_id))
3379 			return (0);
3380 
3381 		if (!(sc->flags & TOM_INIT_DONE)) {
3382 			rc = t3_activate_uld(sc, ULD_TOM);
3383 			if (rc == EAGAIN) {
3384 				log(LOG_WARNING,
3385 				    "You must kldload t3_tom.ko before trying "
3386 				    "to enable TOE on a cxgb interface.\n");
3387 			}
3388 			if (rc != 0)
3389 				return (rc);
3390 			KASSERT(sc->tom_softc != NULL,
3391 			    ("%s: TOM activated but softc NULL", __func__));
3392 			KASSERT(sc->flags & TOM_INIT_DONE,
3393 			    ("%s: TOM activated but flag not set", __func__));
3394 		}
3395 
3396 		setbit(&sc->offload_map, pi->port_id);
3397 
3398 		/*
3399 		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
3400 		 * enabled on any port.  Need to figure out how to enable,
3401 		 * disable, load, and unload iWARP cleanly.
3402 		 */
3403 		if (!isset(&sc->offload_map, MAX_NPORTS) &&
3404 		    t3_activate_uld(sc, ULD_IWARP) == 0)
3405 			setbit(&sc->offload_map, MAX_NPORTS);
3406 	} else {
3407 		if (!isset(&sc->offload_map, pi->port_id))
3408 			return (0);
3409 
3410 		KASSERT(sc->flags & TOM_INIT_DONE,
3411 		    ("%s: TOM never initialized?", __func__));
3412 		clrbit(&sc->offload_map, pi->port_id);
3413 	}
3414 
3415 	return (0);
3416 }
3417 
3418 /*
3419  * Add an upper layer driver to the global list.
3420  */
3421 int
3422 t3_register_uld(struct uld_info *ui)
3423 {
3424 	int rc = 0;
3425 	struct uld_info *u;
3426 
3427 	mtx_lock(&t3_uld_list_lock);
3428 	SLIST_FOREACH(u, &t3_uld_list, link) {
3429 	    if (u->uld_id == ui->uld_id) {
3430 		    rc = EEXIST;
3431 		    goto done;
3432 	    }
3433 	}
3434 
3435 	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
3436 	ui->refcount = 0;
3437 done:
3438 	mtx_unlock(&t3_uld_list_lock);
3439 	return (rc);
3440 }
3441 
3442 int
3443 t3_unregister_uld(struct uld_info *ui)
3444 {
3445 	int rc = EINVAL;
3446 	struct uld_info *u;
3447 
3448 	mtx_lock(&t3_uld_list_lock);
3449 
3450 	SLIST_FOREACH(u, &t3_uld_list, link) {
3451 	    if (u == ui) {
3452 		    if (ui->refcount > 0) {
3453 			    rc = EBUSY;
3454 			    goto done;
3455 		    }
3456 
3457 		    SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
3458 		    rc = 0;
3459 		    goto done;
3460 	    }
3461 	}
3462 done:
3463 	mtx_unlock(&t3_uld_list_lock);
3464 	return (rc);
3465 }
3466 
3467 int
3468 t3_activate_uld(struct adapter *sc, int id)
3469 {
3470 	int rc = EAGAIN;
3471 	struct uld_info *ui;
3472 
3473 	mtx_lock(&t3_uld_list_lock);
3474 
3475 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3476 		if (ui->uld_id == id) {
3477 			rc = ui->activate(sc);
3478 			if (rc == 0)
3479 				ui->refcount++;
3480 			goto done;
3481 		}
3482 	}
3483 done:
3484 	mtx_unlock(&t3_uld_list_lock);
3485 
3486 	return (rc);
3487 }
3488 
3489 int
3490 t3_deactivate_uld(struct adapter *sc, int id)
3491 {
3492 	int rc = EINVAL;
3493 	struct uld_info *ui;
3494 
3495 	mtx_lock(&t3_uld_list_lock);
3496 
3497 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3498 		if (ui->uld_id == id) {
3499 			rc = ui->deactivate(sc);
3500 			if (rc == 0)
3501 				ui->refcount--;
3502 			goto done;
3503 		}
3504 	}
3505 done:
3506 	mtx_unlock(&t3_uld_list_lock);
3507 
3508 	return (rc);
3509 }
3510 
3511 static int
3512 cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
3513     struct mbuf *m)
3514 {
3515 	m_freem(m);
3516 	return (EDOOFUS);
3517 }
3518 
3519 int
3520 t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3521 {
3522 	uintptr_t *loc, new;
3523 
3524 	if (opcode >= NUM_CPL_HANDLERS)
3525 		return (EINVAL);
3526 
3527 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3528 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
3529 	atomic_store_rel_ptr(loc, new);
3530 
3531 	return (0);
3532 }
3533 #endif
3534 
3535 static int
3536 cxgbc_mod_event(module_t mod, int cmd, void *arg)
3537 {
3538 	int rc = 0;
3539 
3540 	switch (cmd) {
3541 	case MOD_LOAD:
3542 		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
3543 		SLIST_INIT(&t3_list);
3544 #ifdef TCP_OFFLOAD
3545 		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
3546 		SLIST_INIT(&t3_uld_list);
3547 #endif
3548 		break;
3549 
3550 	case MOD_UNLOAD:
3551 #ifdef TCP_OFFLOAD
3552 		mtx_lock(&t3_uld_list_lock);
3553 		if (!SLIST_EMPTY(&t3_uld_list)) {
3554 			rc = EBUSY;
3555 			mtx_unlock(&t3_uld_list_lock);
3556 			break;
3557 		}
3558 		mtx_unlock(&t3_uld_list_lock);
3559 		mtx_destroy(&t3_uld_list_lock);
3560 #endif
3561 		mtx_lock(&t3_list_lock);
3562 		if (!SLIST_EMPTY(&t3_list)) {
3563 			rc = EBUSY;
3564 			mtx_unlock(&t3_list_lock);
3565 			break;
3566 		}
3567 		mtx_unlock(&t3_list_lock);
3568 		mtx_destroy(&t3_list_lock);
3569 		break;
3570 	}
3571 
3572 	return (rc);
3573 }
3574