xref: /titanic_41/usr/src/uts/common/io/elxl/elxl.c (revision 82629e3015252bf18319ba3815c773df23e21436)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 1998 The NetBSD Foundation, Inc.
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to The NetBSD Foundation
11  * by Frank van der Linden.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/varargs.h>
36 #include <sys/types.h>
37 #include <sys/modctl.h>
38 #include <sys/conf.h>
39 #include <sys/devops.h>
40 #include <sys/stream.h>
41 #include <sys/strsun.h>
42 #include <sys/cmn_err.h>
43 #include <sys/ethernet.h>
44 #include <sys/pci.h>
45 #include <sys/kmem.h>
46 #include <sys/time.h>
47 #include <sys/mii.h>
48 #include <sys/miiregs.h>
49 #include <sys/mac_ether.h>
50 #include <sys/mac_provider.h>
51 #include <sys/strsubr.h>
52 #include <sys/pattr.h>
53 #include <sys/dlpi.h>
54 #include <sys/ddi.h>
55 #include <sys/sunddi.h>
56 
57 #include <sys/vlan.h>
58 
59 #include "elxl.h"
60 
61 static boolean_t elxl_add_intr(elxl_t *);
62 static void elxl_probe_media(elxl_t *);
63 static void elxl_set_rxfilter(elxl_t *);
64 static void elxl_set_media(elxl_t *);
65 static uint16_t elxl_read_eeprom(elxl_t *, int);
66 static void elxl_init(elxl_t *);
67 static void elxl_stop(elxl_t *);
68 static void elxl_reset(elxl_t *);
69 static void elxl_getstats(elxl_t *);
70 
71 static int elxl_eeprom_busy(elxl_t *);
72 
73 static void elxl_setup_tx(elxl_t *);
74 
75 static uint16_t elxl_mii_read(void *, uint8_t, uint8_t);
76 static void elxl_mii_write(void *, uint8_t, uint8_t, uint16_t);
77 static void elxl_mii_notify(void *, link_state_t);
78 
79 static int elxl_m_stat(void *, uint_t, uint64_t *);
80 static int elxl_m_start(void *);
81 static void elxl_m_stop(void *);
82 static mblk_t *elxl_m_tx(void *, mblk_t *);
83 static int elxl_m_promisc(void *, boolean_t);
84 static int elxl_m_multicst(void *, boolean_t, const uint8_t *);
85 static int elxl_m_unicst(void *, const uint8_t *);
86 static int elxl_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
87     uint_t, void *, uint_t *);
88 static int elxl_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
89     const void *);
90 static boolean_t elxl_m_getcapab(void *, mac_capab_t cap, void *);
91 static uint_t elxl_intr(caddr_t, caddr_t);
92 static void elxl_error(elxl_t *, char *, ...);
93 static void elxl_linkcheck(void *);
94 static int elxl_attach(dev_info_t *);
95 static void elxl_detach(elxl_t *);
96 static void elxl_suspend(elxl_t *);
97 static void elxl_resume(dev_info_t *);
98 static int elxl_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
99 static int elxl_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
100 static int elxl_ddi_quiesce(dev_info_t *);
101 
102 static ddi_device_acc_attr_t ex_dev_acc_attr = {
103 	DDI_DEVICE_ATTR_V0,
104 	DDI_STRUCTURE_LE_ACC,
105 	DDI_STRICTORDER_ACC
106 };
107 
108 static ddi_device_acc_attr_t ex_buf_acc_attr = {
109 	DDI_DEVICE_ATTR_V0,
110 	DDI_NEVERSWAP_ACC,
111 	DDI_STORECACHING_OK_ACC
112 };
113 
114 /*
115  * In theory buffers can have more flexible DMA attributes, but since
116  * we're just using a preallocated region with bcopy, there is little
117  * reason to allow for looser alignment.  (Further, the 8-byte
118  * alignment can allow for more efficient bcopy and similar operations
119  * from the buffer.)
120  */
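/*
 * Note also that dma_attr_sgllen is 1, so every bind done with these
 * attributes yields exactly one DMA cookie; recording dmac_address is
 * therefore sufficient to describe a buffer or descriptor region.
 */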
121 static ddi_dma_attr_t ex_dma_attr = {
122 	DMA_ATTR_V0,		/* dma_attr_version */
123 	0,			/* dma_attr_addr_lo */
124 	0xFFFFFFFFU,		/* dma_attr_addr_hi */
125 	0x00FFFFFFU,		/* dma_attr_count_max */
126 	8,			/* dma_attr_align */
127 	0x7F,			/* dma_attr_burstsizes */
128 	1,			/* dma_attr_minxfer */
129 	0xFFFFFFFFU,		/* dma_attr_maxxfer */
130 	0xFFFFFFFFU,		/* dma_attr_seg */
131 	1,			/* dma_attr_sgllen */
132 	1,			/* dma_attr_granular */
133 	0			/* dma_attr_flags */
134 };
135 
136 static uint8_t ex_broadcast[6] = {
137 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
138 };
139 
140 /*
141  * Structure to map media-present bits in boards to the XCVR_SEL_*
142  * transceiver selections.  Used for table-driven media selection.
143  */
144 typedef struct ex_media {
145 	int	exm_mpbit;		/* media present bit */
146 	int	exm_xcvr;		/* XCVR_SEL_* constant */
147 } ex_media_t;
148 
149 /*
150  * Media table for 3c90x chips.  Note that chips with MII have no
151  * `native' media.  This is sorted in "reverse preference".
152  */
153 static ex_media_t ex_native_media[] = {
154 	{ MEDIAOPT_AUI,		XCVR_SEL_AUI },
155 	{ MEDIAOPT_BNC,		XCVR_SEL_BNC },
156 	{ MEDIAOPT_10T,		XCVR_SEL_10T },
157 	{ MEDIAOPT_100TX,	XCVR_SEL_AUTO },	/* only 90XB */
158 	{ MEDIAOPT_100FX,	XCVR_SEL_100FX },
159 	{ MEDIAOPT_MII,		XCVR_SEL_MII },
160 	{ MEDIAOPT_100T4,	XCVR_SEL_MII },
161 	{ 0,			0 },
162 };
163 
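/*
 * Because the table above is in reverse preference order,
 * elxl_probe_media() keeps overwriting ex_xcvr as it walks the table,
 * so the most preferred medium actually present wins, unless the
 * configured default medium is present, in which case it is used
 * immediately.
 */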
164 
165 /*
166  * NB: There are lots of other models that *could* be supported.
167  * Specifically there are cardbus and miniPCI variants that could be
168  * easily added here, but they require special hacks and I have no
169  * access to the hardware required to verify them.  In particular,
170  * they seem to require some extra work in another register window,
171  * for which I have no supporting documentation.
172  */
173 static const struct ex_product {
174 	uint16_t	epp_prodid;	/* PCI product ID */
175 	const char	*epp_name;	/* device name */
176 	unsigned	epp_flags;	/* initial softc flags */
177 } ex_products[] = {
178 	{ 0x4500, "3c450-TX",		0 },
179 	{ 0x7646, "3cSOHO100-TX",	0 },
180 	{ 0x9000, "3c900-TPO",		0 },
181 	{ 0x9001, "3c900-COMBO",	0 },
182 	{ 0x9004, "3c900B-TPO",		0 },
183 	{ 0x9005, "3c900B-COMBO",	0 },
184 	{ 0x9006, "3c900B-TPC",		0 },
185 	{ 0x900a, "3c900B-FL",		0 },
186 	{ 0x9050, "3c905-TX",		0 },
187 	{ 0x9051, "3c905-T4",		0 },
188 	{ 0x9055, "3c905B-TX",		0 },
189 	{ 0x9056, "3c905B-T4",		0 },
190 	{ 0x9058, "3c905B-COMBO",	0 },
191 	{ 0x905a, "3c905B-FX",		0 },
192 	{ 0x9200, "3c905C-TX",		0 },
193 	{ 0x9201, "3c920B-EMB",		0 },
194 	{ 0x9202, "3c920B-EMB-WNM",	0 },
195 	{ 0x9800, "3c980",		0 },
196 	{ 0x9805, "3c980C-TXM",		0 },
197 
198 	{ 0, NULL, 0 },
199 };
200 
201 mac_priv_prop_t ex_priv_prop[] = {
202 	{ "_media", MAC_PROP_PERM_RW },
203 	{ "_available_media", MAC_PROP_PERM_READ },
204 };
205 
206 static mii_ops_t ex_mii_ops = {
207 	MII_OPS_VERSION,
208 	elxl_mii_read,
209 	elxl_mii_write,
210 	elxl_mii_notify,
211 };
212 
213 static mac_callbacks_t elxl_m_callbacks = {
214 	MC_GETCAPAB | MC_SETPROP | MC_GETPROP,
215 	elxl_m_stat,
216 	elxl_m_start,
217 	elxl_m_stop,
218 	elxl_m_promisc,
219 	elxl_m_multicst,
220 	elxl_m_unicst,
221 	elxl_m_tx,
222 	NULL,
223 	elxl_m_getcapab,
224 	NULL,
225 	NULL,
226 	elxl_m_setprop,
227 	elxl_m_getprop
228 };
229 
230 /*
231  * Stream information
232  */
233 DDI_DEFINE_STREAM_OPS(ex_devops, nulldev, nulldev,
234     elxl_ddi_attach, elxl_ddi_detach,
235     nodev, NULL, D_MP, NULL, elxl_ddi_quiesce);
236 
237 /*
238  * Module linkage information.
239  */
240 
241 static struct modldrv ex_modldrv = {
242 	&mod_driverops,			/* drv_modops */
243 	"3Com EtherLink XL",		/* drv_linkinfo */
244 	&ex_devops			/* drv_dev_ops */
245 };
246 
247 static struct modlinkage ex_modlinkage = {
248 	MODREV_1,		/* ml_rev */
249 	{ &ex_modldrv, NULL }	/* ml_linkage */
250 };
251 
252 int
253 _init(void)
254 {
255 	int	rv;
256 	mac_init_ops(&ex_devops, "elxl");
257 	if ((rv = mod_install(&ex_modlinkage)) != DDI_SUCCESS) {
258 		mac_fini_ops(&ex_devops);
259 	}
260 	return (rv);
261 }
262 
263 int
264 _fini(void)
265 {
266 	int	rv;
267 	if ((rv = mod_remove(&ex_modlinkage)) == DDI_SUCCESS) {
268 		mac_fini_ops(&ex_devops);
269 	}
270 	return (rv);
271 }
272 
273 int
274 _info(struct modinfo *modinfop)
275 {
276 	return (mod_info(&ex_modlinkage, modinfop));
277 }
278 
279 static void
280 ex_free_ring(ex_ring_t *r)
281 {
282 	for (int i = 0; i < r->r_count; i++) {
283 		ex_desc_t *ed = &r->r_desc[i];
284 		if (ed->ed_bufaddr)
285 			(void) ddi_dma_unbind_handle(ed->ed_dmah);
286 		if (ed->ed_acch)
287 			ddi_dma_mem_free(&ed->ed_acch);
288 		if (ed->ed_dmah)
289 			ddi_dma_free_handle(&ed->ed_dmah);
290 	}
291 
292 	if (r->r_paddr)
293 		(void) ddi_dma_unbind_handle(r->r_dmah);
294 	if (r->r_acch)
295 		ddi_dma_mem_free(&r->r_acch);
296 	if (r->r_dmah)
297 		ddi_dma_free_handle(&r->r_dmah);
298 
299 	kmem_free(r->r_desc, sizeof (ex_desc_t) * r->r_count);
300 	r->r_desc = NULL;
301 }
302 
303 static void
304 elxl_reset_ring(ex_ring_t *r, uint_t dir)
305 {
306 	ex_desc_t	*ed;
307 	ex_pd_t		*pd;
308 
309 	if (dir == DDI_DMA_WRITE) {
310 		/* transmit ring, not linked yet */
311 		for (int i = 0; i < r->r_count; i++) {
312 			ed = &r->r_desc[i];
313 			pd = ed->ed_pd;
314 			PUT_PD(r, pd->pd_link, 0);
315 			PUT_PD(r, pd->pd_fsh, 0);
316 			PUT_PD(r, pd->pd_len, EX_FR_LAST);
317 			PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
318 		}
319 		r->r_head = NULL;
320 		r->r_tail = NULL;
321 		r->r_avail = r->r_count;
322 	} else {
323 		/* receive is linked into a list */
324 		for (int i = 0; i < r->r_count; i++) {
325 			ed = &r->r_desc[i];
326 			pd = ed->ed_pd;
327 			PUT_PD(r, pd->pd_link, ed->ed_next->ed_descaddr);
328 			PUT_PD(r, pd->pd_status, 0);
329 			PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
330 			PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
331 		}
332 		r->r_head = &r->r_desc[0];
333 		r->r_tail = NULL;
334 		r->r_avail = 0;
335 	}
336 	(void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
337 }
338 
339 static boolean_t
340 ex_alloc_ring(elxl_t *sc, int count, ex_ring_t *r, uint_t dir)
341 {
342 	dev_info_t		*dip = sc->ex_dip;
343 	int			i;
344 	int			rv;
345 	size_t			len;
346 	ddi_dma_cookie_t	dmac;
347 	unsigned		ndmac;
348 
349 	r->r_count = count;
350 	r->r_desc = kmem_zalloc(sizeof (ex_desc_t) * count, KM_SLEEP);
351 
352 	rv = ddi_dma_alloc_handle(dip, &ex_dma_attr, DDI_DMA_DONTWAIT,
353 	    NULL, &r->r_dmah);
354 	if (rv != DDI_SUCCESS) {
355 		elxl_error(sc, "unable to allocate descriptor dma handle");
356 		return (B_FALSE);
357 	}
358 
359 	rv = ddi_dma_mem_alloc(r->r_dmah, count * sizeof (struct ex_pd),
360 	    &ex_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
361 	    (caddr_t *)&r->r_pd, &len, &r->r_acch);
362 	if (rv != DDI_SUCCESS) {
363 		elxl_error(sc, "unable to allocate descriptor memory");
364 		return (B_FALSE);
365 	}
366 	bzero(r->r_pd, len);
367 
368 	rv = ddi_dma_addr_bind_handle(r->r_dmah, NULL,
369 	    (caddr_t)r->r_pd, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
370 	    DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
371 	if (rv != DDI_DMA_MAPPED) {
372 		elxl_error(sc, "unable to map descriptor memory");
373 		return (B_FALSE);
374 	}
375 	r->r_paddr = dmac.dmac_address;
376 
377 	for (i = 0; i < count; i++) {
378 		ex_desc_t	*ed = &r->r_desc[i];
379 		ex_pd_t		*pd = &r->r_pd[i];
380 
381 		ed->ed_pd = pd;
382 		ed->ed_off = (i * sizeof (ex_pd_t));
383 		ed->ed_descaddr = r->r_paddr + (i * sizeof (ex_pd_t));
384 
385 		/* Link the high level descriptors into a ring. */
386 		ed->ed_next = &r->r_desc[(i + 1) % count];
387 		ed->ed_next->ed_prev = ed;
388 
389 		rv = ddi_dma_alloc_handle(dip, &ex_dma_attr,
390 		    DDI_DMA_DONTWAIT, NULL, &ed->ed_dmah);
391 		if (rv != 0) {
392 			elxl_error(sc, "can't allocate buf dma handle");
393 			return (B_FALSE);
394 		}
395 		rv = ddi_dma_mem_alloc(ed->ed_dmah, EX_BUFSZ, &ex_buf_acc_attr,
396 		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &ed->ed_buf,
397 		    &len, &ed->ed_acch);
398 		if (rv != DDI_SUCCESS) {
399 			elxl_error(sc, "unable to allocate buf memory");
400 			return (B_FALSE);
401 		}
402 		bzero(ed->ed_buf, len);
403 
404 		rv = ddi_dma_addr_bind_handle(ed->ed_dmah, NULL,
405 		    ed->ed_buf, len, dir | DDI_DMA_STREAMING,
406 		    DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
407 		if (rv != DDI_DMA_MAPPED) {
408 			elxl_error(sc, "unable to map buf memory");
409 			return (B_FALSE);
410 		}
411 		ed->ed_bufaddr = dmac.dmac_address;
412 	}
413 
414 	elxl_reset_ring(r, dir);
415 
416 	return (B_TRUE);
417 }
418 
419 static boolean_t
420 elxl_add_intr(elxl_t *sc)
421 {
422 	dev_info_t		*dip;
423 	int			actual;
424 	uint_t			ipri;
425 
426 	int			rv;
427 
428 	dip = sc->ex_dip;
429 
430 	rv = ddi_intr_alloc(dip, &sc->ex_intrh, DDI_INTR_TYPE_FIXED,
431 	    0, 1, &actual, DDI_INTR_ALLOC_STRICT);
432 	if ((rv != DDI_SUCCESS) || (actual != 1)) {
433 		elxl_error(sc, "Unable to allocate interrupt, %d, count %d",
434 		    rv, actual);
435 		return (B_FALSE);
436 	}
437 
438 	if (ddi_intr_get_pri(sc->ex_intrh, &ipri) != DDI_SUCCESS) {
439 		elxl_error(sc, "Unable to get interrupt priority");
440 		return (B_FALSE);
441 	}
442 
443 	if (ddi_intr_add_handler(sc->ex_intrh, elxl_intr, sc, NULL) !=
444 	    DDI_SUCCESS) {
445 		elxl_error(sc, "Can't add interrupt handler");
446 		(void) ddi_intr_free(sc->ex_intrh);
447 		sc->ex_intrh = NULL;
448 		return (B_FALSE);
449 	}
450 	mutex_init(&sc->ex_intrlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
451 	mutex_init(&sc->ex_txlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
452 
453 	return (B_TRUE);
454 }
455 
456 static int
457 elxl_attach(dev_info_t *dip)
458 {
459 	elxl_t		*sc;
460 	mac_register_t	*macp;
461 	uint16_t	val;
462 	uint16_t	venid;
463 	uint16_t	devid;
464 	int		i;
465 
466 	sc = kmem_zalloc(sizeof (*sc), KM_SLEEP);
467 	ddi_set_driver_private(dip, sc);
468 	sc->ex_dip = dip;
469 
470 	if (pci_config_setup(dip, &sc->ex_pcih) != DDI_SUCCESS) {
471 		elxl_error(sc, "unable to setup PCI config handle");
472 		goto fail;
473 	}
474 	venid = pci_config_get16(sc->ex_pcih, PCI_CONF_VENID);
475 	devid = pci_config_get16(sc->ex_pcih, PCI_CONF_DEVID);
476 
477 	if (venid != 0x10b7) {
478 		/* Not a 3Com part! */
479 		elxl_error(sc, "Unsupported vendor id (0x%x)", venid);
480 		goto fail;
481 	}
482 	for (i = 0; ex_products[i].epp_name; i++) {
483 		if (devid == ex_products[i].epp_prodid) {
484 			cmn_err(CE_CONT, "?%s%d: 3Com %s",
485 			    ddi_driver_name(dip),
486 			    ddi_get_instance(dip),
487 			    ex_products[i].epp_name);
488 			sc->ex_conf = ex_products[i].epp_flags;
489 			break;
490 		}
491 	}
492 	if (ex_products[i].epp_name == NULL) {
493 		/* Not a product we know how to support */
494 		elxl_error(sc, "Unsupported device id (0x%x)", devid);
495 		elxl_error(sc, "Driver may or may not function.");
496 	}
497 
498 	pci_config_put16(sc->ex_pcih, PCI_CONF_COMM,
499 	    pci_config_get16(sc->ex_pcih, PCI_CONF_COMM) |
500 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
501 
502 	if (ddi_regs_map_setup(dip, 1, &sc->ex_regsva, 0, 0, &ex_dev_acc_attr,
503 	    &sc->ex_regsh) != DDI_SUCCESS) {
504 		elxl_error(sc, "Unable to map device registers");
505 		goto fail;
506 	}
507 
508 	if (!elxl_add_intr(sc)) {
509 		goto fail;
510 	}
511 
512 	elxl_reset(sc);
513 
514 	val = elxl_read_eeprom(sc, EE_OEM_ADDR_0);
515 	sc->ex_factaddr[0] = val >> 8;
516 	sc->ex_factaddr[1] = val & 0xff;
517 	val = elxl_read_eeprom(sc, EE_OEM_ADDR_1);
518 	sc->ex_factaddr[2] = val >> 8;
519 	sc->ex_factaddr[3] = val & 0xff;
520 	val = elxl_read_eeprom(sc, EE_OEM_ADDR_2);
521 	sc->ex_factaddr[4] = val >> 8;
522 	sc->ex_factaddr[5] = val & 0xff;
523 	bcopy(sc->ex_factaddr, sc->ex_curraddr, 6);
524 
525 	sc->ex_capab = elxl_read_eeprom(sc, EE_CAPABILITIES);
526 
527 	/*
528 	 * Is this a 90XB?  If bit 2 (supportsLargePackets) is set, or
529 	 * bit 9 (supportsNoTxLength) is clear, then it's a 90X.
530 	 * Otherwise it's a 90XB.
531 	 */
532 	if ((sc->ex_capab & (1 << 2)) || !(sc->ex_capab & (1 << 9))) {
533 		sc->ex_conf &= ~CONF_90XB;
534 	} else {
535 		sc->ex_conf |= CONF_90XB;
536 	}
537 
538 	if (!ex_alloc_ring(sc, EX_NRX, &sc->ex_rxring, DDI_DMA_READ)) {
539 		goto fail;
540 	}
541 
542 	if (!ex_alloc_ring(sc, EX_NTX, &sc->ex_txring, DDI_DMA_WRITE)) {
543 		goto fail;
544 	}
545 
546 	elxl_probe_media(sc);
547 
548 	/*
549 	 * The probe may have indicated MII!
550 	 */
551 	if (sc->ex_mediaopt & (MEDIAOPT_MII | MEDIAOPT_100TX)) {
552 		sc->ex_miih = mii_alloc(sc, sc->ex_dip, &ex_mii_ops);
553 		if (sc->ex_miih == NULL) {
554 			goto fail;
555 		}
556 		/*
557 		 * Note: The 90XB models can in theory support pause,
558 		 * but we're not enabling it now for lack of units to
559 		 * test with.  If this is changed, make sure to
560 		 * update the code in elxl_mii_notify to set the flow
561 		 * control field in the W3_MAC_CONTROL register.
562 		 */
563 		mii_set_pauseable(sc->ex_miih, B_FALSE, B_FALSE);
564 	}
565 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
566 		elxl_error(sc, "MAC register allocation failed");
567 		goto fail;
568 	}
569 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
570 	macp->m_driver = sc;
571 	macp->m_dip = dip;
572 	macp->m_src_addr = sc->ex_curraddr;
573 	macp->m_callbacks = &elxl_m_callbacks;
574 	macp->m_min_sdu = 0;
575 	macp->m_max_sdu = ETHERMTU;
576 	macp->m_margin = VLAN_TAGSZ;
577 	macp->m_priv_props = ex_priv_prop;
578 	macp->m_priv_prop_count = 2;
579 
580 	(void) ddi_intr_enable(sc->ex_intrh);
581 
582 	if (mac_register(macp, &sc->ex_mach) == DDI_SUCCESS) {
583 
584 			/*
585 			 * Note: we don't want to start link checking
586 			 * until *after* we have added the MAC handle.
587 			 */
588 		if (sc->ex_mediaopt &
589 		    (MEDIAOPT_MASK & ~(MEDIAOPT_MII | MEDIAOPT_100TX))) {
590 
591 			/* Check non-MII link state once per second. */
592 			sc->ex_linkcheck =
593 			    ddi_periodic_add(elxl_linkcheck, sc, 10000000, 0);
594 		}
595 
596 		mac_free(macp);
597 		return (DDI_SUCCESS);
598 	}
599 
600 	mac_free(macp);
601 
602 fail:
603 	elxl_detach(sc);
604 	return (DDI_FAILURE);
605 }
606 
607 /*
608  * Find the media present on non-MII chips, and select the one to use.
609  */
610 static void
611 elxl_probe_media(elxl_t *sc)
612 {
613 	ex_media_t	*exm;
614 	uint32_t	config;
615 	uint32_t	default_media;
616 	uint16_t	media_options;
617 
618 	SET_WIN(3);
619 	config = GET32(W3_INTERNAL_CONFIG);
620 	media_options = GET16(W3_MEDIAOPT);
621 
622 	/*
623 	 * We modify the media_options field so that we have a
624 	 * consistent view of the media available, without worrying
625 	 * about the version of ASIC, etc.
626 	 */
627 
628 	/*
629 	 * 100BASE-TX is handled differently on 90XB from 90X.  Older
630 	 * parts use the external MII to provide this support.
631 	 */
632 	if (sc->ex_conf & CONF_90XB) {
633 		if (media_options & MEDIAOPT_100TX) {
634 			/*
635 			 * 3Com advises that we should only ever use the
636 			 * auto mode.  Notably, it seems that there should
637 			 * never be a 90XB board with the MEDIAOPT_10T bit set
638 			 * without this bit.  If that happens, the driver
639 			 * will simply run in 10BASE-T-only mode.
640 			 */
641 			media_options &= ~MEDIAOPT_10T;
642 		}
643 	} else {
644 		if (media_options & MEDIAOPT_100TX) {
645 			/*
646 			 * If this occurs, we really want to use it like
647 			 * an MII device.  Generally in this situation we
648 			 * want to use the MII exclusively, and there ought
649 			 * not to be a 10BASE-T transceiver.
650 			 */
651 			media_options |= MEDIAOPT_MII;
652 			media_options &= ~MEDIAOPT_100TX;
653 			media_options &= ~MEDIAOPT_10T;
654 
655 			/*
656 			 * Additionally, some of these devices map all
657 			 * internal PHY registers at *every* address, not
658 			 * just the "allowed" address 24.
659 			 */
660 			sc->ex_conf |= CONF_INTPHY;
661 		}
662 		/*
663 		 * Early versions didn't have 10FL models, and used this
664 		 * bit for something else (VCO).
665 		 */
666 		media_options &= ~MEDIAOPT_10FL;
667 	}
668 	if (media_options & MEDIAOPT_100T4) {
669 		/* 100BASE-T4 units all use the MII bus. */
670 		media_options |= MEDIAOPT_MII;
671 		media_options &= ~MEDIAOPT_100T4;
672 	}
673 
674 	/* Save our media options. */
675 	sc->ex_mediaopt = media_options;
676 
677 #define	APPEND_MEDIA(str, bit, name)					\
678 	if (media_options & (bit)) {					\
679 		(void) strlcat(str, *str ? "," : "", sizeof (str));	\
680 		(void) strlcat(str, name, sizeof (str));		\
681 	}
682 
683 	APPEND_MEDIA(sc->ex_medias, (MEDIAOPT_MII|MEDIAOPT_100TX), "mii");
684 	APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10T, "tp-hdx,tp-fdx");
685 	APPEND_MEDIA(sc->ex_medias, MEDIAOPT_100FX, "fx-hdx,fx-fdx");
686 	APPEND_MEDIA(sc->ex_medias, MEDIAOPT_BNC, "bnc");
687 	APPEND_MEDIA(sc->ex_medias, MEDIAOPT_AUI, "aui");
688 	APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10FL, "fl-hdx,fl-fdx");
689 
690 	if (config & XCVR_SEL_100TX) {
691 		/* Only found on 90XB.  Don't use this, use AUTO instead! */
692 		config |= XCVR_SEL_AUTO;
693 		config &= ~XCVR_SEL_100TX;
694 	}
695 
696 	default_media = (config & XCVR_SEL_MASK);
697 
698 	/* Sanity check that there are any media! */
699 	if ((media_options & MEDIAOPT_MASK) == 0) {
700 		elxl_error(sc,
701 		    "No media present?  Attempting to use default.");
702 		/*
703 		 * This "default" may be nonsensical.  At worst it should
704 		 * cause a busted link.
705 		 */
706 		sc->ex_xcvr = default_media;
707 	}
708 
709 	for (exm = ex_native_media; exm->exm_mpbit != 0; exm++) {
710 		if (media_options & exm->exm_mpbit) {
711 			if (exm->exm_xcvr == default_media) {
712 				/* preferred default is present, just use it */
713 				sc->ex_xcvr = default_media;
714 				return;
715 			}
716 
717 			sc->ex_xcvr = exm->exm_xcvr;
718 			/* but keep trying for other more preferred options */
719 		}
720 	}
721 }
722 
723 /*
724  * Setup transmitter parameters.
725  */
726 static void
727 elxl_setup_tx(elxl_t *sc)
728 {
729 	/*
730 	 * Disable reclaim threshold for 90xB, set free threshold to
731 	 * 6 * 256 = 1536 for 90x.
732 	 */
733 	if (sc->ex_conf & CONF_90XB)
734 		PUT_CMD(CMD_SET_TXRECLAIM | 255);
735 	else
736 		PUT8(REG_TXFREETHRESH, 6);
737 
738 	/*
739 	 * We've seen underflows as the root cause of NIC hangs on
740 	 * older cards.  Use a store-and-forward model to prevent that.
741 	 */
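	/*
	 * Note: the start threshold below is presumably expressed in
	 * units of 4 bytes (dwords), hence the shift by two.
	 */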
742 	PUT_CMD(CMD_SET_TXSTART | EX_BUFSZ >> 2);
743 }
744 
745 /*
746  * Bring device up.
747  */
748 static void
749 elxl_init(elxl_t *sc)
750 {
751 	if (sc->ex_suspended)
752 		return;
753 
754 	WAIT_CMD(sc);
755 	elxl_stop(sc);
756 
757 	PUT_CMD(CMD_RX_RESET);
758 	WAIT_CMD(sc);
759 	PUT_CMD(CMD_TX_RESET);
760 	WAIT_CMD(sc);
761 
762 	/* Load Tx parameters. */
763 	elxl_setup_tx(sc);
764 
765 	PUT32(REG_DMACTRL, GET32(REG_DMACTRL) | DMACTRL_UPRXEAREN);
766 
767 	PUT_CMD(CMD_IND_ENABLE | INT_WATCHED);
768 	PUT_CMD(CMD_INT_ENABLE | INT_WATCHED);
769 
770 	PUT_CMD(CMD_INT_ACK | 0xff);
771 
772 	elxl_set_media(sc);
773 	elxl_set_rxfilter(sc);
774 
775 	/* Configure for VLAN tag sizing. */
776 	SET_WIN(3);
777 	if (sc->ex_conf & CONF_90XB) {
778 		PUT16(W3_MAX_PKT_SIZE, EX_BUFSZ);
779 	} else {
780 		PUT16(W3_MAC_CONTROL, GET16(W3_MAC_CONTROL) |
781 		    MAC_CONTROL_ALLOW_LARGE);
782 	}
783 
784 	PUT_CMD(CMD_SET_RXEARLY | (EX_BUFSZ >> 2));
785 
786 	PUT_CMD(CMD_STATS_ENABLE);
787 	PUT_CMD(CMD_TX_ENABLE);
788 	PUT32(REG_UPLISTPTR, sc->ex_rxring.r_paddr);
789 	PUT_CMD(CMD_RX_ENABLE);
790 	PUT_CMD(CMD_UP_UNSTALL);
791 }
792 
793 /*
794  * Set multicast receive filter. Also take care of promiscuous mode.
795  * Note that *some* of this hardware is fully capable of either a 256-
796  * or 64-bit multicast hash.  However, we can't determine what the
797  * size of the hash table is easily, and so we are expected to be able
798  * to resubmit the entire list of addresses each time.  This puts an
799  * onerous burden on the driver to maintain its list of multicast
800  * addresses.  Since multicast stuff is usually not that performance
801  * sensitive, and since we don't usually have much of it, we are just
802  * going to skip it.  We allow the upper layers to filter it, as
803  * needed, by setting the all-multicast bit if the hardware can do it.
804  * This also reduces our test burden.
805  */
806 static void
807 elxl_set_rxfilter(elxl_t *sc)
808 {
809 	uint16_t mask = FILTER_UNICAST | FILTER_ALLBCAST;
810 
811 	if (sc->ex_suspended)
812 		return;
813 
814 	/*
815 	 * Set the station address and clear the station mask. The latter
816 	 * is needed for 90x cards; 0 is the default on 90xB cards.
817 	 */
818 	SET_WIN(2);
819 	for (int i = 0; i < ETHERADDRL; i++) {
820 		PUT8(W2_STATION_ADDRESS + i, sc->ex_curraddr[i]);
821 		PUT8(W2_STATION_MASK + i, 0);
822 	}
823 
824 	if (sc->ex_mccount) {
825 		mask |= FILTER_ALLMULTI;
826 	}
827 	if (sc->ex_promisc) {
828 		mask |= FILTER_PROMISC;
829 	}
830 	PUT_CMD(CMD_SET_FILTER | mask);
831 }
832 
833 static void
834 elxl_set_media(elxl_t *sc)
835 {
836 	uint32_t configreg;
837 
838 	SET_WIN(4);
839 	PUT16(W4_MEDIASTAT, 0);
840 	PUT_CMD(CMD_BNC_DISABLE);
841 	drv_usecwait(800);
842 
843 	/*
844 	 * Now turn on the selected media/transceiver.
845 	 */
846 	switch (sc->ex_xcvr) {
847 	case XCVR_SEL_10T:
848 		sc->ex_mii_active = B_FALSE;
849 		PUT16(W4_MEDIASTAT,
850 		    MEDIASTAT_JABGUARD_EN | MEDIASTAT_LINKBEAT_EN);
851 		drv_usecwait(800);
852 		break;
853 
854 	case XCVR_SEL_BNC:
855 		sc->ex_mii_active = B_FALSE;
856 		PUT_CMD(CMD_BNC_ENABLE);
857 		drv_usecwait(800);
858 		break;
859 
860 	case XCVR_SEL_100FX:
861 		sc->ex_mii_active = B_FALSE;	/* Is this really true? */
862 		PUT16(W4_MEDIASTAT, MEDIASTAT_LINKBEAT_EN);
863 		drv_usecwait(800);
864 		break;
865 
866 	case XCVR_SEL_AUI:
867 		sc->ex_mii_active = B_FALSE;
868 		PUT16(W4_MEDIASTAT, MEDIASTAT_SQE_EN);
869 		drv_usecwait(800);
870 		break;
871 
872 	case XCVR_SEL_AUTO:
873 	case XCVR_SEL_MII:
874 		/*
875 		 * This is due to paranoia.  If a card claims
876 		 * to default to MII, but doesn't have it set in
877 		 * media options, then we don't want to leave
878 		 * the MII active or we'll have problems dereferencing
879 		 * the "mii handle".
880 		 */
881 		if (sc->ex_miih) {
882 			sc->ex_mii_active = B_TRUE;
883 		} else {
884 			sc->ex_mii_active = B_FALSE;
885 		}
886 		break;
887 
888 	default:
889 		sc->ex_mii_active = B_FALSE;
890 		elxl_error(sc, "Impossible media setting!");
891 		break;
892 	}
893 
894 	SET_WIN(3);
895 	configreg = GET32(W3_INTERNAL_CONFIG);
896 
897 	configreg &= ~(XCVR_SEL_MASK);
898 	configreg |= (sc->ex_xcvr);
899 
900 	PUT32(W3_INTERNAL_CONFIG, configreg);
901 
902 	/*
903 	 * If we're not using MII, force the full-duplex setting.  MII
904 	 * based modes handle the full-duplex setting via the MII
905 	 * notify callback.
906 	 */
907 	if (!sc->ex_mii_active) {
908 		uint16_t mctl;
909 		mctl = GET16(W3_MAC_CONTROL);
910 		if (sc->ex_fdx) {
911 			mctl |= MAC_CONTROL_FDX;
912 		} else {
913 			mctl &= ~MAC_CONTROL_FDX;
914 		}
915 		PUT16(W3_MAC_CONTROL, mctl);
916 	}
917 }
918 
919 /*
920  * Periodic link-state check for non-MII media (scheduled with
921  * ddi_periodic_add; it may run before the interface is brought up).
922  */
923 static void
924 elxl_linkcheck(void *arg)
925 {
926 	elxl_t		*sc = arg;
927 	uint16_t	stat;
928 	link_state_t	link;
929 
930 	mutex_enter(&sc->ex_txlock);
931 	if (sc->ex_mii_active) {
932 		mutex_exit(&sc->ex_txlock);
933 		return;
934 	}
935 	if (sc->ex_running && !sc->ex_suspended) {
936 		switch (sc->ex_xcvr) {
937 		case XCVR_SEL_100FX:
938 			/* these media we can detect link on */
939 			SET_WIN(4);
940 			stat = GET16(W4_MEDIASTAT);
941 			if (stat & MEDIASTAT_LINKDETECT) {
942 				sc->ex_link = LINK_STATE_UP;
943 				sc->ex_speed = 100000000;
944 			} else {
945 				sc->ex_link = LINK_STATE_DOWN;
946 				sc->ex_speed = 0;
947 			}
948 			break;
949 
950 		case XCVR_SEL_10T:
951 			/* these media we can detect link on */
952 			SET_WIN(4);
953 			stat = GET16(W4_MEDIASTAT);
954 			if (stat & MEDIASTAT_LINKDETECT) {
955 				sc->ex_link = LINK_STATE_UP;
956 				sc->ex_speed = 10000000;
957 			} else {
958 				sc->ex_link = LINK_STATE_DOWN;
959 				sc->ex_speed = 0;
960 			}
961 			break;
962 
963 		case XCVR_SEL_BNC:
964 		case XCVR_SEL_AUI:
965 		default:
966 			/*
967 			 * For these we don't really know the answer,
968 			 * but if we lie then at least it won't cause
969 			 * ifconfig to turn off the RUNNING flag.
970 			 * This is necessary because we might
971 			 * transition from LINK_STATE_DOWN when
972 			 * switching media.
973 			 */
974 			sc->ex_speed = 10000000;
975 			sc->ex_link = LINK_STATE_UP;
976 			break;
977 		}
978 		SET_WIN(3);
979 		sc->ex_duplex = GET16(W3_MAC_CONTROL) & MAC_CONTROL_FDX ?
980 		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
981 	} else {
982 		sc->ex_speed = 0;
983 		sc->ex_duplex = LINK_DUPLEX_UNKNOWN;
984 		sc->ex_link = LINK_STATE_UNKNOWN;
985 	}
986 	link = sc->ex_link;
987 	mutex_exit(&sc->ex_txlock);
988 
989 	mac_link_update(sc->ex_mach, link);
990 }
991 
992 static int
993 elxl_m_promisc(void *arg, boolean_t on)
994 {
995 	elxl_t	*sc = arg;
996 
997 	mutex_enter(&sc->ex_intrlock);
998 	mutex_enter(&sc->ex_txlock);
999 	sc->ex_promisc = on;
1000 	elxl_set_rxfilter(sc);
1001 	mutex_exit(&sc->ex_txlock);
1002 	mutex_exit(&sc->ex_intrlock);
1003 	return (0);
1004 }
1005 
1006 static int
1007 elxl_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1008 {
1009 	elxl_t	*sc = arg;
1010 
1011 	_NOTE(ARGUNUSED(addr));
1012 
1013 	mutex_enter(&sc->ex_intrlock);
1014 	mutex_enter(&sc->ex_txlock);
1015 	if (add) {
1016 		sc->ex_mccount++;
1017 		if (sc->ex_mccount == 1) {
1018 			elxl_set_rxfilter(sc);
1019 		}
1020 	} else {
1021 		sc->ex_mccount--;
1022 		if (sc->ex_mccount == 0) {
1023 			elxl_set_rxfilter(sc);
1024 		}
1025 	}
1026 	mutex_exit(&sc->ex_txlock);
1027 	mutex_exit(&sc->ex_intrlock);
1028 	return (0);
1029 }
1030 
1031 static int
1032 elxl_m_unicst(void *arg, const uint8_t *addr)
1033 {
1034 	elxl_t	*sc = arg;
1035 
1036 	mutex_enter(&sc->ex_intrlock);
1037 	mutex_enter(&sc->ex_txlock);
1038 	bcopy(addr, sc->ex_curraddr, ETHERADDRL);
1039 	elxl_set_rxfilter(sc);
1040 	mutex_exit(&sc->ex_txlock);
1041 	mutex_exit(&sc->ex_intrlock);
1042 
1043 	return (0);
1044 }
1045 
1046 static mblk_t *
1047 elxl_m_tx(void *arg, mblk_t *mp)
1048 {
1049 	elxl_t		*sc = arg;
1050 	ex_desc_t	*txd;
1051 	ex_desc_t	*first;
1052 	ex_desc_t	*tail;
1053 	size_t		len;
1054 	ex_ring_t	*r;
1055 	ex_pd_t		*pd;
1056 	uint32_t	cflags;
1057 	mblk_t		*nmp;
1058 	boolean_t	reenable = B_FALSE;
1059 	boolean_t	reset = B_FALSE;
1060 	uint32_t	paddr;
1061 
1062 	r = &sc->ex_txring;
1063 	mutex_enter(&sc->ex_txlock);
1064 	if (sc->ex_suspended) {
1065 		while (mp != NULL) {
1066 			sc->ex_nocarrier++;
1067 			nmp = mp->b_next;
1068 			freemsg(mp);
1069 			mp = nmp;
1070 		}
1071 		mutex_exit(&sc->ex_txlock);
1072 		return (NULL);
1073 	}
1074 
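	/*
	 * Drain any completed TX status bytes first, noting conditions
	 * that require re-enabling or resetting the transmitter before
	 * we queue more work.
	 */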
1075 	for (int limit = (EX_NTX * 2); limit; limit--) {
1076 		uint8_t stat = GET8(REG_TXSTATUS);
1077 		if ((stat & TXSTATUS_COMPLETE) == 0) {
1078 			break;
1079 		}
1080 		if (stat & TXSTATUS_MAXCOLLISIONS) {
1081 			reenable = B_TRUE;
1082 			sc->ex_excoll++;
1083 		}
1084 		if ((stat & TXSTATUS_ERRS) != 0) {
1085 			reset = B_TRUE;
1086 			if (stat & TXSTATUS_JABBER) {
1087 				sc->ex_jabber++;
1088 			}
1089 			if (stat & TXSTATUS_RECLAIM_ERR) {
1090 				sc->ex_txerr++;
1091 			}
1092 			if (stat & TXSTATUS_UNDERRUN) {
1093 				sc->ex_uflo++;
1094 			}
1095 		}
1096 		PUT8(REG_TXSTATUS, 0);
1097 	}
1098 
1099 	if (reset || reenable) {
1100 		paddr = GET32(REG_DNLISTPTR);
1101 		if (reset) {
1102 			WAIT_CMD(sc);
1103 			PUT_CMD(CMD_TX_RESET);
1104 			WAIT_CMD(sc);
1105 			elxl_setup_tx(sc);
1106 		}
1107 		PUT_CMD(CMD_TX_ENABLE);
1108 		if (paddr) {
1109 			PUT32(REG_DNLISTPTR, paddr);
1110 		}
1111 	}
1112 
1113 	/* first reclaim any free descriptors */
1114 	while (r->r_avail < r->r_count) {
1115 
1116 		paddr = GET32(REG_DNLISTPTR);
1117 		txd = r->r_head;
1118 		if (paddr == txd->ed_descaddr) {
1119 			/* still processing this one, we're done */
1120 			break;
1121 		}
1122 		if (paddr == 0) {
1123 			/* done processing the entire list! */
1124 			r->r_head = NULL;
1125 			r->r_tail = NULL;
1126 			r->r_avail = r->r_count;
1127 			break;
1128 		}
1129 		r->r_avail++;
1130 		r->r_head = txd->ed_next;
1131 	}
1132 
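	/*
	 * If the download engine is still working on a list, stall it
	 * so that we can safely append new descriptors to that list.
	 */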
1133 	if ((r->r_avail < r->r_count) && (GET32(REG_DNLISTPTR) != 0)) {
1134 		PUT_CMD(CMD_DN_STALL);
1135 		WAIT_CMD(sc);
1136 	}
1137 
1138 	first = NULL;
1139 	tail = r->r_tail;
1140 
1141 	/*
1142 	 * If there is already a tx list, select the next desc on the list.
1143 	 * Otherwise, just pick the first descriptor.
1144 	 */
1145 	txd = tail ? tail->ed_next : &r->r_desc[0];
1146 
1147 	while ((mp != NULL) && (r->r_avail)) {
1148 
1149 		nmp = mp->b_next;
1150 
1151 		len = msgsize(mp);
1152 		if (len > (ETHERMAX + VLAN_TAGSZ)) {
1153 			sc->ex_txerr++;
1154 			freemsg(mp);
1155 			mp = nmp;
1156 			continue;
1157 		}
1158 
1159 		cflags = 0;
1160 		if ((sc->ex_conf & CONF_90XB) != 0) {
1161 			uint32_t	pflags;
1162 			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL,
1163 			    &pflags);
1164 			if (pflags & HCK_IPV4_HDRCKSUM) {
1165 				cflags |= EX_DPD_IPCKSUM;
1166 			}
1167 			if (pflags & HCK_FULLCKSUM) {
1168 				cflags |= (EX_DPD_TCPCKSUM | EX_DPD_UDPCKSUM);
1169 			}
1170 		}
1171 
1172 		/* Mark this descriptor as in use.  We're committed now. */
1173 		mcopymsg(mp, txd->ed_buf);	/* frees the mblk! */
1174 		r->r_avail--;
1175 		mp = nmp;
1176 
1177 		/* Accounting stuff. */
1178 		sc->ex_opackets++;
1179 		sc->ex_obytes += len;
1180 		if (txd->ed_buf[0] & 0x1) {
1181 			if (bcmp(txd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1182 				sc->ex_multixmt++;
1183 			} else {
1184 				sc->ex_brdcstxmt++;
1185 			}
1186 		}
1187 
1188 		pd = txd->ed_pd;
1189 
1190 
1191 		/*
1192 		 * Zero pad the frame if it's too short.  This
1193 		 * also avoids a checksum offload bug.
1194 		 */
1195 		if (len < 30) {
1196 			bzero(txd->ed_buf + len, ETHERMIN - len);
1197 			len = ETHERMIN;
1198 		}
1199 
1200 		/*
1201 		 * If this is our first packet so far, record the head
1202 		 * of the list.
1203 		 */
1204 		if (first == NULL) {
1205 			first = txd;
1206 		}
1207 
1208 		(void) ddi_dma_sync(txd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1209 
1210 		PUT_PD(r, pd->pd_link, 0);
1211 		PUT_PD(r, pd->pd_fsh, len | cflags);
1212 		PUT_PD(r, pd->pd_addr, txd->ed_bufaddr);
1213 		PUT_PD(r, pd->pd_len, len | EX_FR_LAST);
1214 
1215 		/*
1216 		 * Write the link into the previous descriptor.  Note that
1217 		 * if this is the first packet (so no previous queued), this
1218 		 * will be benign because the previous descriptor won't be
1219 		 * on any tx list.  (Furthermore, we'll clear its link field
1220 		 * when we later use it.)
1221 		 */
1222 		PUT_PD(r, txd->ed_prev->ed_pd->pd_link, txd->ed_descaddr);

		/* Step to the next descriptor for the next packet. */
		txd = txd->ed_next;
1223 	}
1224 
1225 	/*
1226 	 * Are we submitting any packets?
1227 	 */
1228 	if (first != NULL) {
1229 		/* Interrupt on the last packet. */
1230 		PUT_PD(r, pd->pd_fsh, len | cflags | EX_DPD_DNIND);
1231 
1232 		if (tail == NULL) {
1233 			/* No packets pending, so it's a new list head! */
1234 			r->r_head = first;
1235 		} else {
1236 			pd = tail->ed_pd;
1237 			/* We've added frames, so don't interrupt mid-list. */
1238 			PUT_PD(r, pd->pd_fsh,
1239 			    GET_PD(r, pd->pd_fsh) & ~(EX_DPD_DNIND));
1240 		}
1241 		/* Record the last descriptor actually used. */
1242 		r->r_tail = txd->ed_prev;
1243 
1244 		/* flush the entire ring - we're stopped so it's safe */
1245 		(void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1246 	}
1247 
1248 	/* Restart transmitter. */
1249 	if (sc->ex_txring.r_head) {
1250 		PUT32(REG_DNLISTPTR, sc->ex_txring.r_head->ed_descaddr);
1251 	}
1252 	PUT_CMD(CMD_DN_UNSTALL);
1253 
1254 	mutex_exit(&sc->ex_txlock);
1255 
1256 	return (mp);
1257 }
1258 
1259 static mblk_t *
1260 elxl_recv(elxl_t *sc, ex_desc_t *rxd, uint32_t stat)
1261 {
1262 	mblk_t		*mp = NULL;
1263 	uint32_t	len;
1264 
1265 	len = stat & EX_UPD_PKTLENMASK;
1266 	if (stat & (EX_UPD_ERR_VLAN | EX_UPD_OVERFLOW)) {
1267 		if (stat & EX_UPD_RUNT) {
1268 			sc->ex_runt++;
1269 		}
1270 		if (stat & EX_UPD_OVERRUN) {
1271 			sc->ex_oflo++;
1272 		}
1273 		if (stat & EX_UPD_CRCERR) {
1274 			sc->ex_fcs++;
1275 		}
1276 		if (stat & EX_UPD_ALIGNERR) {
1277 			sc->ex_align++;
1278 		}
1279 		if (stat & EX_UPD_OVERFLOW) {
1280 			sc->ex_toolong++;
1281 		}
1282 		return (NULL);
1283 	}
1284 	if (len < sizeof (struct ether_header)) {
1285 		sc->ex_runt++;
1286 		return (NULL);
1287 	}
1288 	if (len > (ETHERMAX + VLAN_TAGSZ)) {
1289 		/* Allow four bytes for the VLAN header */
1290 		sc->ex_toolong++;
1291 		return (NULL);
1292 	}
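	/*
	 * Reserve 14 bytes of headroom in the new mblk, presumably so
	 * that the IP header lands on a 32-bit boundary after the
	 * 14-byte Ethernet header (assuming allocb returns an aligned
	 * buffer).
	 */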
1293 	if ((mp = allocb(len + 14, BPRI_HI)) == NULL) {
1294 		sc->ex_allocbfail++;
1295 		return (NULL);
1296 	}
1297 
1298 	(void) ddi_dma_sync(rxd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
1299 	mp->b_rptr += 14;
1300 	mp->b_wptr = mp->b_rptr + len;
1301 	bcopy(rxd->ed_buf, mp->b_rptr, len);
1302 
1303 	sc->ex_ipackets++;
1304 	sc->ex_ibytes += len;
1305 	if (rxd->ed_buf[0] & 0x1) {
1306 		if (bcmp(rxd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1307 			sc->ex_multircv++;
1308 		} else {
1309 			sc->ex_brdcstrcv++;
1310 		}
1311 	}
1312 
1313 	/*
1314 	 * Set the incoming checksum information for the packet.
1315 	 */
1316 	if (((sc->ex_conf & CONF_90XB) != 0) &&
1317 	    ((stat & EX_UPD_IPCHECKED) != 0) &&
1318 	    ((stat & (EX_UPD_CKSUMERR)) == 0)) {
1319 		uint32_t	pflags = 0;
1320 		if (stat & EX_UPD_IPCHECKED) {
1321 			pflags |= HCK_IPV4_HDRCKSUM;
1322 		}
1323 		if (stat & (EX_UPD_TCPCHECKED | EX_UPD_UDPCHECKED)) {
1324 			pflags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
1325 		}
1326 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, pflags, 0);
1327 	}
1328 
1329 	return (mp);
1330 }
1331 
1332 static int
1333 elxl_m_start(void *arg)
1334 {
1335 	elxl_t	*sc = arg;
1336 
1337 	mutex_enter(&sc->ex_intrlock);
1338 	mutex_enter(&sc->ex_txlock);
1339 
1340 	elxl_init(sc);
1341 	sc->ex_running = B_TRUE;
1342 
1343 	mutex_exit(&sc->ex_txlock);
1344 	mutex_exit(&sc->ex_intrlock);
1345 
1346 	if (sc->ex_miih) {
1347 		mii_start(sc->ex_miih);
1348 	}
1349 	return (0);
1350 }
1351 
1352 static void
1353 elxl_m_stop(void *arg)
1354 {
1355 	elxl_t	*sc = arg;
1356 
1357 	if (sc->ex_miih) {
1358 		mii_stop(sc->ex_miih);
1359 	}
1360 
1361 	mutex_enter(&sc->ex_intrlock);
1362 	mutex_enter(&sc->ex_txlock);
1363 
1364 	elxl_stop(sc);
1365 	sc->ex_running = B_FALSE;
1366 
1367 	mutex_exit(&sc->ex_txlock);
1368 	mutex_exit(&sc->ex_intrlock);
1369 }
1370 
1371 static boolean_t
1372 elxl_m_getcapab(void *arg, mac_capab_t cap, void *data)
1373 {
1374 	elxl_t		*sc = arg;
1375 	switch (cap) {
1376 	case MAC_CAPAB_HCKSUM: {
1377 		uint32_t	*flags = data;
1378 		if (sc->ex_conf & CONF_90XB) {
1379 			*flags = HCKSUM_IPHDRCKSUM | HCKSUM_INET_FULL_V4;
1380 			return (B_TRUE);
1381 		}
1382 		return (B_FALSE);
1383 	}
1384 	default:
1385 		return (B_FALSE);
1386 	}
1387 }
1388 
1389 static int
1390 elxl_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
1391     uint_t sz, void *val, uint_t *perm)
1392 {
1393 	elxl_t		*sc = arg;
1394 	int		rv;
1395 	boolean_t	isdef = (flags & MAC_PROP_DEFAULT);
1396 
1397 	if (sc->ex_mii_active) {
1398 		rv = mii_m_getprop(sc->ex_miih, name, num, flags, sz,
1399 		    val, perm);
1400 		if (rv != ENOTSUP)
1401 			return (rv);
1402 	}
1403 
1404 	switch (num) {
1405 	case MAC_PROP_DUPLEX:
1406 		*perm = MAC_PROP_PERM_READ;
1407 		*(link_duplex_t *)val = isdef ? LINK_DUPLEX_HALF : sc->ex_duplex;
1408 		return (0);
1409 	case MAC_PROP_SPEED:
1410 		*perm = MAC_PROP_PERM_READ;
1411 		*(uint64_t *)val = sc->ex_speed;
1412 		return (0);
1413 	case MAC_PROP_STATUS:
1414 		*perm = MAC_PROP_PERM_READ;
1415 		bcopy(&sc->ex_link, val, sizeof (link_state_t));
1416 		return (0);
1417 
1418 	case MAC_PROP_PRIVATE:
1419 		if (strcmp(name, "_media") == 0) {
1420 			char *str;
1421 			*perm = MAC_PROP_PERM_RW;
1422 
1423 			switch (sc->ex_xcvr) {
1424 			case XCVR_SEL_AUTO:
1425 			case XCVR_SEL_MII:
1426 				str = "mii";
1427 				break;
1428 			case XCVR_SEL_10T:
1429 				str = sc->ex_fdx ? "tp-fdx" : "tp-hdx";
1430 				break;
1431 			case XCVR_SEL_BNC:
1432 				str = "bnc";
1433 				break;
1434 			case XCVR_SEL_AUI:
1435 				if (sc->ex_mediaopt & MEDIAOPT_10FL) {
1436 					str = sc->ex_fdx ? "fl-fdx" : "fl-hdx";
1437 				} else {
1438 					str = "aui";
1439 				}
1440 				break;
1441 			case XCVR_SEL_100FX:
1442 				str = sc->ex_fdx ? "fx-fdx" : "fx-hdx";
1443 				break;
1444 			default:
1445 				str = "unknown";
1446 				break;
1447 			}
1448 			(void) snprintf(val, sz, "%s", str);
1449 			return (0);
1450 		}
1451 		/*
1452 		 * This available media property is a hack, and should
1453 		 * be removed when we can provide proper support for
1454 		 * querying it as proposed in PSARC 2009/235.  (At the
1455 		 * moment the implementation lacks support for using
1456 		 * MAC_PROP_POSSIBLE with private properties.)
1457 		 */
1458 		if (strcmp(name, "_available_media") == 0) {
1459 			*perm = MAC_PROP_PERM_READ;
1460 			(void) snprintf(val, sz, "%s", sc->ex_medias);
1461 			return (0);
1462 		}
1463 		break;
1464 	}
1465 	return (ENOTSUP);
1466 }
1467 
1468 static int
1469 elxl_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1470     const void *val)
1471 {
1472 	elxl_t		*sc = arg;
1473 	int		rv;
1474 
1475 	if (sc->ex_mii_active) {
1476 		rv = mii_m_setprop(sc->ex_miih, name, num, sz, val);
1477 		if (rv != ENOTSUP) {
1478 			return (rv);
1479 		}
1480 	}
1481 	switch (num) {
1482 
1483 	case MAC_PROP_PRIVATE:
1484 		if (strcmp(name, "_media") == 0) {
1485 			uint32_t mopt = sc->ex_mediaopt;
1486 
1487 			if (strcmp(val, "mii") == 0) {
1488 				if (mopt & MEDIAOPT_100TX) {
1489 					sc->ex_xcvr = XCVR_SEL_AUTO;
1490 				} else if (mopt & MEDIAOPT_MII)  {
1491 					sc->ex_xcvr = XCVR_SEL_MII;
1492 				} else {
1493 					return (EINVAL);
1494 				}
1495 			} else if (strcmp(val, "tp-fdx") == 0) {
1496 				/* select media option */
1497 				if (mopt & MEDIAOPT_10T) {
1498 					sc->ex_xcvr = XCVR_SEL_10T;
1499 					sc->ex_fdx = B_TRUE;
1500 				} else {
1501 					return (EINVAL);
1502 				}
1503 			} else if (strcmp(val, "tp-hdx") == 0) {
1504 				/* select media option */
1505 				if (mopt & MEDIAOPT_10T) {
1506 					sc->ex_xcvr = XCVR_SEL_10T;
1507 					sc->ex_fdx = B_FALSE;
1508 				} else {
1509 					return (EINVAL);
1510 				}
1511 			} else if (strcmp(val, "fx-fdx") == 0) {
1512 				if (mopt & MEDIAOPT_100FX) {
1513 					sc->ex_xcvr = XCVR_SEL_100FX;
1514 					sc->ex_fdx = B_TRUE;
1515 				} else {
1516 					return (EINVAL);
1517 				}
1518 			} else if (strcmp(val, "fx-hdx") == 0) {
1519 				if (mopt & MEDIAOPT_100FX) {
1520 					sc->ex_xcvr = XCVR_SEL_100FX;
1521 					sc->ex_fdx = B_FALSE;
1522 				} else {
1523 					return (EINVAL);
1524 				}
1525 			} else if (strcmp(val, "bnc") == 0) {
1526 				if (mopt & MEDIAOPT_BNC) {
1527 					sc->ex_xcvr = XCVR_SEL_BNC;
1528 					sc->ex_fdx = B_FALSE;
1529 				} else {
1530 					return (EINVAL);
1531 				}
1532 			} else if (strcmp(val, "aui") == 0) {
1533 				if (mopt & MEDIAOPT_AUI) {
1534 					sc->ex_xcvr = XCVR_SEL_AUI;
1535 					sc->ex_fdx = B_FALSE;
1536 				} else {
1537 					return (EINVAL);
1538 				}
1539 			} else if (strcmp(val, "fl-fdx") == 0) {
1540 				if (mopt & MEDIAOPT_10FL) {
1541 					sc->ex_xcvr = XCVR_SEL_AUI;
1542 					sc->ex_fdx = B_TRUE;
1543 				} else {
1544 					return (EINVAL);
1545 				}
1546 			} else if (strcmp(val, "fl-hdx") == 0) {
1547 				if (mopt & MEDIAOPT_10FL) {
1548 					sc->ex_xcvr = XCVR_SEL_AUI;
1549 					sc->ex_fdx = B_FALSE;
1550 				} else {
1551 					return (EINVAL);
1552 				}
1553 
1554 			} else {
1555 				return (EINVAL);
1556 			}
1557 			goto reset;
1558 		}
1559 		break;
1560 	default:
1561 		break;
1562 	}
1563 
1564 	return (ENOTSUP);
1565 
1566 reset:
1567 	mutex_enter(&sc->ex_intrlock);
1568 	mutex_enter(&sc->ex_txlock);
1569 	if (!sc->ex_suspended) {
1570 		elxl_reset(sc);
1571 		if (sc->ex_running) {
1572 			elxl_init(sc);
1573 		}
1574 	}
1575 	mutex_exit(&sc->ex_txlock);
1576 	mutex_exit(&sc->ex_intrlock);
1577 	return (0);
1578 }
1579 
1580 static int
1581 elxl_m_stat(void *arg, uint_t stat, uint64_t *val)
1582 {
1583 	elxl_t	*sc = arg;
1584 
1585 	if (stat == MAC_STAT_IFSPEED) {
1586 		elxl_getstats(sc);
1587 	}
1588 
1589 	if ((sc->ex_mii_active) &&
1590 	    (mii_m_getstat(sc->ex_miih, stat, val) == 0)) {
1591 		return (0);
1592 	}
1593 
1594 	switch (stat) {
1595 	case MAC_STAT_IFSPEED:
1596 		*val = sc->ex_speed;
1597 		break;
1598 
1599 	case ETHER_STAT_LINK_DUPLEX:
1600 		*val = sc->ex_duplex;
1601 		break;
1602 
1603 	case MAC_STAT_MULTIRCV:
1604 		*val = sc->ex_multircv;
1605 		break;
1606 
1607 	case MAC_STAT_BRDCSTRCV:
1608 		*val = sc->ex_brdcstrcv;
1609 		break;
1610 
1611 	case MAC_STAT_MULTIXMT:
1612 		*val = sc->ex_multixmt;
1613 		break;
1614 
1615 	case MAC_STAT_BRDCSTXMT:
1616 		*val = sc->ex_brdcstxmt;
1617 		break;
1618 
1619 	case MAC_STAT_IPACKETS:
1620 		*val = sc->ex_ipackets;
1621 		break;
1622 
1623 	case MAC_STAT_OPACKETS:
1624 		*val = sc->ex_opackets;
1625 		break;
1626 
1627 	case MAC_STAT_RBYTES:
1628 		*val = sc->ex_ibytes;
1629 		break;
1630 	case MAC_STAT_OBYTES:
1631 		*val = sc->ex_obytes;
1632 		break;
1633 
1634 	case MAC_STAT_COLLISIONS:
1635 	case ETHER_STAT_FIRST_COLLISIONS:
1636 		*val = sc->ex_singlecol + sc->ex_multcol;
1637 		break;
1638 
1639 	case ETHER_STAT_MULTI_COLLISIONS:
1640 		*val = sc->ex_multcol;
1641 		break;
1642 
1643 	case ETHER_STAT_TX_LATE_COLLISIONS:
1644 		*val = sc->ex_latecol;
1645 		break;
1646 
1647 	case ETHER_STAT_ALIGN_ERRORS:
1648 		*val = sc->ex_align;
1649 		break;
1650 
1651 	case ETHER_STAT_FCS_ERRORS:
1652 		*val = sc->ex_fcs;
1653 		break;
1654 
1655 	case ETHER_STAT_SQE_ERRORS:
1656 		*val = sc->ex_sqe;
1657 		break;
1658 
1659 	case ETHER_STAT_DEFER_XMTS:
1660 		*val = sc->ex_defer;
1661 		break;
1662 
1663 	case ETHER_STAT_CARRIER_ERRORS:
1664 		*val = sc->ex_nocarrier;
1665 		break;
1666 
1667 	case ETHER_STAT_TOOLONG_ERRORS:
1668 		*val = sc->ex_toolong;
1669 		break;
1670 
1671 	case ETHER_STAT_EX_COLLISIONS:
1672 		*val = sc->ex_excoll;
1673 		break;
1674 
1675 	case MAC_STAT_OVERFLOWS:
1676 		*val = sc->ex_oflo;
1677 		break;
1678 
1679 	case MAC_STAT_UNDERFLOWS:
1680 		*val = sc->ex_uflo;
1681 		break;
1682 
1683 	case ETHER_STAT_TOOSHORT_ERRORS:
1684 		*val = sc->ex_runt;
1685 		break;
1686 
1687 	case ETHER_STAT_JABBER_ERRORS:
1688 		*val = sc->ex_jabber;
1689 		break;
1690 
1691 	case MAC_STAT_NORCVBUF:
1692 		*val = sc->ex_allocbfail;
1693 		break;
1694 
1695 	case MAC_STAT_OERRORS:
1696 		*val = sc->ex_jabber + sc->ex_latecol + sc->ex_uflo;
1697 		break;
1698 
1699 	case MAC_STAT_IERRORS:
1700 		*val = sc->ex_align + sc->ex_fcs + sc->ex_runt +
1701 		    sc->ex_toolong + sc->ex_oflo + sc->ex_allocbfail;
1702 		break;
1703 
1704 	default:
1705 		return (ENOTSUP);
1706 	}
1707 	return (0);
1708 }
1709 
1710 static uint_t
1711 elxl_intr(caddr_t arg, caddr_t dontcare)
1712 {
1713 	elxl_t		*sc = (void *)arg;
1714 	uint16_t	stat;
1715 	mblk_t		*mphead = NULL;
1716 	mblk_t		**mpp = &mphead;
1717 
1718 	_NOTE(ARGUNUSED(dontcare));
1719 
1720 	mutex_enter(&sc->ex_intrlock);
1721 	if (sc->ex_suspended) {
1722 		mutex_exit(&sc->ex_intrlock);
1723 		return (DDI_INTR_UNCLAIMED);
1724 	}
1725 
1726 	stat = GET16(REG_CMD_STAT);
1727 
1728 	if ((stat & INT_LATCH) == 0)  {
1729 		mutex_exit(&sc->ex_intrlock);
1730 		return (DDI_INTR_UNCLAIMED);
1731 	}
1732 
1733 	/*
1734 	 * Acknowledge interrupts.
1735 	 */
1736 	PUT_CMD(CMD_INT_ACK | (stat & INT_WATCHED) | INT_LATCH);
1737 
1738 	if (stat & INT_HOST_ERROR) {
1739 		/* XXX: Potentially a good spot for FMA */
1740 		elxl_error(sc, "Adapter failure (%x)", stat);
1741 		mutex_enter(&sc->ex_txlock);
1742 		elxl_reset(sc);
1743 		if (sc->ex_running)
1744 			elxl_init(sc);
1745 		mutex_exit(&sc->ex_txlock);
1746 		mutex_exit(&sc->ex_intrlock);
1747 		return (DDI_INTR_CLAIMED);
1748 	}
1749 	if (stat & INT_UP_COMPLETE) {
1750 		ex_ring_t		*r;
1751 		ex_desc_t		*rxd;
1752 		ex_pd_t			*pd;
1753 		mblk_t			*mp;
1754 		uint32_t		pktstat;
1755 
1756 		r = &sc->ex_rxring;
1757 
1758 		for (;;) {
1759 			rxd = r->r_head;
1760 			pd = rxd->ed_pd;
1761 
1762 			(void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1763 			    sizeof (ex_pd_t), DDI_DMA_SYNC_FORKERNEL);
1764 
1765 			pktstat = GET_PD(r, pd->pd_status);
1766 
1767 			if ((pktstat & EX_UPD_COMPLETE) == 0) {
1768 				break;
1769 			}
1770 
1771 			/* Advance head to next packet. */
1772 			r->r_head = r->r_head->ed_next;
1773 
1774 			if ((mp = elxl_recv(sc, rxd, pktstat)) != NULL) {
1775 				*mpp = mp;
1776 				mpp = &mp->b_next;
1777 			}
1778 
1779 			/* clear the upComplete status, reset other fields */
1780 			PUT_PD(r, pd->pd_status, 0);
1781 			PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
1782 			PUT_PD(r, pd->pd_addr, rxd->ed_bufaddr);
1783 			(void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1784 			    sizeof (ex_pd_t), DDI_DMA_SYNC_FORDEV);
1785 		}
1786 
1787 		/*
1788 		 * If the engine stalled processing (usually due to
1789 		 * insufficient UPDs), restart it.
1790 		 */
1791 		if (GET32(REG_UPLISTPTR) == 0) {
1792 			/*
1793 			 * This seems to happen in an RX overrun
1794 			 * situation.
1795 			 */
1796 			mutex_enter(&sc->ex_txlock);
1797 			if (sc->ex_running)
1798 				elxl_init(sc);
1799 			mutex_exit(&sc->ex_txlock);
1800 		}
1801 		PUT_CMD(CMD_UP_UNSTALL);
1802 	}
1803 
1804 	mutex_exit(&sc->ex_intrlock);
1805 
1806 	if (mphead) {
1807 		mac_rx(sc->ex_mach, NULL, mphead);
1808 	}
1809 	if (stat & INT_STATS) {
1810 		elxl_getstats(sc);
1811 	}
1812 	if (stat & INT_DN_COMPLETE) {
1813 		mac_tx_update(sc->ex_mach);
1814 	}
1815 
1816 	return (DDI_INTR_CLAIMED);
1817 }
1818 
1819 static void
1820 elxl_getstats(elxl_t *sc)
1821 {
1822 	mutex_enter(&sc->ex_txlock);
1823 	if (sc->ex_suspended) {
1824 		mutex_exit(&sc->ex_txlock);
1825 		return;
1826 	}
1827 
1828 	SET_WIN(6);
1829 	/*
1830 	 * We count the packets and bytes elsewhere, but we need to
1831 	 * read the registers to clear them.
1832 	 */
1833 	(void) GET8(W6_RX_FRAMES);
1834 	(void) GET8(W6_TX_FRAMES);
1835 	(void) GET8(W6_UPPER_FRAMES);
1836 	(void) GET8(W6_RX_OVERRUNS);	/* counted by elxl_recv */
1837 	(void) GET16(W6_RX_BYTES);
1838 	(void) GET16(W6_TX_BYTES);
1839 
1840 	sc->ex_defer += GET8(W6_DEFER);
1841 	sc->ex_latecol += GET8(W6_TX_LATE_COL);
1842 	sc->ex_singlecol += GET8(W6_SINGLE_COL);
1843 	sc->ex_multcol += GET8(W6_MULT_COL);
1844 	sc->ex_sqe += GET8(W6_SQE_ERRORS);
1845 	sc->ex_nocarrier += GET8(W6_NO_CARRIER);
1846 
1847 	SET_WIN(4);
1848 	/* Note: we ought to report this somewhere... */
1849 	(void) GET8(W4_BADSSD);
1850 
1851 	mutex_exit(&sc->ex_txlock);
1852 }
1853 
1854 static void
1855 elxl_reset(elxl_t *sc)
1856 {
1857 	PUT_CMD(CMD_GLOBAL_RESET);
1858 	/*
1859 	 * Some ASICs need a longer time (20 ms) to come properly out
1860 	 * of reset.  Do not reduce this value.
1861 	 *
1862 	 * Note that this occurs only during attach and failure recovery,
1863 	 * so it should be mostly harmless.
1864 	 */
1865 	drv_usecwait(20000);
1866 	WAIT_CMD(sc);
1867 }
1868 
1869 static void
1870 elxl_stop(elxl_t *sc)
1871 {
1872 	ASSERT(mutex_owned(&sc->ex_intrlock));
1873 	ASSERT(mutex_owned(&sc->ex_txlock));
1874 
1875 	if (sc->ex_suspended)
1876 		return;
1877 
1878 	PUT_CMD(CMD_RX_DISABLE);
1879 	PUT_CMD(CMD_TX_DISABLE);
1880 	PUT_CMD(CMD_BNC_DISABLE);
1881 
1882 	elxl_reset_ring(&sc->ex_rxring, DDI_DMA_READ);
1883 	elxl_reset_ring(&sc->ex_txring, DDI_DMA_WRITE);
1884 
1885 	PUT_CMD(CMD_INT_ACK | INT_LATCH);
1886 	/* Disable all interrupts. (0 means "none".) */
1887 	PUT_CMD(CMD_INT_ENABLE | 0);
1888 }
1889 
1890 static void
1891 elxl_suspend(elxl_t *sc)
1892 {
1893 	if (sc->ex_miih) {
1894 		mii_suspend(sc->ex_miih);
1895 	}
1896 
1897 	mutex_enter(&sc->ex_intrlock);
1898 	mutex_enter(&sc->ex_txlock);
1899 	elxl_stop(sc);
1900 	sc->ex_suspended = B_TRUE;
1901 	mutex_exit(&sc->ex_txlock);
1902 	mutex_exit(&sc->ex_intrlock);
1903 }
1904 
1905 static void
1906 elxl_resume(dev_info_t *dip)
1907 {
1908 	elxl_t	*sc;
1909 
1910 	/* This should always succeed. */
1911 	sc = ddi_get_driver_private(dip);
1912 	ASSERT(sc);
1913 
1914 	mutex_enter(&sc->ex_intrlock);
1915 	mutex_enter(&sc->ex_txlock);
1916 	sc->ex_suspended = B_FALSE;
1917 	elxl_reset(sc);
1918 	if (sc->ex_running)
1919 		elxl_init(sc);
1920 	mutex_exit(&sc->ex_txlock);
1921 	mutex_exit(&sc->ex_intrlock);
1922 
1923 	if (sc->ex_miih) {
1924 		mii_resume(sc->ex_miih);
1925 	}
1926 }
1927 
1928 static void
1929 elxl_detach(elxl_t *sc)
1930 {
1931 	if (sc->ex_miih) {
1932 		/* Detach all PHYs */
1933 		mii_free(sc->ex_miih);
1934 	}
1935 	if (sc->ex_linkcheck) {
1936 		ddi_periodic_delete(sc->ex_linkcheck);
1937 	}
1938 
1939 	if (sc->ex_intrh != NULL) {
1940 		(void) ddi_intr_disable(sc->ex_intrh);
1941 		(void) ddi_intr_remove_handler(sc->ex_intrh);
1942 		(void) ddi_intr_free(sc->ex_intrh);
1943 		mutex_destroy(&sc->ex_intrlock);
1944 		mutex_destroy(&sc->ex_txlock);
1945 	}
1946 
1947 	if (sc->ex_pcih) {
1948 		pci_config_teardown(&sc->ex_pcih);
1949 	}
1950 	if (sc->ex_regsh) {
1951 		ddi_regs_map_free(&sc->ex_regsh);
1952 	}
1953 	ex_free_ring(&sc->ex_txring);
1954 	ex_free_ring(&sc->ex_rxring);
1955 
1956 	kmem_free(sc, sizeof (*sc));
1957 }
1958 
1959 /*
1960  * Read EEPROM data.  If the EEPROM stays busy, then zero will be
1961  * returned.  This will probably result in a bogus node address.
1962  */
1963 static uint16_t
1964 elxl_read_eeprom(elxl_t *sc, int offset)
1965 {
1966 	uint16_t data = 0;
1967 
1968 	SET_WIN(0);
1969 	if (elxl_eeprom_busy(sc))
1970 		goto out;
1971 
1972 	PUT16(W0_EE_CMD, EE_CMD_READ | (offset & 0x3f));
1973 	if (elxl_eeprom_busy(sc))
1974 		goto out;
1975 	data = GET16(W0_EE_DATA);
1976 out:
1977 	return (data);
1978 }
1979 
1980 static int
1981 elxl_eeprom_busy(elxl_t *sc)
1982 {
1983 	int i = 2000;
1984 
1985 	while (i--) {
1986 		if (!(GET16(W0_EE_CMD) & EE_CMD_BUSY))
1987 			return (0);
1988 		drv_usecwait(100);
1989 	}
1990 	elxl_error(sc, "EEPROM stays busy.");
1991 	return (1);
1992 }
1993 
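/*
 * The helpers below bit-bang the MII management (MDIO) interface
 * through the W4_PHYSMGMT register: ex_mii_sync() clocks out the
 * 32-bit preamble, and the read/write routines then shift out the
 * start bits, opcode, PHY and register addresses, handle the
 * turnaround, and transfer 16 data bits, following the standard
 * IEEE 802.3 clause 22 frame format.
 */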
1994 static void
1995 ex_mii_send_bits(struct ex_softc *sc, uint16_t bits, int cnt)
1996 {
1997 	uint16_t val;
1998 	ASSERT(cnt > 0);
1999 
2000 	PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2001 	drv_usecwait(1);
2002 
2003 	for (int i = (1 << (cnt - 1)); i; i >>= 1) {
2004 		if (bits & i) {
2005 			val = PHYSMGMT_DIR | PHYSMGMT_DATA;
2006 		} else {
2007 			val = PHYSMGMT_DIR;
2008 		}
2009 		PUT16(W4_PHYSMGMT, val);
2010 		drv_usecwait(1);
2011 		PUT16(W4_PHYSMGMT, val | PHYSMGMT_CLK);
2012 		drv_usecwait(1);
2013 		PUT16(W4_PHYSMGMT, val);
2014 		drv_usecwait(1);
2015 	}
2016 }
2017 
2018 static void
2019 ex_mii_sync(struct ex_softc *sc)
2020 {
2021 	/*
2022 	 * We set the data bit output, and strobe the clock 32 times.
2023 	 */
2024 	PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2025 	drv_usecwait(1);
2026 
2027 	for (int i = 0; i < 32; i++) {
2028 		PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR | PHYSMGMT_CLK);
2029 		drv_usecwait(1);
2030 		PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2031 		drv_usecwait(1);
2032 	}
2033 }
2034 
2035 static uint16_t
2036 elxl_mii_read(void *arg, uint8_t phy, uint8_t reg)
2037 {
2038 	elxl_t		*sc = arg;
2039 	uint16_t	data;
2040 	int		val;
2041 
2042 	if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2043 		return (0xffff);
2044 
2045 	mutex_enter(&sc->ex_txlock);
2046 	SET_WIN(4);
2047 
2048 	ex_mii_sync(sc);
2049 
2050 	ex_mii_send_bits(sc, 1, 2);	/* start */
2051 	ex_mii_send_bits(sc, 2, 2);	/* read command */
2052 	ex_mii_send_bits(sc, phy, 5);
2053 	ex_mii_send_bits(sc, reg, 5);
2054 
2055 	PUT16(W4_PHYSMGMT, 0);			/* switch to input */
2056 	drv_usecwait(1);
2057 	PUT16(W4_PHYSMGMT, PHYSMGMT_CLK);	/* turnaround time */
2058 	drv_usecwait(1);
2059 	PUT16(W4_PHYSMGMT, 0);
2060 	drv_usecwait(1);
2061 
2062 	PUT16(W4_PHYSMGMT, PHYSMGMT_CLK);	/* idle time */
2063 	drv_usecwait(1);
2064 	PUT16(W4_PHYSMGMT, 0);
2065 	drv_usecwait(1);
2066 
2067 	for (data = 0, val = 0x8000; val; val >>= 1) {
2068 		if (GET16(W4_PHYSMGMT) & PHYSMGMT_DATA) {
2069 			data |= val;
2070 		}
2071 		/* strobe the clock */
2072 		PUT16(W4_PHYSMGMT, PHYSMGMT_CLK);
2073 		drv_usecwait(1);
2074 		PUT16(W4_PHYSMGMT, 0);
2075 		drv_usecwait(1);
2076 	}
2077 
2078 	/* return to output mode */
2079 	PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2080 	drv_usecwait(1);
2081 
2082 	mutex_exit(&sc->ex_txlock);
2083 
2084 	return (data);
2085 }
2086 
2087 static void
2088 elxl_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
2089 {
2090 	elxl_t *sc = arg;
2091 
2092 	if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2093 		return;
2094 
2095 	mutex_enter(&sc->ex_txlock);
2096 	SET_WIN(4);
2097 
2098 	ex_mii_sync(sc);
2099 	ex_mii_send_bits(sc, 1, 2);	/* start */
2100 	ex_mii_send_bits(sc, 1, 2);	/* write */
2101 	ex_mii_send_bits(sc, phy, 5);
2102 	ex_mii_send_bits(sc, reg, 5);
2103 	ex_mii_send_bits(sc, 2, 2);	/* ack/turnaround */
2104 	ex_mii_send_bits(sc, data, 16);
2105 
2106 	/* return to output mode */
2107 	PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2108 	drv_usecwait(1);
2109 
2110 	mutex_exit(&sc->ex_txlock);
2111 }
2112 
2113 static void
2114 elxl_mii_notify(void *arg, link_state_t link)
2115 {
2116 	elxl_t		*sc = arg;
2117 	int		mctl;
2118 	link_duplex_t	duplex;
2119 
2120 	duplex = mii_get_duplex(sc->ex_miih);
2121 
2122 	mutex_enter(&sc->ex_txlock);
2123 	if (!sc->ex_mii_active) {
2124 		/* If we're using some other legacy media, bail out now */
2125 		mutex_exit(&sc->ex_txlock);
2126 		return;
2127 	}
2128 	if (!sc->ex_suspended) {
2129 		SET_WIN(3);
2130 		mctl = GET16(W3_MAC_CONTROL);
2131 		if (duplex == LINK_DUPLEX_FULL)
2132 			mctl |= MAC_CONTROL_FDX;
2133 		else
2134 			mctl &= ~MAC_CONTROL_FDX;
2135 		PUT16(W3_MAC_CONTROL, mctl);
2136 	}
2137 	mutex_exit(&sc->ex_txlock);
2138 
2139 	mac_link_update(sc->ex_mach, link);
2140 }
2141 
2142 static int
2143 elxl_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2144 {
2145 	switch (cmd) {
2146 	case DDI_ATTACH:
2147 		return (elxl_attach(dip));
2148 
2149 	case DDI_RESUME:
2150 		elxl_resume(dip);
2151 		return (DDI_SUCCESS);
2152 
2153 	default:
2154 		return (DDI_FAILURE);
2155 	}
2156 }
2157 
2158 static int
2159 elxl_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2160 {
2161 	elxl_t	*sc;
2162 
2163 	sc = ddi_get_driver_private(dip);
2164 	ASSERT(sc);
2165 
2166 	switch (cmd) {
2167 	case DDI_DETACH:
2168 		if (mac_disable(sc->ex_mach) != 0) {
2169 			return (DDI_FAILURE);
2170 		}
2171 		(void) mac_unregister(sc->ex_mach);
2172 		elxl_detach(sc);
2173 		return (DDI_SUCCESS);
2174 
2175 	case DDI_SUSPEND:
2176 		elxl_suspend(sc);
2177 		return (DDI_SUCCESS);
2178 
2179 	default:
2180 		return (DDI_FAILURE);
2181 	}
2182 }
2183 
2184 static int
2185 elxl_ddi_quiesce(dev_info_t *dip)
2186 {
2187 	elxl_t	*sc;
2188 
2189 	sc = ddi_get_driver_private(dip);
2190 	ASSERT(sc);
2191 
2192 	if (!sc->ex_suspended)
2193 		elxl_reset(sc);
2194 	return (DDI_SUCCESS);
2195 }
2196 
2197 static void
2198 elxl_error(elxl_t *sc, char *fmt, ...)
2199 {
2200 	va_list	ap;
2201 	char	buf[256];
2202 
2203 	va_start(ap, fmt);
2204 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2205 	va_end(ap);
2206 
2207 	cmn_err(CE_WARN, "%s%d: %s",
2208 	    ddi_driver_name(sc->ex_dip), ddi_get_instance(sc->ex_dip), buf);
2209 }
2210