xref: /titanic_51/usr/src/uts/common/io/afe/afe.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * Solaris driver for ethernet cards based on the ADMtek Centaur
3  *
4  * Copyright (c) 2007 by Garrett D'Amore <garrett@damore.org>.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the author nor the names of any co-contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS''
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*
32  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
33  * Use is subject to license terms.
34  */
35 
36 
37 #include <sys/varargs.h>
38 #include <sys/types.h>
39 #include <sys/modctl.h>
40 #include <sys/conf.h>
41 #include <sys/devops.h>
42 #include <sys/stream.h>
43 #include <sys/strsun.h>
44 #include <sys/cmn_err.h>
45 #include <sys/ethernet.h>
46 #include <sys/kmem.h>
47 #include <sys/time.h>
48 #include <sys/crc32.h>
49 #include <sys/mii.h>
50 #include <sys/miiregs.h>
51 #include <sys/mac.h>
52 #include <sys/mac_ether.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/vlan.h>
56 
57 #include "afe.h"
58 #include "afeimpl.h"
59 
60 /*
61  * Driver globals.
62  */
63 
64 /* table of supported devices */
65 static afe_card_t afe_cards[] = {
66 
67 	/*
68 	 * ADMtek Centaur and Comet
69 	 */
70 	{ 0x1317, 0x0981, "ADMtek AL981", MODEL_COMET },
71 	{ 0x1317, 0x0985, "ADMtek AN983", MODEL_CENTAUR },
72 	{ 0x1317, 0x1985, "ADMtek AN985", MODEL_CENTAUR },
73 	{ 0x1317, 0x9511, "ADMtek ADM9511", MODEL_CENTAUR },
74 	{ 0x1317, 0x9513, "ADMtek ADM9513", MODEL_CENTAUR },
75 	/*
76 	 * Accton just relabels other companies' controllers
77 	 */
78 	{ 0x1113, 0x1216, "Accton EN5251", MODEL_CENTAUR },
79 	/*
80 	 * Other Centaur-based boards sold under various brand names.
81 	 */
82 	{ 0x10b7, 0x9300, "3Com 3CSOHO100B-TX", MODEL_CENTAUR },
83 	{ 0x1113, 0xec02, "SMC SMC1244TX", MODEL_CENTAUR },
84 	{ 0x10b8, 0x1255, "SMC SMC1255TX", MODEL_CENTAUR },
85 	{ 0x111a, 0x1020, "Siemens SpeedStream PCI 10/100", MODEL_CENTAUR },
86 	{ 0x1113, 0x1207, "Accton EN1207F", MODEL_CENTAUR },
87 	{ 0x1113, 0x2242, "Accton EN2242", MODEL_CENTAUR },
88 	{ 0x1113, 0x2220, "Accton EN2220", MODEL_CENTAUR },
89 	{ 0x1113, 0x9216, "3M VOL-N100VF+TX", MODEL_CENTAUR },
90 	{ 0x1317, 0x0574, "Linksys LNE100TX", MODEL_CENTAUR },
91 	{ 0x1317, 0x0570, "Linksys NC100", MODEL_CENTAUR },
92 	{ 0x1385, 0x511a, "Netgear FA511", MODEL_CENTAUR },
93 	{ 0x13d1, 0xab02, "AboCom FE2500", MODEL_CENTAUR },
94 	{ 0x13d1, 0xab03, "AboCom PCM200", MODEL_CENTAUR },
95 	{ 0x13d1, 0xab08, "AboCom FE2500MX", MODEL_CENTAUR },
96 	{ 0x1414, 0x0001, "Microsoft MN-120", MODEL_CENTAUR },
97 	{ 0x16ec, 0x00ed, "U.S. Robotics USR997900", MODEL_CENTAUR },
98 	{ 0x1734, 0x100c, "Fujitsu-Siemens D1961", MODEL_CENTAUR },
99 	{ 0x1737, 0xab08, "Linksys PCMPC200", MODEL_CENTAUR },
100 	{ 0x1737, 0xab09, "Linksys PCM200", MODEL_CENTAUR },
101 	{ 0x17b3, 0xab08, "Hawking PN672TX", MODEL_CENTAUR },
102 };
103 
104 #define	ETHERVLANMTU	(ETHERMAX + 4)
105 
106 /*
107  * Function prototypes
108  */
109 static int	afe_attach(dev_info_t *, ddi_attach_cmd_t);
110 static int	afe_detach(dev_info_t *, ddi_detach_cmd_t);
111 static int	afe_resume(dev_info_t *);
112 static int	afe_quiesce(dev_info_t *);
113 static int	afe_m_unicst(void *, const uint8_t *);
114 static int	afe_m_multicst(void *, boolean_t, const uint8_t *);
115 static int	afe_m_promisc(void *, boolean_t);
116 static mblk_t	*afe_m_tx(void *, mblk_t *);
117 static void	afe_m_ioctl(void *, queue_t *, mblk_t *);
118 static int	afe_m_stat(void *, uint_t, uint64_t *);
119 static int	afe_m_start(void *);
120 static void	afe_m_stop(void *);
121 static int	afe_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
122     uint_t, void *, uint_t *);
123 static int	afe_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
124     const void *);
125 static unsigned	afe_intr(caddr_t);
126 static void	afe_startmac(afe_t *);
127 static void	afe_stopmac(afe_t *);
128 static void	afe_resetrings(afe_t *);
129 static boolean_t	afe_initialize(afe_t *);
130 static void	afe_startall(afe_t *);
131 static void	afe_stopall(afe_t *);
132 static void	afe_resetall(afe_t *);
133 static afe_txbuf_t *afe_alloctxbuf(afe_t *);
134 static void	afe_destroytxbuf(afe_txbuf_t *);
135 static afe_rxbuf_t *afe_allocrxbuf(afe_t *);
136 static void	afe_destroyrxbuf(afe_rxbuf_t *);
137 static boolean_t	afe_send(afe_t *, mblk_t *);
138 static int	afe_allocrxring(afe_t *);
139 static void	afe_freerxring(afe_t *);
140 static int	afe_alloctxring(afe_t *);
141 static void	afe_freetxring(afe_t *);
142 static void	afe_error(dev_info_t *, char *, ...);
143 static void	afe_setrxfilt(afe_t *);
144 static int	afe_watchdog(afe_t *);
145 static uint8_t	afe_sromwidth(afe_t *);
146 static uint16_t	afe_readsromword(afe_t *, unsigned);
147 static void	afe_readsrom(afe_t *, unsigned, unsigned, char *);
148 static void	afe_getfactaddr(afe_t *, uchar_t *);
149 static uint8_t	afe_miireadbit(afe_t *);
150 static void	afe_miiwritebit(afe_t *, uint8_t);
151 static void	afe_miitristate(afe_t *);
152 static uint16_t	afe_miireadgeneral(afe_t *, uint8_t, uint8_t);
153 static void	afe_miiwritegeneral(afe_t *, uint8_t, uint8_t, uint16_t);
154 static uint16_t	afe_miireadcomet(afe_t *, uint8_t, uint8_t);
155 static void	afe_miiwritecomet(afe_t *, uint8_t, uint8_t, uint16_t);
156 static uint16_t	afe_mii_read(void *, uint8_t, uint8_t);
157 static void	afe_mii_write(void *, uint8_t, uint8_t, uint16_t);
158 static void	afe_mii_notify(void *, link_state_t);
159 static void	afe_mii_reset(void *);
160 static void	afe_disableinterrupts(afe_t *);
161 static void	afe_enableinterrupts(afe_t *);
162 static void	afe_reclaim(afe_t *);
163 static mblk_t	*afe_receive(afe_t *);
164 
165 #define	KIOIP	KSTAT_INTR_PTR(afep->afe_intrstat)
166 
167 static mii_ops_t afe_mii_ops = {
168 	MII_OPS_VERSION,
169 	afe_mii_read,
170 	afe_mii_write,
171 	afe_mii_notify,
172 	afe_mii_reset
173 };
174 
175 static mac_callbacks_t afe_m_callbacks = {
176 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
177 	afe_m_stat,
178 	afe_m_start,
179 	afe_m_stop,
180 	afe_m_promisc,
181 	afe_m_multicst,
182 	afe_m_unicst,
183 	afe_m_tx,
184 	afe_m_ioctl,	/* mc_ioctl */
185 	NULL,		/* mc_getcapab */
186 	NULL,		/* mc_open */
187 	NULL,		/* mc_close */
188 	afe_m_setprop,
189 	afe_m_getprop,
190 };
191 
192 
193 /*
194  * Stream information
195  */
196 DDI_DEFINE_STREAM_OPS(afe_devops, nulldev, nulldev, afe_attach, afe_detach,
197     nodev, NULL, D_MP, NULL, afe_quiesce);
198 
199 /*
200  * Module linkage information.
201  */
202 
203 static struct modldrv afe_modldrv = {
204 	&mod_driverops,			/* drv_modops */
205 	"ADMtek Fast Ethernet",		/* drv_linkinfo */
206 	&afe_devops			/* drv_dev_ops */
207 };
208 
209 static struct modlinkage afe_modlinkage = {
210 	MODREV_1,		/* ml_rev */
211 	{ &afe_modldrv, NULL }	/* ml_linkage */
212 };
213 
214 /*
215  * Device attributes.
216  */
217 static ddi_device_acc_attr_t afe_devattr = {
218 	DDI_DEVICE_ATTR_V0,
219 	DDI_STRUCTURE_LE_ACC,
220 	DDI_STRICTORDER_ACC
221 };
222 
223 static ddi_device_acc_attr_t afe_bufattr = {
224 	DDI_DEVICE_ATTR_V0,
225 	DDI_NEVERSWAP_ACC,
226 	DDI_STRICTORDER_ACC
227 };
228 
229 static ddi_dma_attr_t afe_dma_attr = {
230 	DMA_ATTR_V0,		/* dma_attr_version */
231 	0,			/* dma_attr_addr_lo */
232 	0xFFFFFFFFU,		/* dma_attr_addr_hi */
233 	0x7FFFFFFFU,		/* dma_attr_count_max */
234 	4,			/* dma_attr_align */
235 	0x3F,			/* dma_attr_burstsizes */
236 	1,			/* dma_attr_minxfer */
237 	0xFFFFFFFFU,		/* dma_attr_maxxfer */
238 	0xFFFFFFFFU,		/* dma_attr_seg */
239 	1,			/* dma_attr_sgllen */
240 	1,			/* dma_attr_granular */
241 	0			/* dma_attr_flags */
242 };
243 
244 /*
245  * Tx buffers can be arbitrarily aligned.  Additionally, they can
246  * cross a page boundary, so we use the two buffer addresses of the
247  * chip to provide a two-entry scatter-gather list.
248  */
249 static ddi_dma_attr_t afe_dma_txattr = {
250 	DMA_ATTR_V0,		/* dma_attr_version */
251 	0,			/* dma_attr_addr_lo */
252 	0xFFFFFFFFU,		/* dma_attr_addr_hi */
253 	0x7FFFFFFFU,		/* dma_attr_count_max */
254 	1,			/* dma_attr_align */
255 	0x3F,			/* dma_attr_burstsizes */
256 	1,			/* dma_attr_minxfer */
257 	0xFFFFFFFFU,		/* dma_attr_maxxfer */
258 	0xFFFFFFFFU,		/* dma_attr_seg */
259 	2,			/* dma_attr_sgllen */
260 	1,			/* dma_attr_granular */
261 	0			/* dma_attr_flags */
262 };
263 
264 /*
265  * Ethernet addresses.
266  */
267 static uchar_t afe_broadcast[ETHERADDRL] = {
268 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
269 };
270 
271 /*
272  * DDI entry points.
273  */
274 int
275 _init(void)
276 {
277 	int	rv;
278 	mac_init_ops(&afe_devops, "afe");
279 	if ((rv = mod_install(&afe_modlinkage)) != DDI_SUCCESS) {
280 		mac_fini_ops(&afe_devops);
281 	}
282 	return (rv);
283 }
284 
285 int
286 _fini(void)
287 {
288 	int	rv;
289 	if ((rv = mod_remove(&afe_modlinkage)) == DDI_SUCCESS) {
290 		mac_fini_ops(&afe_devops);
291 	}
292 	return (rv);
293 }
294 
295 int
296 _info(struct modinfo *modinfop)
297 {
298 	return (mod_info(&afe_modlinkage, modinfop));
299 }
300 
301 int
302 afe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
303 {
304 	afe_t			*afep;
305 	mac_register_t		*macp;
306 	int			inst = ddi_get_instance(dip);
307 	ddi_acc_handle_t	pci;
308 	uint16_t		venid;
309 	uint16_t		devid;
310 	uint16_t		svid;
311 	uint16_t		ssid;
312 	uint16_t		cachesize;
313 	afe_card_t		*cardp;
314 	int			i;
315 
316 	switch (cmd) {
317 	case DDI_RESUME:
318 		return (afe_resume(dip));
319 
320 	case DDI_ATTACH:
321 		break;
322 
323 	default:
324 		return (DDI_FAILURE);
325 	}
326 
327 	/* this card is a bus master, reject any slave-only slot */
328 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
329 		afe_error(dip, "slot does not support PCI bus-master");
330 		return (DDI_FAILURE);
331 	}
332 	/* PCI devices shouldn't generate hilevel interrupts */
333 	if (ddi_intr_hilevel(dip, 0) != 0) {
334 		afe_error(dip, "hilevel interrupts not supported");
335 		return (DDI_FAILURE);
336 	}
337 	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
338 		afe_error(dip, "unable to setup PCI config handle");
339 		return (DDI_FAILURE);
340 	}
341 
342 	venid = pci_config_get16(pci, PCI_VID);
343 	devid = pci_config_get16(pci, PCI_DID);
344 	svid = pci_config_get16(pci, PCI_SVID);
345 	ssid = pci_config_get16(pci, PCI_SSID);
346 
347 	/*
348 	 * Note: ADMtek boards seem to misprogram themselves with bogus
349 	 * timings, which do not seem to work properly on SPARC.  We
350 	 * reprogram them to zero (but only if they appear to be broken),
351 	 * which seems to at least work.  It's unclear to me whether this
352 	 * is a legal or wise practice, but it certainly works better
353 	 * than the original values.  (I would love to hear
354 	 * suggestions for better values, or a better strategy.)
355 	 */
356 	if ((pci_config_get8(pci, PCI_MINGNT) == 0xff) &&
357 	    (pci_config_get8(pci, PCI_MAXLAT) == 0xff)) {
358 		pci_config_put8(pci, PCI_MINGNT, 0);
359 		pci_config_put8(pci, PCI_MAXLAT, 0);
360 	}
361 
362 	/*
363 	 * Scan the card table for a match; a hit on the subsystem IDs
364 	 * is preferred over a plain vendor/device ID match.
365 	 */
366 	cardp = NULL;
367 	for (i = 0; i < (sizeof (afe_cards) / sizeof (afe_card_t)); i++) {
368 		if ((venid == afe_cards[i].card_venid) &&
369 		    (devid == afe_cards[i].card_devid)) {
370 			cardp = &afe_cards[i];
371 		}
372 		if ((svid == afe_cards[i].card_venid) &&
373 		    (ssid == afe_cards[i].card_devid)) {
374 			cardp = &afe_cards[i];
375 			break;
376 		}
377 	}
378 
379 	if (cardp == NULL) {
380 		pci_config_teardown(&pci);
381 		afe_error(dip, "Unable to identify PCI card");
382 		return (DDI_FAILURE);
383 	}
384 
385 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
386 	    cardp->card_cardname) != DDI_PROP_SUCCESS) {
387 		pci_config_teardown(&pci);
388 		afe_error(dip, "Unable to create model property");
389 		return (DDI_FAILURE);
390 	}
391 
392 	/*
393 	 * Grab the PCI cachesize -- we use this to program the
394 	 * cache-optimization bus access bits.
395 	 */
396 	cachesize = pci_config_get8(pci, PCI_CLS);
397 
398 	/* this cannot fail */
399 	afep = kmem_zalloc(sizeof (afe_t), KM_SLEEP);
400 	ddi_set_driver_private(dip, afep);
401 
402 	/* get the interrupt block cookie */
403 	if (ddi_get_iblock_cookie(dip, 0, &afep->afe_icookie) != DDI_SUCCESS) {
404 		afe_error(dip, "ddi_get_iblock_cookie failed");
405 		pci_config_teardown(&pci);
406 		kmem_free(afep, sizeof (afe_t));
407 		return (DDI_FAILURE);
408 	}
409 
410 	afep->afe_dip = dip;
411 	afep->afe_cardp = cardp;
412 	afep->afe_phyaddr = -1;
413 	afep->afe_cachesize = cachesize;
414 
415 	afep->afe_forcefiber = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
416 	    "fiber", 0);
417 
418 	mutex_init(&afep->afe_xmtlock, NULL, MUTEX_DRIVER, afep->afe_icookie);
419 	mutex_init(&afep->afe_intrlock, NULL, MUTEX_DRIVER, afep->afe_icookie);
420 
421 	/*
422 	 * Enable bus master, IO space, and memory space accesses.
423 	 */
424 	pci_config_put16(pci, PCI_CMD,
425 	    pci_config_get16(pci, PCI_CMD) | PCI_CMD_BME | PCI_CMD_MAE);
426 
427 	/* we're done with this now, drop it */
428 	pci_config_teardown(&pci);
429 
430 	/*
431 	 * Initialize interrupt kstat.  This should not normally fail, since
432 	 * we don't use a persistent stat.  We do it this way to avoid having
433 	 * to test for it at run time on the hot path.
434 	 */
435 	afep->afe_intrstat = kstat_create("afe", inst, "intr", "controller",
436 	    KSTAT_TYPE_INTR, 1, 0);
437 	if (afep->afe_intrstat == NULL) {
438 		afe_error(dip, "kstat_create failed");
439 		goto failed;
440 	}
441 	kstat_install(afep->afe_intrstat);
442 
443 	/*
444 	 * Set up the MII.
445 	 */
446 	if ((afep->afe_mii = mii_alloc(afep, dip, &afe_mii_ops)) == NULL) {
447 		goto failed;
448 	}
449 
450 	/*
451 	 * Centaur can support PAUSE, but Comet can't.
452 	 */
453 	if (AFE_MODEL(afep) == MODEL_CENTAUR) {
454 		mii_set_pauseable(afep->afe_mii, B_TRUE, B_FALSE);
455 	} else {
456 		mii_set_pauseable(afep->afe_mii, B_FALSE, B_FALSE);
457 	}
458 
459 	/*
460 	 * Map in the device registers.
461 	 */
462 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&afep->afe_regs,
463 	    0, 0, &afe_devattr, &afep->afe_regshandle)) {
464 		afe_error(dip, "ddi_regs_map_setup failed");
465 		goto failed;
466 	}
467 
468 	/*
469 	 * Allocate DMA resources (descriptor rings and buffers).
470 	 */
471 	if ((afe_allocrxring(afep) != DDI_SUCCESS) ||
472 	    (afe_alloctxring(afep) != DDI_SUCCESS)) {
473 		afe_error(dip, "unable to allocate DMA resources");
474 		goto failed;
475 	}
476 
477 	/* Initialize the chip. */
478 	mutex_enter(&afep->afe_intrlock);
479 	mutex_enter(&afep->afe_xmtlock);
480 	if (!afe_initialize(afep)) {
481 		mutex_exit(&afep->afe_xmtlock);
482 		mutex_exit(&afep->afe_intrlock);
483 		goto failed;
484 	}
485 	mutex_exit(&afep->afe_xmtlock);
486 	mutex_exit(&afep->afe_intrlock);
487 
488 	/* Determine the number of address bits to our EEPROM. */
489 	afep->afe_sromwidth = afe_sromwidth(afep);
490 
491 	/*
492 	 * Get the factory ethernet address.  This becomes the current
493 	 * ethernet address (it can be overridden later via ifconfig).
494 	 */
495 	afe_getfactaddr(afep, afep->afe_curraddr);
496 	afep->afe_promisc = B_FALSE;
497 
498 	/* make sure we configure the initial unicast and multicast filter */
499 	(void) afe_m_unicst(afep, afep->afe_curraddr);
500 	(void) afe_m_multicst(afep, B_TRUE, afe_broadcast);
501 
502 	/*
503 	 * Establish interrupt handler.
504 	 */
505 	if (ddi_add_intr(dip, 0, NULL, NULL, afe_intr, (caddr_t)afep) !=
506 	    DDI_SUCCESS) {
507 		afe_error(dip, "unable to add interrupt");
508 		goto failed;
509 	}
510 
511 	/* TODO: do the power management stuff */
512 
513 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
514 		afe_error(dip, "mac_alloc failed");
515 		goto failed;
516 	}
517 
518 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
519 	macp->m_driver = afep;
520 	macp->m_dip = dip;
521 	macp->m_src_addr = afep->afe_curraddr;
522 	macp->m_callbacks = &afe_m_callbacks;
523 	macp->m_min_sdu = 0;
524 	macp->m_max_sdu = ETHERMTU;
525 	macp->m_margin = VLAN_TAGSZ;
526 
527 	if (mac_register(macp, &afep->afe_mh) == DDI_SUCCESS) {
528 		mac_free(macp);
529 		return (DDI_SUCCESS);
530 	}
531 
532 	/* failed to register with MAC */
533 	mac_free(macp);
534 failed:
535 	if (afep->afe_icookie != NULL) {
536 		ddi_remove_intr(dip, 0, afep->afe_icookie);
537 	}
538 	if (afep->afe_intrstat) {
539 		kstat_delete(afep->afe_intrstat);
540 	}
541 	mutex_destroy(&afep->afe_intrlock);
542 	mutex_destroy(&afep->afe_xmtlock);
543 
544 	afe_freerxring(afep);
545 	afe_freetxring(afep);
546 
547 	if (afep->afe_regshandle != NULL) {
548 		ddi_regs_map_free(&afep->afe_regshandle);
549 	}
550 	kmem_free(afep, sizeof (afe_t));
551 	return (DDI_FAILURE);
552 }
553 
554 int
555 afe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
556 {
557 	afe_t		*afep;
558 
559 	afep = ddi_get_driver_private(dip);
560 	if (afep == NULL) {
561 		afe_error(dip, "no soft state in detach!");
562 		return (DDI_FAILURE);
563 	}
564 
565 	switch (cmd) {
566 	case DDI_DETACH:
567 
568 		if (mac_unregister(afep->afe_mh) != 0) {
569 			return (DDI_FAILURE);
570 		}
571 
572 		/* make sure hardware is quiesced */
573 		mutex_enter(&afep->afe_intrlock);
574 		mutex_enter(&afep->afe_xmtlock);
575 		afep->afe_flags &= ~AFE_RUNNING;
576 		afe_stopall(afep);
577 		mutex_exit(&afep->afe_xmtlock);
578 		mutex_exit(&afep->afe_intrlock);
579 
580 		/* clean up and shut down device */
581 		ddi_remove_intr(dip, 0, afep->afe_icookie);
582 
583 		/* clean up MII layer */
584 		mii_free(afep->afe_mii);
585 
586 		/* clean up kstats */
587 		kstat_delete(afep->afe_intrstat);
588 
589 		ddi_prop_remove_all(dip);
590 
591 		/* free up any left over buffers or DMA resources */
592 		afe_freerxring(afep);
593 		afe_freetxring(afep);
594 
595 		ddi_regs_map_free(&afep->afe_regshandle);
596 		mutex_destroy(&afep->afe_intrlock);
597 		mutex_destroy(&afep->afe_xmtlock);
598 
599 		kmem_free(afep, sizeof (afe_t));
600 		return (DDI_SUCCESS);
601 
602 	case DDI_SUSPEND:
603 		/* stop MII monitoring */
604 		mii_suspend(afep->afe_mii);
605 
606 		/* quiesce the hardware */
607 		mutex_enter(&afep->afe_intrlock);
608 		mutex_enter(&afep->afe_xmtlock);
609 		afep->afe_flags |= AFE_SUSPENDED;
610 		afe_stopall(afep);
611 		mutex_exit(&afep->afe_xmtlock);
612 		mutex_exit(&afep->afe_intrlock);
613 		return (DDI_SUCCESS);
614 	default:
615 		return (DDI_FAILURE);
616 	}
617 }
618 
619 int
620 afe_resume(dev_info_t *dip)
621 {
622 	afe_t	*afep;
623 
624 	if ((afep = ddi_get_driver_private(dip)) == NULL) {
625 		return (DDI_FAILURE);
626 	}
627 
628 	mutex_enter(&afep->afe_intrlock);
629 	mutex_enter(&afep->afe_xmtlock);
630 
631 	afep->afe_flags &= ~AFE_SUSPENDED;
632 
633 	/* re-initialize chip */
634 	if (!afe_initialize(afep)) {
635 		afe_error(afep->afe_dip, "unable to resume chip!");
636 		afep->afe_flags |= AFE_SUSPENDED;
637 		mutex_exit(&afep->afe_intrlock);
638 		mutex_exit(&afep->afe_xmtlock);
639 		return (DDI_SUCCESS);
640 	}
641 
642 	/* start the chip */
643 	if (afep->afe_flags & AFE_RUNNING) {
644 		afe_startall(afep);
645 	}
646 
647 	/* drop locks */
648 	mutex_exit(&afep->afe_xmtlock);
649 	mutex_exit(&afep->afe_intrlock);
650 
651 	mii_resume(afep->afe_mii);
652 
653 	return (DDI_SUCCESS);
654 }
655 
656 int
657 afe_quiesce(dev_info_t *dip)
658 {
659 	afe_t	*afep;
660 
661 	if ((afep = ddi_get_driver_private(dip)) == NULL) {
662 		return (DDI_FAILURE);
663 	}
664 
665 	SETBIT(afep, CSR_PAR, PAR_RESET);
666 	/*
667 	 * At 66 MHz each access takes 16 nsec or more (always more),
668 	 * so we need about 3,333 retries to cover 50 usec.  We just
669 	 * round up to 5000 times.  Unless the hardware is horked,
670 	 * it will always terminate *well* before that anyway.
671 	 */
672 	for (int i = 0; i < 5000; i++) {
673 		if ((GETCSR(afep, CSR_PAR) & PAR_RESET) == 0) {
674 			return (DDI_SUCCESS);
675 		}
676 	}
677 
678 	/* hardware didn't quiesce - force a full reboot (PCI reset) */
679 	return (DDI_FAILURE);
680 }
681 
682 void
683 afe_setrxfilt(afe_t *afep)
684 {
685 	unsigned rxen, pa0, pa1;
686 
687 	if (afep->afe_flags & AFE_SUSPENDED) {
688 		/* don't touch a suspended interface */
689 		return;
690 	}
691 
692 	rxen = GETCSR(afep, CSR_NAR) & NAR_RX_ENABLE;
693 
694 	/* stop receiver */
695 	if (rxen) {
696 		afe_stopmac(afep);
697 	}
698 
699 	/* program promiscuous mode */
700 	if (afep->afe_promisc)
701 		SETBIT(afep, CSR_NAR, NAR_RX_PROMISC);
702 	else
703 		CLRBIT(afep, CSR_NAR, NAR_RX_PROMISC);
704 
705 	/* program mac address */
706 	pa0 = (afep->afe_curraddr[3] << 24) | (afep->afe_curraddr[2] << 16) |
707 	    (afep->afe_curraddr[1] << 8) | afep->afe_curraddr[0];
708 	pa1 = (afep->afe_curraddr[5] << 8) | afep->afe_curraddr[4];
709 
710 	PUTCSR(afep, CSR_PAR0, pa0);
711 	PUTCSR(afep, CSR_PAR1, pa1);
712 	if (rxen) {
713 		SETBIT(afep, CSR_NAR, rxen);
714 	}
715 
716 	/* program multicast filter */
717 	if (AFE_MODEL(afep) == MODEL_COMET) {
718 		if (afep->afe_mctab[0] || afep->afe_mctab[1]) {
719 			SETBIT(afep, CSR_NAR, NAR_RX_MULTI);
720 		} else {
721 			CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
722 		}
723 	} else {
724 		CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
725 		PUTCSR(afep, CSR_MAR0, afep->afe_mctab[0]);
726 		PUTCSR(afep, CSR_MAR1, afep->afe_mctab[1]);
727 	}
728 
729 	/* restart receiver */
730 	if (rxen) {
731 		afe_startmac(afep);
732 	}
733 }
734 
735 int
736 afe_watchdog(afe_t *afep)
737 {
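	/*
	 * A transmit stall is declared if the deadline armed by
	 * afe_send() has passed and the ring still holds unreclaimed
	 * descriptors; the interrupt handler then resets the chip.
	 */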
738 	if ((afep->afe_txstall_time != 0) &&
739 	    (gethrtime() > afep->afe_txstall_time) &&
740 	    (afep->afe_txavail != AFE_TXRING)) {
741 		afep->afe_txstall_time = 0;
742 		afe_error(afep->afe_dip, "TX stall detected!");
743 		return (DDI_FAILURE);
744 	} else {
745 		return (DDI_SUCCESS);
746 	}
747 }
748 
749 int
750 afe_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
751 {
752 	afe_t		*afep = arg;
753 	int		index;
754 	uint32_t	crc;
755 	uint32_t	bit;
756 	uint32_t	newval, oldval;
757 
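	/*
	 * The multicast filter is a bit vector spread across the two
	 * 32-bit MAR registers.  We hash the address with CRC-32 and
	 * reference-count each bucket so that a bit is only cleared
	 * when the last group hashing to it is removed.
	 */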
758 	CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
759 	crc %= AFE_MCHASH;
760 
761 	/* bit within a 32-bit word */
762 	index = crc / 32;
763 	bit = (1 << (crc % 32));
764 
765 	mutex_enter(&afep->afe_intrlock);
766 	mutex_enter(&afep->afe_xmtlock);
767 	newval = oldval = afep->afe_mctab[index];
768 
769 	if (add) {
770 		afep->afe_mccount[crc]++;
771 		if (afep->afe_mccount[crc] == 1)
772 			newval |= bit;
773 	} else {
774 		afep->afe_mccount[crc]--;
775 		if (afep->afe_mccount[crc] == 0)
776 			newval &= ~bit;
777 	}
778 	if (newval != oldval) {
779 		afep->afe_mctab[index] = newval;
780 		afe_setrxfilt(afep);
781 	}
782 
783 	mutex_exit(&afep->afe_xmtlock);
784 	mutex_exit(&afep->afe_intrlock);
785 
786 	return (0);
787 }
788 
789 int
790 afe_m_promisc(void *arg, boolean_t on)
791 {
792 	afe_t		*afep = arg;
793 
794 	/* exclusive access to the card while we reprogram it */
795 	mutex_enter(&afep->afe_intrlock);
796 	mutex_enter(&afep->afe_xmtlock);
797 	/* save current promiscuous mode state for replay in resume */
798 	afep->afe_promisc = on;
799 
800 	afe_setrxfilt(afep);
801 	mutex_exit(&afep->afe_xmtlock);
802 	mutex_exit(&afep->afe_intrlock);
803 
804 	return (0);
805 }
806 
807 int
808 afe_m_unicst(void *arg, const uint8_t *macaddr)
809 {
810 	afe_t		*afep = arg;
811 
812 	/* exclusive access to the card while we reprogram it */
813 	mutex_enter(&afep->afe_intrlock);
814 	mutex_enter(&afep->afe_xmtlock);
815 
816 	bcopy(macaddr, afep->afe_curraddr, ETHERADDRL);
817 	afe_setrxfilt(afep);
818 
819 	mutex_exit(&afep->afe_xmtlock);
820 	mutex_exit(&afep->afe_intrlock);
821 
822 	return (0);
823 }
824 
825 mblk_t *
826 afe_m_tx(void *arg, mblk_t *mp)
827 {
828 	afe_t	*afep = arg;
829 	mblk_t	*nmp;
830 
831 	mutex_enter(&afep->afe_xmtlock);
832 
833 	if (afep->afe_flags & AFE_SUSPENDED) {
834 		while ((nmp = mp) != NULL) {
835 			afep->afe_carrier_errors++;
836 			mp = mp->b_next;
837 			freemsg(nmp);
838 		}
839 		mutex_exit(&afep->afe_xmtlock);
840 		return (NULL);
841 	}
842 
843 	while (mp != NULL) {
844 		nmp = mp->b_next;
845 		mp->b_next = NULL;
846 
847 		if (!afe_send(afep, mp)) {
848 			mp->b_next = nmp;
849 			break;
850 		}
851 		mp = nmp;
852 	}
853 	mutex_exit(&afep->afe_xmtlock);
854 
855 	return (mp);
856 }
857 
858 void
859 afe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
860 {
861 	afe_t	*afep = arg;
862 
863 	if (mii_m_loop_ioctl(afep->afe_mii, wq, mp))
864 		return;
865 
866 	miocnak(wq, mp, 0, EINVAL);
867 }
868 
869 /*
870  * Hardware management.
871  */
872 static boolean_t
873 afe_initialize(afe_t *afep)
874 {
875 	int		i;
876 	unsigned	val;
877 	uint32_t	par, nar;
878 
879 	ASSERT(mutex_owned(&afep->afe_intrlock));
880 	ASSERT(mutex_owned(&afep->afe_xmtlock));
881 
882 	SETBIT(afep, CSR_PAR, PAR_RESET);
883 	for (i = 1; i < 10; i++) {
884 		drv_usecwait(5);
885 		val = GETCSR(afep, CSR_PAR);
886 		if (!(val & PAR_RESET)) {
887 			break;
888 		}
889 	}
890 	if (i == 10) {
891 		afe_error(afep->afe_dip, "timed out waiting for reset!");
892 		return (B_FALSE);
893 	}
894 
895 	/*
896 	 * Updated Centaur data sheets show that the Comet and Centaur are
897 	 * alike here (contrary to earlier versions of the data sheet).
898 	 */
899 	/* XXX:? chip problems */
900 	/* par = PAR_MRLE | PAR_MRME | PAR_MWIE; */
901 	par = 0;
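	/*
	 * The PCI cache line size register is in units of 32-bit words,
	 * so the values 8, 16 and 32 below correspond to 32, 64 and 128
	 * byte cache lines.
	 */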
902 	switch (afep->afe_cachesize) {
903 	case 8:
904 		par |= PAR_CALIGN_8 | PAR_BURST_8;
905 		break;
906 	case 16:
907 		par |= PAR_CALIGN_16 | PAR_BURST_16;
908 		break;
909 	case 32:
910 		par |= PAR_CALIGN_32 | PAR_BURST_32;
911 		break;
912 	default:
913 		par |= PAR_BURST_32;
914 		par &= ~(PAR_MWIE | PAR_MRLE | PAR_MRME);
915 		break;
916 
917 	}
918 
919 	PUTCSR(afep, CSR_PAR, par);
920 
921 	/* enable transmit underrun auto-recovery */
922 	SETBIT(afep, CSR_CR, CR_TXURAUTOR);
923 
924 	afe_resetrings(afep);
925 
926 	/* clear the lost packet counter (cleared on read) */
927 	(void) GETCSR(afep, CSR_LPC);
928 
929 	nar = GETCSR(afep, CSR_NAR);
930 	nar &= ~NAR_TR;		/* clear tx threshold */
931 	nar |= NAR_SF;		/* store-and-forward */
932 	nar |= NAR_HBD;		/* disable SQE test */
933 	PUTCSR(afep, CSR_NAR, nar);
934 
935 	afe_setrxfilt(afep);
936 
937 	return (B_TRUE);
938 }
939 
940 /*
941  * Serial EEPROM access - inspired by the FreeBSD implementation.
942  */
943 
944 uint8_t
945 afe_sromwidth(afe_t *afep)
946 {
947 	int		i;
948 	uint32_t	eeread;
949 	uint8_t		addrlen = 8;
950 
951 	eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;
952 
953 	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
954 	drv_usecwait(1);
955 	PUTCSR(afep, CSR_SPR, eeread);
956 
957 	/* command bits first */
958 	for (i = 4; i != 0; i >>= 1) {
959 		unsigned val = (SROM_READCMD & i) ? SPR_SROM_DIN : 0;
960 
961 		PUTCSR(afep, CSR_SPR, eeread | val);
962 		drv_usecwait(1);
963 		PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
964 		drv_usecwait(1);
965 	}
966 
967 	PUTCSR(afep, CSR_SPR, eeread);
968 
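	/*
	 * Clock out zero address bits until the EEPROM drives DOUT low;
	 * that point marks the end of the address field and hence the
	 * number of address bits the part expects.
	 */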
969 	for (addrlen = 1; addrlen <= 12; addrlen++) {
970 		PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
971 		drv_usecwait(1);
972 		if (!(GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT)) {
973 			PUTCSR(afep, CSR_SPR, eeread);
974 			drv_usecwait(1);
975 			break;
976 		}
977 		PUTCSR(afep, CSR_SPR, eeread);
978 		drv_usecwait(1);
979 	}
980 
981 	/* turn off accesses to the EEPROM */
982 	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
983 
984 	return ((addrlen < 4 || addrlen > 12) ? 6 : addrlen);
985 }
986 
987 /*
988  * The words in EEPROM are stored in little endian order.  We
989  * shift bits out in big endian order, though.  This requires
990  * a byte swap on some platforms.
991  */
992 uint16_t
993 afe_readsromword(afe_t *afep, unsigned romaddr)
994 {
995 	int		i;
996 	uint16_t	word = 0;
997 	uint16_t	retval;
998 	int		eeread;
999 	uint8_t		addrlen;
1000 	int		readcmd;
1001 	uchar_t		*ptr;
1002 
1003 	eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;
1004 	addrlen = afep->afe_sromwidth;
1005 	readcmd = (SROM_READCMD << addrlen) | romaddr;
1006 
1007 	if (romaddr >= (1 << addrlen)) {
1008 		/* too big to fit! */
1009 		return (0);
1010 	}
1011 
1012 	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
1013 	PUTCSR(afep, CSR_SPR, eeread);
1014 
1015 	/* command and address bits */
1016 	for (i = 4 + addrlen; i >= 0; i--) {
1017 		short val = (readcmd & (1 << i)) ? SPR_SROM_DIN : 0;
1018 
1019 		PUTCSR(afep, CSR_SPR, eeread | val);
1020 		drv_usecwait(1);
1021 		PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
1022 		drv_usecwait(1);
1023 	}
1024 
1025 	PUTCSR(afep, CSR_SPR, eeread);
1026 
1027 	for (i = 0; i < 16; i++) {
1028 		PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
1029 		drv_usecwait(1);
1030 		word <<= 1;
1031 		if (GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT) {
1032 			word |= 1;
1033 		}
1034 		PUTCSR(afep, CSR_SPR, eeread);
1035 		drv_usecwait(1);
1036 	}
1037 
1038 	/* turn off accesses to the EEPROM */
1039 	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
1040 
1041 	/*
1042 	 * Fix up the endianness thing.  Note that the values
1043 	 * are stored in little endian format on the SROM.
1044 	 */
1045 	ptr = (uchar_t *)&word;
1046 	retval = (ptr[1] << 8) | ptr[0];
1047 	return (retval);
1048 }
1049 
1050 void
1051 afe_readsrom(afe_t *afep, unsigned romaddr, unsigned len, char *dest)
1052 {
1053 	int	i;
1054 	uint16_t	word;
1055 	uint16_t	*ptr = (uint16_t *)((void *)dest);
1056 	for (i = 0; i < len; i++) {
1057 		word = afe_readsromword(afep, romaddr + i);
1058 		*ptr = word;
1059 		ptr++;
1060 	}
1061 }
1062 
1063 void
1064 afe_getfactaddr(afe_t *afep, uchar_t *eaddr)
1065 {
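	/* the factory MAC address is stored as three 16-bit words */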
1066 	afe_readsrom(afep, SROM_ENADDR, ETHERADDRL / 2, (char *)eaddr);
1067 }
1068 
1069 
1070 
1071 /*
1072  * MII management.
1073  */
1074 void
1075 afe_mii_reset(void *arg)
1076 {
1077 	afe_t		*afep = arg;
1078 	int		fiber;
1079 	uint16_t	mcr;
1080 	uint16_t	pilr;
1081 	uint8_t		phyaddr;
1082 
1083 	/*
1084 	 * It's entirely possible that this belongs as a PHY-specific
1085 	 * override.
1086 	 */
1087 	if ((mii_get_id(afep->afe_mii) & 0xfffffff0) != 0x225410) {
1088 		/* if it's not an AN983B, we don't care */
1089 		return;
1090 	}
1091 
1092 	phyaddr = mii_get_addr(afep->afe_mii);
1093 
1094 	fiber = 0;
1095 
1096 	switch (afep->afe_forcefiber) {
1097 	case 0:
1098 		/* UTP Port */
1099 		fiber = 0;
1100 		break;
1101 	case 1:
1102 		/* Fiber Port */
1103 		fiber = 1;
1104 		break;
1105 	}
1106 
1107 	mcr = afe_mii_read(afep, phyaddr, PHY_MCR);
1108 	switch (fiber) {
1109 	case 0:
1110 		mcr &= ~MCR_FIBER;
1111 		break;
1112 
1113 	case 1:
1114 		mcr |= MCR_FIBER;
1115 		break;
1116 	}
1117 	afe_mii_write(afep, phyaddr, PHY_MCR, mcr);
1118 	drv_usecwait(500);
1119 
1120 	/*
1121 	 * Work around for erratum 983B_0416 -- the duplex light flashes
1122 	 * in 10 HDX.  We just disable SQE testing on the device.
1123 	 */
1124 	pilr = afe_mii_read(afep, phyaddr, PHY_PILR);
1125 	pilr |= PILR_NOSQE;
1126 	afe_mii_write(afep, phyaddr, PHY_PILR, pilr);
1127 }
1128 
1129 void
1130 afe_mii_notify(void *arg, link_state_t link)
1131 {
1132 	afe_t	*afep = arg;
1133 
1134 	if (AFE_MODEL(afep) == MODEL_CENTAUR) {
1135 		if (mii_get_flowctrl(afep->afe_mii) == LINK_FLOWCTRL_BI) {
1136 			SETBIT(afep, CSR_CR, CR_PAUSE);
1137 		} else {
1138 			CLRBIT(afep, CSR_CR, CR_PAUSE);
1139 		}
1140 	}
1141 	mac_link_update(afep->afe_mh, link);
1142 }
1143 
1144 void
1145 afe_miitristate(afe_t *afep)
1146 {
1147 	uint32_t val = SPR_SROM_WRITE | SPR_MII_CTRL;
1148 
1149 	PUTCSR(afep, CSR_SPR, val);
1150 	drv_usecwait(1);
1151 	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1152 	drv_usecwait(1);
1153 }
1154 
1155 void
1156 afe_miiwritebit(afe_t *afep, uint8_t bit)
1157 {
1158 	uint32_t val = bit ? SPR_MII_DOUT : 0;
1159 
1160 	PUTCSR(afep, CSR_SPR, val);
1161 	drv_usecwait(1);
1162 	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1163 	drv_usecwait(1);
1164 }
1165 
1166 uint8_t
1167 afe_miireadbit(afe_t *afep)
1168 {
1169 	uint32_t	val = SPR_MII_CTRL | SPR_SROM_READ;
1170 	uint8_t		bit;
1171 
1172 	PUTCSR(afep, CSR_SPR, val);
1173 	drv_usecwait(1);
1174 	bit = (GETCSR(afep, CSR_SPR) & SPR_MII_DIN) ? 1 : 0;
1175 	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1176 	drv_usecwait(1);
1177 	return (bit);
1178 }
1179 
1180 uint16_t
1181 afe_mii_read(void *arg, uint8_t phy, uint8_t reg)
1182 {
1183 	afe_t *afep = arg;
1184 	/*
1185 	 * Due to a bug, ADMtek parts ignore the PHY address decode
1186 	 * bits -- they only support a PHY at address 1.
1187 	 */
1188 	if (phy != 1) {
1189 		return (0xffff);
1190 	}
1191 	switch (AFE_MODEL(afep)) {
1192 	case MODEL_COMET:
1193 		return (afe_miireadcomet(afep, phy, reg));
1194 	case MODEL_CENTAUR:
1195 		return (afe_miireadgeneral(afep, phy, reg));
1196 	}
1197 	return (0xffff);
1198 }
1199 
1200 uint16_t
1201 afe_miireadgeneral(afe_t *afep, uint8_t phy, uint8_t reg)
1202 {
1203 	uint16_t	value = 0;
1204 	int		i;
1205 
1206 	/* send the 32 bit preamble */
1207 	for (i = 0; i < 32; i++) {
1208 		afe_miiwritebit(afep, 1);
1209 	}
1210 
1211 	/* send the start code - 01b */
1212 	afe_miiwritebit(afep, 0);
1213 	afe_miiwritebit(afep, 1);
1214 
1215 	/* send the opcode for read, - 10b */
1216 	afe_miiwritebit(afep, 1);
1217 	afe_miiwritebit(afep, 0);
1218 
1219 	/* next we send the 5 bit phy address */
1220 	for (i = 0x10; i > 0; i >>= 1) {
1221 		afe_miiwritebit(afep, (phy & i) ? 1 : 0);
1222 	}
1223 
1224 	/* the 5 bit register address goes next */
1225 	for (i = 0x10; i > 0; i >>= 1) {
1226 		afe_miiwritebit(afep, (reg & i) ? 1 : 0);
1227 	}
1228 
1229 	/* turnaround - tristate followed by logic 0 */
1230 	afe_miitristate(afep);
1231 	afe_miiwritebit(afep, 0);
1232 
1233 	/* read the 16 bit register value */
1234 	for (i = 0x8000; i > 0; i >>= 1) {
1235 		value <<= 1;
1236 		value |= afe_miireadbit(afep);
1237 	}
1238 	afe_miitristate(afep);
1239 	return (value);
1240 }
1241 
1242 uint16_t
1243 afe_miireadcomet(afe_t *afep, uint8_t phy, uint8_t reg)
1244 {
1245 	if (phy != 1) {
1246 		return (0xffff);
1247 	}
1248 	switch (reg) {
1249 	case MII_CONTROL:
1250 		reg = CSR_BMCR;
1251 		break;
1252 	case MII_STATUS:
1253 		reg = CSR_BMSR;
1254 		break;
1255 	case MII_PHYIDH:
1256 		reg = CSR_PHYIDR1;
1257 		break;
1258 	case MII_PHYIDL:
1259 		reg = CSR_PHYIDR2;
1260 		break;
1261 	case MII_AN_ADVERT:
1262 		reg = CSR_ANAR;
1263 		break;
1264 	case MII_AN_LPABLE:
1265 		reg = CSR_ANLPAR;
1266 		break;
1267 	case MII_AN_EXPANSION:
1268 		reg = CSR_ANER;
1269 		break;
1270 	default:
1271 		return (0);
1272 	}
1273 	return (GETCSR16(afep, reg) & 0xFFFF);
1274 }
1275 
1276 void
1277 afe_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
1278 {
1279 	afe_t	*afep = arg;
1280 
1281 	/*
1282 	 * Due to a bug, ADMtek parts ignore the PHY address decode
1283 	 * bits -- they only support a PHY at address 1.
1284 	 */
1285 	if (phy != 1) {
1286 		return;
1287 	}
1288 	switch (AFE_MODEL(afep)) {
1289 	case MODEL_COMET:
1290 		afe_miiwritecomet(afep, phy, reg, val);
1291 		break;
1292 	case MODEL_CENTAUR:
1293 		afe_miiwritegeneral(afep, phy, reg, val);
1294 		break;
1295 	}
1296 }
1297 
1298 void
1299 afe_miiwritegeneral(afe_t *afep, uint8_t phy, uint8_t reg, uint16_t val)
1300 {
1301 	int i;
1302 
1303 	/* send the 32 bit preamble */
1304 	for (i = 0; i < 32; i++) {
1305 		afe_miiwritebit(afep, 1);
1306 	}
1307 
1308 	/* send the start code - 01b */
1309 	afe_miiwritebit(afep, 0);
1310 	afe_miiwritebit(afep, 1);
1311 
1312 	/* send the opcode for write, - 01b */
1313 	afe_miiwritebit(afep, 0);
1314 	afe_miiwritebit(afep, 1);
1315 
1316 	/* next we send the 5 bit phy address */
1317 	for (i = 0x10; i > 0; i >>= 1) {
1318 		afe_miiwritebit(afep, (phy & i) ? 1 : 0);
1319 	}
1320 
1321 	/* the 5 bit register address goes next */
1322 	for (i = 0x10; i > 0; i >>= 1) {
1323 		afe_miiwritebit(afep, (reg & i) ? 1 : 0);
1324 	}
1325 
1326 	/* turnaround - 1 bit followed by logic 0 */
1327 	afe_miiwritebit(afep, 1);
1328 	afe_miiwritebit(afep, 0);
1329 
1330 	/* now write out our data (16 bits) */
1331 	for (i = 0x8000; i > 0; i >>= 1) {
1332 		afe_miiwritebit(afep, (val & i) ? 1 : 0);
1333 	}
1334 
1335 	/* idle mode */
1336 	afe_miitristate(afep);
1337 }
1338 
1339 void
1340 afe_miiwritecomet(afe_t *afep, uint8_t phy, uint8_t reg, uint16_t val)
1341 {
1342 	if (phy != 1) {
1343 		return;
1344 	}
1345 	switch (reg) {
1346 	case MII_CONTROL:
1347 		reg = CSR_BMCR;
1348 		break;
1349 	case MII_STATUS:
1350 		reg = CSR_BMSR;
1351 		break;
1352 	case MII_PHYIDH:
1353 		reg = CSR_PHYIDR1;
1354 		break;
1355 	case MII_PHYIDL:
1356 		reg = CSR_PHYIDR2;
1357 		break;
1358 	case MII_AN_ADVERT:
1359 		reg = CSR_ANAR;
1360 		break;
1361 	case MII_AN_LPABLE:
1362 		reg = CSR_ANLPAR;
1363 		break;
1364 	case MII_AN_EXPANSION:
1365 		reg = CSR_ANER;
1366 		break;
1367 	default:
1368 		return;
1369 	}
1370 	PUTCSR16(afep, reg, val);
1371 }
1372 
1373 int
1374 afe_m_start(void *arg)
1375 {
1376 	afe_t	*afep = arg;
1377 
1378 	/* grab exclusive access to the card */
1379 	mutex_enter(&afep->afe_intrlock);
1380 	mutex_enter(&afep->afe_xmtlock);
1381 
1382 	afe_startall(afep);
1383 	afep->afe_flags |= AFE_RUNNING;
1384 
1385 	mutex_exit(&afep->afe_xmtlock);
1386 	mutex_exit(&afep->afe_intrlock);
1387 
1388 	mii_start(afep->afe_mii);
1389 
1390 	return (0);
1391 }
1392 
1393 void
1394 afe_m_stop(void *arg)
1395 {
1396 	afe_t	*afep = arg;
1397 
1398 	mii_stop(afep->afe_mii);
1399 
1400 	/* exclusive access to the hardware! */
1401 	mutex_enter(&afep->afe_intrlock);
1402 	mutex_enter(&afep->afe_xmtlock);
1403 
1404 	afe_stopall(afep);
1405 	afep->afe_flags &= ~AFE_RUNNING;
1406 
1407 	mutex_exit(&afep->afe_xmtlock);
1408 	mutex_exit(&afep->afe_intrlock);
1409 }
1410 
1411 void
1412 afe_startmac(afe_t *afep)
1413 {
1414 	/* verify exclusive access to the card */
1415 	ASSERT(mutex_owned(&afep->afe_intrlock));
1416 	ASSERT(mutex_owned(&afep->afe_xmtlock));
1417 
1418 	/* start the card */
1419 	SETBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);
1420 
1421 	if (afep->afe_txavail != AFE_TXRING)
1422 		PUTCSR(afep, CSR_TDR, 0);
1423 
1424 	/* tell the mac that we are ready to go! */
1425 	if (afep->afe_flags & AFE_RUNNING)
1426 		mac_tx_update(afep->afe_mh);
1427 
1428 	/* start watchdog timer */
1429 	PUTCSR(afep, CSR_TIMER, TIMER_LOOP |
1430 	    (AFE_WDOGTIMER * 1000 / TIMER_USEC));
1431 }
1432 
1433 void
1434 afe_stopmac(afe_t *afep)
1435 {
1436 	int		i;
1437 
1438 	/* exclusive access to the hardware! */
1439 	ASSERT(mutex_owned(&afep->afe_intrlock));
1440 	ASSERT(mutex_owned(&afep->afe_xmtlock));
1441 
1442 	CLRBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);
1443 
1444 	/*
1445 	 * A 1518 byte frame at 10Mbps takes about 1.2 msec to drain.
1446 	 * We just round up to the nearest msec (2), which should be
1447 	 * plenty to complete.
1448 	 *
1449 	 * Note that some chips never seem to indicate the transition to
1450 	 * the stopped state properly.  Experience shows that we can safely
1451 	 * proceed anyway, after waiting the requisite timeout.
1452 	 */
1453 	for (i = 2000; i != 0; i -= 10) {
1454 		if ((GETCSR(afep, CSR_SR) & (SR_TX_STATE | SR_RX_STATE)) == 0)
1455 			break;
1456 		drv_usecwait(10);
1457 	}
1458 
1459 	/* prevent an interrupt */
1460 	PUTCSR(afep, CSR_SR2, INT_RXSTOPPED | INT_TXSTOPPED);
1461 
1462 	/* stop the watchdog timer */
1463 	PUTCSR(afep, CSR_TIMER, 0);
1464 }
1465 
1466 void
1467 afe_resetrings(afe_t *afep)
1468 {
1469 	int	i;
1470 
1471 	/* now we need to reset the pointers... */
1472 	PUTCSR(afep, CSR_RDB, 0);
1473 	PUTCSR(afep, CSR_TDB, 0);
1474 
1475 	/* reset the descriptor ring pointers */
1476 	afep->afe_rxhead = 0;
1477 	afep->afe_txreclaim = 0;
1478 	afep->afe_txsend = 0;
1479 	afep->afe_txavail = AFE_TXRING;
1480 
1481 	/* set up transmit descriptor ring */
1482 	for (i = 0; i < AFE_TXRING; i++) {
1483 		afe_desc_t	*tmdp = &afep->afe_txdescp[i];
1484 		unsigned	control = 0;
1485 		if (i == (AFE_TXRING - 1)) {
1486 			control |= TXCTL_ENDRING;
1487 		}
1488 		PUTTXDESC(afep, tmdp->desc_status, 0);
1489 		PUTTXDESC(afep, tmdp->desc_control, control);
1490 		PUTTXDESC(afep, tmdp->desc_buffer1, 0);
1491 		PUTTXDESC(afep, tmdp->desc_buffer2, 0);
1492 		SYNCTXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
1493 	}
1494 	PUTCSR(afep, CSR_TDB, afep->afe_txdesc_paddr);
1495 
1496 	/* make the receive buffers available */
1497 	for (i = 0; i < AFE_RXRING; i++) {
1498 		afe_rxbuf_t	*rxb = afep->afe_rxbufs[i];
1499 		afe_desc_t	*rmdp = &afep->afe_rxdescp[i];
1500 		unsigned	control;
1501 
1502 		control = AFE_BUFSZ & RXCTL_BUFLEN1;
1503 		if (i == (AFE_RXRING - 1)) {
1504 			control |= RXCTL_ENDRING;
1505 		}
1506 		PUTRXDESC(afep, rmdp->desc_buffer1, rxb->rxb_paddr);
1507 		PUTRXDESC(afep, rmdp->desc_buffer2, 0);
1508 		PUTRXDESC(afep, rmdp->desc_control, control);
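		/* setting OWN hands the descriptor to the chip */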
1509 		PUTRXDESC(afep, rmdp->desc_status, RXSTAT_OWN);
1510 		SYNCRXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
1511 	}
1512 	PUTCSR(afep, CSR_RDB, afep->afe_rxdesc_paddr);
1513 }
1514 
1515 void
1516 afe_stopall(afe_t *afep)
1517 {
1518 	afe_disableinterrupts(afep);
1519 	afe_stopmac(afep);
1520 }
1521 
1522 void
1523 afe_startall(afe_t *afep)
1524 {
1525 	ASSERT(mutex_owned(&afep->afe_intrlock));
1526 	ASSERT(mutex_owned(&afep->afe_xmtlock));
1527 
1528 	/* make sure interrupts are disabled to begin */
1529 	afe_disableinterrupts(afep);
1530 
1531 	/* initialize the chip */
1532 	(void) afe_initialize(afep);
1533 
1534 	/* now we can enable interrupts */
1535 	afe_enableinterrupts(afep);
1536 
1537 	/* start up the mac */
1538 	afe_startmac(afep);
1539 }
1540 
1541 void
1542 afe_resetall(afe_t *afep)
1543 {
1544 	afe_stopall(afep);
1545 	afe_startall(afep);
1546 }
1547 
1548 afe_txbuf_t *
1549 afe_alloctxbuf(afe_t *afep)
1550 {
1551 	ddi_dma_cookie_t	dmac;
1552 	unsigned		ncookies;
1553 	afe_txbuf_t		*txb;
1554 	size_t			len;
1555 
1556 	txb = kmem_zalloc(sizeof (*txb), KM_SLEEP);
1557 
1558 	if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_txattr,
1559 	    DDI_DMA_SLEEP, NULL, &txb->txb_dmah) != DDI_SUCCESS) {
		/* avoid leaking the partially constructed buffer */
		afe_destroytxbuf(txb);
1560 		return (NULL);
1561 	}
1562 
1563 	if (ddi_dma_mem_alloc(txb->txb_dmah, AFE_BUFSZ, &afe_bufattr,
1564 	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &txb->txb_buf, &len,
1565 	    &txb->txb_acch) != DDI_SUCCESS) {
		afe_destroytxbuf(txb);
1566 		return (NULL);
1567 	}
1568 	if (ddi_dma_addr_bind_handle(txb->txb_dmah, NULL, txb->txb_buf,
1569 	    len, DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
1570 	    &dmac, &ncookies) != DDI_DMA_MAPPED) {
		afe_destroytxbuf(txb);
1571 		return (NULL);
1572 	}
1573 	txb->txb_paddr = dmac.dmac_address;
1574 
1575 	return (txb);
1576 }
1577 
1578 void
1579 afe_destroytxbuf(afe_txbuf_t *txb)
1580 {
1581 	if (txb != NULL) {
1582 		if (txb->txb_paddr)
1583 			(void) ddi_dma_unbind_handle(txb->txb_dmah);
1584 		if (txb->txb_acch)
1585 			ddi_dma_mem_free(&txb->txb_acch);
1586 		if (txb->txb_dmah)
1587 			ddi_dma_free_handle(&txb->txb_dmah);
1588 		kmem_free(txb, sizeof (*txb));
1589 	}
1590 }
1591 
1592 afe_rxbuf_t *
1593 afe_allocrxbuf(afe_t *afep)
1594 {
1595 	afe_rxbuf_t		*rxb;
1596 	size_t			len;
1597 	unsigned		ccnt;
1598 	ddi_dma_cookie_t	dmac;
1599 
1600 	rxb = kmem_zalloc(sizeof (*rxb), KM_SLEEP);
1601 
1602 	if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1603 	    DDI_DMA_SLEEP, NULL, &rxb->rxb_dmah) != DDI_SUCCESS) {
1604 		kmem_free(rxb, sizeof (*rxb));
1605 		return (NULL);
1606 	}
1607 	if (ddi_dma_mem_alloc(rxb->rxb_dmah, AFE_BUFSZ, &afe_bufattr,
1608 	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &rxb->rxb_buf, &len,
1609 	    &rxb->rxb_acch) != DDI_SUCCESS) {
1610 		ddi_dma_free_handle(&rxb->rxb_dmah);
1611 		kmem_free(rxb, sizeof (*rxb));
1612 		return (NULL);
1613 	}
1614 	if (ddi_dma_addr_bind_handle(rxb->rxb_dmah, NULL, rxb->rxb_buf, len,
1615 	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
1616 	    &ccnt) != DDI_DMA_MAPPED) {
1617 		ddi_dma_mem_free(&rxb->rxb_acch);
1618 		ddi_dma_free_handle(&rxb->rxb_dmah);
1619 		kmem_free(rxb, sizeof (*rxb));
1620 		return (NULL);
1621 	}
1622 	rxb->rxb_paddr = dmac.dmac_address;
1623 
1624 	return (rxb);
1625 }
1626 
1627 void
1628 afe_destroyrxbuf(afe_rxbuf_t *rxb)
1629 {
1630 	if (rxb) {
1631 		(void) ddi_dma_unbind_handle(rxb->rxb_dmah);
1632 		ddi_dma_mem_free(&rxb->rxb_acch);
1633 		ddi_dma_free_handle(&rxb->rxb_dmah);
1634 		kmem_free(rxb, sizeof (*rxb));
1635 	}
1636 }
1637 
1638 /*
1639  * Allocate receive resources.
1640  */
1641 int
1642 afe_allocrxring(afe_t *afep)
1643 {
1644 	int			rval;
1645 	int			i;
1646 	size_t			size;
1647 	size_t			len;
1648 	ddi_dma_cookie_t	dmac;
1649 	unsigned		ncookies;
1650 	caddr_t			kaddr;
1651 
1652 	size = AFE_RXRING * sizeof (afe_desc_t);
1653 
1654 	rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1655 	    DDI_DMA_SLEEP, NULL, &afep->afe_rxdesc_dmah);
1656 	if (rval != DDI_SUCCESS) {
1657 		afe_error(afep->afe_dip,
1658 		    "unable to allocate DMA handle for rx descriptors");
1659 		return (DDI_FAILURE);
1660 	}
1661 
1662 	rval = ddi_dma_mem_alloc(afep->afe_rxdesc_dmah, size, &afe_devattr,
1663 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1664 	    &afep->afe_rxdesc_acch);
1665 	if (rval != DDI_SUCCESS) {
1666 		afe_error(afep->afe_dip,
1667 		    "unable to allocate DMA memory for rx descriptors");
1668 		return (DDI_FAILURE);
1669 	}
1670 
1671 	rval = ddi_dma_addr_bind_handle(afep->afe_rxdesc_dmah, NULL, kaddr,
1672 	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1673 	    &dmac, &ncookies);
1674 	if (rval != DDI_DMA_MAPPED) {
1675 		afe_error(afep->afe_dip,
1676 		    "unable to bind DMA for rx descriptors");
1677 		return (DDI_FAILURE);
1678 	}
1679 
1680 	/* because of afe_dma_attr */
1681 	ASSERT(ncookies == 1);
1682 
1683 	/* we take the 32-bit physical address out of the cookie */
1684 	afep->afe_rxdesc_paddr = dmac.dmac_address;
1685 	afep->afe_rxdescp = (void *)kaddr;
1686 
1687 	/* allocate buffer pointers (not the buffers themselves, yet) */
1688 	afep->afe_rxbufs = kmem_zalloc(AFE_RXRING * sizeof (afe_rxbuf_t *),
1689 	    KM_SLEEP);
1690 
1691 	/* now allocate rx buffers */
1692 	for (i = 0; i < AFE_RXRING; i++) {
1693 		afe_rxbuf_t *rxb = afe_allocrxbuf(afep);
1694 		if (rxb == NULL)
1695 			return (DDI_FAILURE);
1696 		afep->afe_rxbufs[i] = rxb;
1697 	}
1698 
1699 	return (DDI_SUCCESS);
1700 }
1701 
1702 /*
1703  * Allocate transmit resources.
1704  */
1705 int
1706 afe_alloctxring(afe_t *afep)
1707 {
1708 	int			rval;
1709 	int			i;
1710 	size_t			size;
1711 	size_t			len;
1712 	ddi_dma_cookie_t	dmac;
1713 	unsigned		ncookies;
1714 	caddr_t			kaddr;
1715 
1716 	size = AFE_TXRING * sizeof (afe_desc_t);
1717 
1718 	rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1719 	    DDI_DMA_SLEEP, NULL, &afep->afe_txdesc_dmah);
1720 	if (rval != DDI_SUCCESS) {
1721 		afe_error(afep->afe_dip,
1722 		    "unable to allocate DMA handle for tx descriptors");
1723 		return (DDI_FAILURE);
1724 	}
1725 
1726 	rval = ddi_dma_mem_alloc(afep->afe_txdesc_dmah, size, &afe_devattr,
1727 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1728 	    &afep->afe_txdesc_acch);
1729 	if (rval != DDI_SUCCESS) {
1730 		afe_error(afep->afe_dip,
1731 		    "unable to allocate DMA memory for tx descriptors");
1732 		return (DDI_FAILURE);
1733 	}
1734 
1735 	rval = ddi_dma_addr_bind_handle(afep->afe_txdesc_dmah, NULL, kaddr,
1736 	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1737 	    &dmac, &ncookies);
1738 	if (rval != DDI_DMA_MAPPED) {
1739 		afe_error(afep->afe_dip,
1740 		    "unable to bind DMA for tx descriptors");
1741 		return (DDI_FAILURE);
1742 	}
1743 
1744 	/* because of afe_dma_attr */
1745 	ASSERT(ncookies == 1);
1746 
1747 	/* we take the 32-bit physical address out of the cookie */
1748 	afep->afe_txdesc_paddr = dmac.dmac_address;
1749 	afep->afe_txdescp = (void *)kaddr;
1750 
1751 	/* allocate buffer pointers (not the buffers themselves, yet) */
1752 	afep->afe_txbufs = kmem_zalloc(AFE_TXRING * sizeof (afe_txbuf_t *),
1753 	    KM_SLEEP);
1754 
1755 	/* now allocate tx buffers */
1756 	for (i = 0; i < AFE_TXRING; i++) {
1757 		afe_txbuf_t *txb = afe_alloctxbuf(afep);
1758 		if (txb == NULL)
1759 			return (DDI_FAILURE);
1760 		afep->afe_txbufs[i] = txb;
1761 	}
1762 
1763 	return (DDI_SUCCESS);
1764 }
1765 
1766 void
1767 afe_freerxring(afe_t *afep)
1768 {
1769 	int		i;
1770 
1771 	for (i = 0; i < AFE_RXRING; i++) {
1772 		afe_destroyrxbuf(afep->afe_rxbufs[i]);
1773 	}
1774 
1775 	if (afep->afe_rxbufs) {
1776 		kmem_free(afep->afe_rxbufs,
1777 		    AFE_RXRING * sizeof (afe_rxbuf_t *));
1778 	}
1779 
1780 	if (afep->afe_rxdesc_paddr)
1781 		(void) ddi_dma_unbind_handle(afep->afe_rxdesc_dmah);
1782 	if (afep->afe_rxdesc_acch)
1783 		ddi_dma_mem_free(&afep->afe_rxdesc_acch);
1784 	if (afep->afe_rxdesc_dmah)
1785 		ddi_dma_free_handle(&afep->afe_rxdesc_dmah);
1786 }
1787 
1788 void
1789 afe_freetxring(afe_t *afep)
1790 {
1791 	int			i;
1792 
1793 	for (i = 0; i < AFE_TXRING; i++) {
1794 		afe_destroytxbuf(afep->afe_txbufs[i]);
1795 	}
1796 
1797 	if (afep->afe_txbufs) {
1798 		kmem_free(afep->afe_txbufs,
1799 		    AFE_TXRING * sizeof (afe_txbuf_t *));
1800 	}
1801 	if (afep->afe_txdesc_paddr)
1802 		(void) ddi_dma_unbind_handle(afep->afe_txdesc_dmah);
1803 	if (afep->afe_txdesc_acch)
1804 		ddi_dma_mem_free(&afep->afe_txdesc_acch);
1805 	if (afep->afe_txdesc_dmah)
1806 		ddi_dma_free_handle(&afep->afe_txdesc_dmah);
1807 }
1808 
1809 /*
1810  * Interrupt service routine.
1811  */
1812 unsigned
1813 afe_intr(caddr_t arg)
1814 {
1815 	afe_t		*afep = (void *)arg;
1816 	uint32_t	status;
1817 	mblk_t		*mp = NULL;
1818 	boolean_t	doreset = B_FALSE;
1819 
1820 	mutex_enter(&afep->afe_intrlock);
1821 
1822 	if (afep->afe_flags & AFE_SUSPENDED) {
1823 		/* we cannot receive interrupts! */
1824 		mutex_exit(&afep->afe_intrlock);
1825 		return (DDI_INTR_UNCLAIMED);
1826 	}
1827 
1828 	/* check interrupt status bits, did we interrupt? */
1829 	status = GETCSR(afep, CSR_SR2) & INT_ALL;
1830 
1831 	if (status == 0) {
1832 		KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
1833 		mutex_exit(&afep->afe_intrlock);
1834 		return (DDI_INTR_UNCLAIMED);
1835 	}
1836 	/* ack the interrupt */
1837 	PUTCSR(afep, CSR_SR2, status);
1838 	KIOIP->intrs[KSTAT_INTR_HARD]++;
1839 
1840 	if (!(afep->afe_flags & AFE_RUNNING)) {
1841 		/* not running, don't touch anything */
1842 		mutex_exit(&afep->afe_intrlock);
1843 		return (DDI_INTR_CLAIMED);
1844 	}
1845 
1846 	if (status & (INT_RXOK|INT_RXNOBUF)) {
1847 		/* receive packets */
1848 		mp = afe_receive(afep);
1849 		if (status & INT_RXNOBUF)
1850 			PUTCSR(afep, CSR_RDR, 0);	/* wake up chip */
1851 	}
1852 
1853 	if (status & INT_TXOK) {
1854 		/* transmit completed */
1855 		mutex_enter(&afep->afe_xmtlock);
1856 		afe_reclaim(afep);
1857 		mutex_exit(&afep->afe_xmtlock);
1858 	}
1859 
1860 	if ((status & INT_TIMER) && (afe_watchdog(afep) != DDI_SUCCESS)) {
1861 		doreset = B_TRUE;
1862 	}
1863 
1864 	if (status & (INT_RXSTOPPED|INT_TXSTOPPED|
1865 	    INT_RXJABBER|INT_TXJABBER|INT_TXUNDERFLOW)) {
1866 
1867 		if (status & (INT_RXJABBER | INT_TXJABBER)) {
1868 			afep->afe_jabber++;
1869 		}
1870 		doreset = B_TRUE;
1871 	}
1872 
1873 	if (status & INT_BUSERR) {
1874 		switch (GETCSR(afep, CSR_SR) & SR_BERR_TYPE) {
1875 		case SR_BERR_PARITY:
1876 			afe_error(afep->afe_dip, "PCI parity error");
1877 			break;
1878 		case SR_BERR_TARGET_ABORT:
1879 			afe_error(afep->afe_dip, "PCI target abort");
1880 			break;
1881 		case SR_BERR_MASTER_ABORT:
1882 			afe_error(afep->afe_dip, "PCI master abort");
1883 			break;
1884 		default:
1885 			afe_error(afep->afe_dip, "Unknown PCI error");
1886 			break;
1887 		}
1888 
1889 		/* reset the chip in an attempt to fix things */
1890 		doreset = B_TRUE;
1891 	}
1892 
1893 
1894 	if (doreset) {
1895 		mutex_enter(&afep->afe_xmtlock);
1896 		afe_resetall(afep);
1897 		mutex_exit(&afep->afe_xmtlock);
1898 		mutex_exit(&afep->afe_intrlock);
1899 
1900 		mii_reset(afep->afe_mii);
1901 	} else {
1902 		mutex_exit(&afep->afe_intrlock);
1903 	}
1904 
1905 	if (status & INT_LINKCHG) {
1906 		mii_check(afep->afe_mii);
1907 	}
1908 
1909 	/*
1910 	 * Send up packets.  We do this outside of the intrlock.
1911 	 */
1912 	if (mp) {
1913 		mac_rx(afep->afe_mh, NULL, mp);
1914 	}
1915 
1916 	return (DDI_INTR_CLAIMED);
1917 }
1918 
1919 void
1920 afe_enableinterrupts(afe_t *afep)
1921 {
1922 	unsigned mask = INT_WANTED;
1923 
1924 	if (afep->afe_wantw)
1925 		mask |= INT_TXOK;
1926 
1927 	PUTCSR(afep, CSR_IER2, mask);
1928 
1929 	if (AFE_MODEL(afep) == MODEL_COMET) {
1930 		/*
1931 		 * On the Comet, this is the internal transceiver
1932 		 * interrupt.  We program the Comet's built-in PHY to
1933 		 * enable certain interrupts.
1934 		 */
1935 		PUTCSR16(afep, CSR_XIE, XIE_LDE | XIE_ANCE);
1936 	}
1937 }
1938 
1939 void
1940 afe_disableinterrupts(afe_t *afep)
1941 {
1942 	/* disable further interrupts */
1943 	PUTCSR(afep, CSR_IER2, INT_NONE);
1944 
1945 	/* clear any pending interrupts */
1946 	PUTCSR(afep, CSR_SR2, INT_ALL);
1947 }
1948 
1949 boolean_t
1950 afe_send(afe_t *afep, mblk_t *mp)
1951 {
1952 	size_t			len;
1953 	afe_txbuf_t		*txb;
1954 	afe_desc_t		*tmd;
1955 	uint32_t		control;
1956 	int			txsend;
1957 
1958 	ASSERT(mutex_owned(&afep->afe_xmtlock));
1959 	ASSERT(mp != NULL);
1960 
1961 	len = msgsize(mp);
1962 	if (len > ETHERVLANMTU) {
1963 		afep->afe_macxmt_errors++;
1964 		freemsg(mp);
1965 		return (B_TRUE);
1966 	}
1967 
1968 	if (afep->afe_txavail < AFE_TXRECLAIM)
1969 		afe_reclaim(afep);
1970 
1971 	if (afep->afe_txavail == 0) {
1972 		/* no more tmds */
1973 		afep->afe_wantw = B_TRUE;
1974 		/* enable TX interrupt */
1975 		afe_enableinterrupts(afep);
1976 		return (B_FALSE);
1977 	}
1978 
1979 	txsend = afep->afe_txsend;
1980 
1981 	/*
1982 	 * For simplicity, we just do a copy into a preallocated
1983 	 * DMA buffer.
1984 	 */
1985 
1986 	txb = afep->afe_txbufs[txsend];
1987 	mcopymsg(mp, txb->txb_buf);	/* frees mp! */
1988 
1989 	/*
1990 	 * Statistics.
1991 	 */
1992 	afep->afe_opackets++;
1993 	afep->afe_obytes += len;
1994 	if (txb->txb_buf[0] & 0x1) {
1995 		if (bcmp(txb->txb_buf, afe_broadcast, ETHERADDRL) != 0)
1996 			afep->afe_multixmt++;
1997 		else
1998 			afep->afe_brdcstxmt++;
1999 	}
2000 
2001 	/* note len is already known to be a small unsigned */
2002 	control = len | TXCTL_FIRST | TXCTL_LAST | TXCTL_INTCMPLTE;
2003 
2004 	if (txsend == (AFE_TXRING - 1))
2005 		control |= TXCTL_ENDRING;
2006 
2007 	tmd = &afep->afe_txdescp[txsend];
2008 
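	/*
	 * Fill in the descriptor.  The status word with OWN is written
	 * last, so the chip never sees a partially constructed entry.
	 */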
2009 	SYNCTXBUF(txb, len, DDI_DMA_SYNC_FORDEV);
2010 	PUTTXDESC(afep, tmd->desc_control, control);
2011 	PUTTXDESC(afep, tmd->desc_buffer1, txb->txb_paddr);
2012 	PUTTXDESC(afep, tmd->desc_buffer2, 0);
2013 	PUTTXDESC(afep, tmd->desc_status, TXSTAT_OWN);
2014 	/* sync the descriptor out to the device */
2015 	SYNCTXDESC(afep, txsend, DDI_DMA_SYNC_FORDEV);
2016 
2017 	/*
2018 	 * Note the new values of txavail and txsend.
2019 	 */
2020 	afep->afe_txavail--;
2021 	afep->afe_txsend = (txsend + 1) % AFE_TXRING;
2022 
2023 	/*
2024 	 * It should never, ever take more than 5 seconds to drain
2025 	 * the ring.  If it happens, then we are stuck!
2026 	 */
2027 	afep->afe_txstall_time = gethrtime() + (5 * 1000000000ULL);
2028 
2029 	/*
2030 	 * wake up the chip ... inside the lock to protect against DR suspend,
2031 	 * etc.
2032 	 */
2033 	PUTCSR(afep, CSR_TDR, 0);
2034 
2035 	return (B_TRUE);
2036 }
2037 
2038 /*
2039  * Reclaim buffers that have completed transmission.
2040  */
2041 void
2042 afe_reclaim(afe_t *afep)
2043 {
2044 	afe_desc_t	*tmdp;
2045 
2046 	while (afep->afe_txavail != AFE_TXRING) {
2047 		uint32_t	status;
2048 		uint32_t	control;
2049 		int		index = afep->afe_txreclaim;
2050 
2051 		tmdp = &afep->afe_txdescp[index];
2052 
2053 		/* sync it before we read it */
2054 		SYNCTXDESC(afep, index, DDI_DMA_SYNC_FORKERNEL);
2055 
2056 		control = GETTXDESC(afep, tmdp->desc_control);
2057 		status = GETTXDESC(afep, tmdp->desc_status);
2058 
2059 		if (status & TXSTAT_OWN) {
2060 			/* chip is still working on it, we're done */
2061 			break;
2062 		}
2063 
2064 		afep->afe_txavail++;
2065 		afep->afe_txreclaim = (index + 1) % AFE_TXRING;
2066 
2067 		/* in the most common successful case, all bits are clear */
2068 		if (status == 0)
2069 			continue;
2070 
2071 		if ((control & TXCTL_LAST) == 0)
2072 			continue;
2073 
2074 		if (status & TXSTAT_TXERR) {
2075 			afep->afe_errxmt++;
2076 
2077 			if (status & TXSTAT_JABBER) {
2078 				/* transmit jabber timeout */
2079 				afep->afe_macxmt_errors++;
2080 			}
2081 			if (status &
2082 			    (TXSTAT_CARRLOST | TXSTAT_NOCARR)) {
2083 				afep->afe_carrier_errors++;
2084 			}
2085 			if (status & TXSTAT_UFLOW) {
2086 				afep->afe_underflow++;
2087 			}
2088 			if (status & TXSTAT_LATECOL) {
2089 				afep->afe_tx_late_collisions++;
2090 			}
2091 			if (status & TXSTAT_EXCOLL) {
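				/*
				 * The frame was abandoned after 16
				 * consecutive collisions, so account
				 * for all of them.
				 */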
2092 				afep->afe_ex_collisions++;
2093 				afep->afe_collisions += 16;
2094 			}
2095 		}
2096 
2097 		if (status & TXSTAT_DEFER) {
2098 			afep->afe_defer_xmts++;
2099 		}
2100 
2101 		/* collision counting */
2102 		if (TXCOLLCNT(status) == 1) {
2103 			afep->afe_collisions++;
2104 			afep->afe_first_collisions++;
2105 		} else if (TXCOLLCNT(status)) {
2106 			afep->afe_collisions += TXCOLLCNT(status);
2107 			afep->afe_multi_collisions += TXCOLLCNT(status);
2108 		}
2109 	}
2110 
2111 	if (afep->afe_txavail >= AFE_TXRESCHED) {
2112 		if (afep->afe_wantw) {
2113 			/*
2114 			 * We freed up descriptors; stop asking for
2115 			 * TX-complete interrupts and resume transmission.
2116 			 */
2117 			afep->afe_wantw = B_FALSE;
2118 			afe_enableinterrupts(afep);
2119 			mac_tx_update(afep->afe_mh);
2120 		}
2121 	}
2122 }
2123 
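/*
 * Pull completed frames off the receive ring and return them as a
 * b_next-linked chain of mblks for delivery via mac_rx().  Each good
 * frame is copied into a freshly allocated mblk so that the ring
 * buffer can be handed straight back to the chip.
 */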
2124 mblk_t *
2125 afe_receive(afe_t *afep)
2126 {
2127 	unsigned		len;
2128 	afe_rxbuf_t		*rxb;
2129 	afe_desc_t		*rmd;
2130 	uint32_t		status;
2131 	mblk_t			*mpchain, **mpp, *mp;
2132 	int			head, cnt;
2133 
2134 	mpchain = NULL;
2135 	mpp = &mpchain;
2136 	head = afep->afe_rxhead;
2137 
2138 	/* limit the number of packets we process to half the ring size */
2139 	for (cnt = 0; cnt < AFE_RXRING / 2; cnt++) {
2140 
2141 		rmd = &afep->afe_rxdescp[head];
2142 		rxb = afep->afe_rxbufs[head];
2143 
2144 		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORKERNEL);
2145 		status = GETRXDESC(afep, rmd->desc_status);
2146 		if (status & RXSTAT_OWN) {
2147 			/* chip is still chewing on it */
2148 			break;
2149 		}
2150 
2151 		/* discard the ethernet frame checksum */
2152 		len = RXLENGTH(status) - ETHERFCSL;
2153 
2154 		if ((status & (RXSTAT_ERRS | RXSTAT_FIRST | RXSTAT_LAST)) !=
2155 		    (RXSTAT_FIRST | RXSTAT_LAST)) {
2156 
2157 			afep->afe_errrcv++;
2158 
2159 			/*
2160 			 * Abnormal status; decide which error counter to bump.
2161 			 */
2162 			if ((status & (RXSTAT_LAST|RXSTAT_FIRST)) !=
2163 			    (RXSTAT_LAST|RXSTAT_FIRST)) {
2164 
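				/*
				 * The frame did not start and end in a
				 * single descriptor, which usually
				 * means it was too large for our
				 * buffer; charge it to the descriptor
				 * that started the frame.
				 */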
2165 				if (status & RXSTAT_FIRST) {
2166 					afep->afe_toolong_errors++;
2167 				}
2168 			} else if (status & RXSTAT_DESCERR) {
2169 				afep->afe_macrcv_errors++;
2170 
2171 			} else if (status & RXSTAT_RUNT) {
2172 				afep->afe_runt++;
2173 
2174 			} else if (status & RXSTAT_COLLSEEN) {
2175 				/* this should really be rx_late_collisions */
2176 				afep->afe_macrcv_errors++;
2177 
2178 			} else if (status & RXSTAT_DRIBBLE) {
2179 				afep->afe_align_errors++;
2180 
2181 			} else if (status & RXSTAT_CRCERR) {
2182 				afep->afe_fcs_errors++;
2183 
2184 			} else if (status & RXSTAT_OFLOW) {
2185 				afep->afe_overflow++;
2186 			}
2187 		} else if (len > ETHERVLANMTU) {
2188 			afep->afe_errrcv++;
2189 			afep->afe_toolong_errors++;
2190 		} else {
2191 			/*
2192 			 * At this point, the chip thinks the packet
2193 			 * is OK.
2194 			 */
2198 			mp = allocb(len + AFE_HEADROOM, 0);
2199 			if (mp == NULL) {
2200 				afep->afe_errrcv++;
2201 				afep->afe_norcvbuf++;
2202 				goto skip;
2203 			}
2204 
2205 			/* sync the buffer before we look at it */
2206 			SYNCRXBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL);
2207 			mp->b_rptr += AFE_HEADROOM;
2208 			mp->b_wptr = mp->b_rptr + len;
2209 			bcopy((char *)rxb->rxb_buf, mp->b_rptr, len);
2210 
2211 			afep->afe_ipackets++;
2212 			afep->afe_rbytes += len;
2213 			if (status & RXSTAT_GROUP) {
2214 				if (bcmp(mp->b_rptr, afe_broadcast,
2215 				    ETHERADDRL) == 0)
2216 					afep->afe_brdcstrcv++;
2217 				else
2218 					afep->afe_multircv++;
2219 			}
2220 			*mpp = mp;
2221 			mpp = &mp->b_next;
2222 		}
2223 
2224 skip:
2225 		/* return ring entry to the hardware */
2226 		PUTRXDESC(afep, rmd->desc_status, RXSTAT_OWN);
2227 		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORDEV);
2228 
2229 		/* advance to next RMD */
2230 		head = (head + 1) % AFE_RXRING;
2231 	}
2232 
2233 	afep->afe_rxhead = head;
2234 
2235 	return (mpchain);
2236 }
2237 
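/*
 * MAC (GLDv3) statistics entry point.  Completed transmit descriptors
 * are reclaimed first so the transmit counters are current; link and
 * PHY statistics are answered by the common MII layer, and everything
 * else comes from the soft-state counters maintained by the send,
 * receive, and reclaim paths.
 */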
2238 int
2239 afe_m_stat(void *arg, uint_t stat, uint64_t *val)
2240 {
2241 	afe_t	*afep = arg;
2242 
2243 	mutex_enter(&afep->afe_xmtlock);
2244 	if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) == AFE_RUNNING)
2245 		afe_reclaim(afep);
2246 	mutex_exit(&afep->afe_xmtlock);
2247 
2248 	if (mii_m_getstat(afep->afe_mii, stat, val) == 0) {
2249 		return (0);
2250 	}
2251 	switch (stat) {
2252 	case MAC_STAT_MULTIRCV:
2253 		*val = afep->afe_multircv;
2254 		break;
2255 
2256 	case MAC_STAT_BRDCSTRCV:
2257 		*val = afep->afe_brdcstrcv;
2258 		break;
2259 
2260 	case MAC_STAT_MULTIXMT:
2261 		*val = afep->afe_multixmt;
2262 		break;
2263 
2264 	case MAC_STAT_BRDCSTXMT:
2265 		*val = afep->afe_brdcstxmt;
2266 		break;
2267 
2268 	case MAC_STAT_IPACKETS:
2269 		*val = afep->afe_ipackets;
2270 		break;
2271 
2272 	case MAC_STAT_RBYTES:
2273 		*val = afep->afe_rbytes;
2274 		break;
2275 
2276 	case MAC_STAT_OPACKETS:
2277 		*val = afep->afe_opackets;
2278 		break;
2279 
2280 	case MAC_STAT_OBYTES:
2281 		*val = afep->afe_obytes;
2282 		break;
2283 
2284 	case MAC_STAT_NORCVBUF:
2285 		*val = afep->afe_norcvbuf;
2286 		break;
2287 
2288 	case MAC_STAT_NOXMTBUF:
2289 		*val = 0;
2290 		break;
2291 
2292 	case MAC_STAT_COLLISIONS:
2293 		*val = afep->afe_collisions;
2294 		break;
2295 
2296 	case MAC_STAT_IERRORS:
2297 		*val = afep->afe_errrcv;
2298 		break;
2299 
2300 	case MAC_STAT_OERRORS:
2301 		*val = afep->afe_errxmt;
2302 		break;
2303 
2304 	case ETHER_STAT_ALIGN_ERRORS:
2305 		*val = afep->afe_align_errors;
2306 		break;
2307 
2308 	case ETHER_STAT_FCS_ERRORS:
2309 		*val = afep->afe_fcs_errors;
2310 		break;
2311 
2312 	case ETHER_STAT_SQE_ERRORS:
2313 		*val = afep->afe_sqe_errors;
2314 		break;
2315 
2316 	case ETHER_STAT_DEFER_XMTS:
2317 		*val = afep->afe_defer_xmts;
2318 		break;
2319 
2320 	case ETHER_STAT_FIRST_COLLISIONS:
2321 		*val = afep->afe_first_collisions;
2322 		break;
2323 
2324 	case ETHER_STAT_MULTI_COLLISIONS:
2325 		*val = afep->afe_multi_collisions;
2326 		break;
2327 
2328 	case ETHER_STAT_TX_LATE_COLLISIONS:
2329 		*val = afep->afe_tx_late_collisions;
2330 		break;
2331 
2332 	case ETHER_STAT_EX_COLLISIONS:
2333 		*val = afep->afe_ex_collisions;
2334 		break;
2335 
2336 	case ETHER_STAT_MACXMT_ERRORS:
2337 		*val = afep->afe_macxmt_errors;
2338 		break;
2339 
2340 	case ETHER_STAT_CARRIER_ERRORS:
2341 		*val = afep->afe_carrier_errors;
2342 		break;
2343 
2344 	case ETHER_STAT_TOOLONG_ERRORS:
2345 		*val = afep->afe_toolong_errors;
2346 		break;
2347 
2348 	case ETHER_STAT_MACRCV_ERRORS:
2349 		*val = afep->afe_macrcv_errors;
2350 		break;
2351 
2352 	case MAC_STAT_OVERFLOWS:
2353 		*val = afep->afe_overflow;
2354 		break;
2355 
2356 	case MAC_STAT_UNDERFLOWS:
2357 		*val = afep->afe_underflow;
2358 		break;
2359 
2360 	case ETHER_STAT_TOOSHORT_ERRORS:
2361 		*val = afep->afe_runt;
2362 		break;
2363 
2364 	case ETHER_STAT_JABBER_ERRORS:
2365 		*val = afep->afe_jabber;
2366 		break;
2367 
2368 	default:
2369 		return (ENOTSUP);
2370 	}
2371 	return (0);
2372 }
2373 
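/*
 * MAC property access.  Both the get and set paths are delegated to
 * the common MII layer, which implements the link-related properties
 * for this driver.
 */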
2374 int
2375 afe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
2376     uint_t sz, void *val, uint_t *perm)
2377 {
2378 	afe_t		*afep = arg;
2379 
2380 	return (mii_m_getprop(afep->afe_mii, name, num, flags, sz, val, perm));
2381 }
2382 
2383 int
2384 afe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2385     const void *val)
2386 {
2387 	afe_t		*afep = arg;
2388 
2389 	return (mii_m_setprop(afep->afe_mii, name, num, sz, val));
2390 }
2391 
2392 /*
2393  * Debugging and error reporting.
2394  */
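/*
 * A hypothetical example of how this is used:
 *
 *	afe_error(dip, "unable to allocate descriptors: %d", rc);
 *
 * which cmn_err(CE_WARN) reports as, e.g.,
 * "WARNING: afe0: unable to allocate descriptors: ...".
 */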
2395 void
2396 afe_error(dev_info_t *dip, char *fmt, ...)
2397 {
2398 	va_list	ap;
2399 	char	buf[256];
2400 
2401 	va_start(ap, fmt);
2402 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2403 	va_end(ap);
2404 
2405 	if (dip) {
2406 		cmn_err(CE_WARN, "%s%d: %s",
2407 		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
2408 	} else {
2409 		cmn_err(CE_WARN, "afe: %s", buf);
2410 	}
2411 }
2412