xref: /titanic_44/usr/src/uts/common/io/dmfe/dmfe_main.c (revision 2f0fcb93196badcdd803715656c809058d9f3114)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 #include <sys/types.h>
28 #include <sys/sunddi.h>
29 #include "dmfe_impl.h"
30 
31 /*
32  * This is the string displayed by modinfo, etc.
33  */
34 static char dmfe_ident[] = "Davicom DM9102 Ethernet";
35 
36 
37 /*
38  * NOTES:
39  *
40  * #defines:
41  *
42  *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
43  *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
44  *	regset 1 will be the operating registers in I/O space, and regset 2
45  *	will be the operating registers in MEMORY space (preferred).  If an
46  *	expansion ROM is fitted, it may appear as a further register set.
47  *
48  *	DMFE_SLOP defines the amount by which the chip may read beyond
49  *	the end of a buffer or descriptor, apparently 6-8 dwords :(
50  *	We have to make sure this doesn't cause it to access unallocated
51  *	or unmapped memory.
52  *
53  *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
54  *	rounded up to a multiple of 4.  Here we choose a power of two for
55  *	speed & simplicity at the cost of a bit more memory.
56  *
57  *	However, the buffer length field in the TX/RX descriptors is only
58  *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
59  *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
60  *	(2000) bytes each.
61  *
62  *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
63  *	the data buffers.  The descriptors are always set up in CONSISTENT
64  *	mode.
65  *
66  *	DMFE_HEADROOM defines how much space we'll leave in allocated
67  *	mblks before the first valid data byte.  This should be chosen
68  *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
69  *	has been stripped off, the packet data will be 4-byte aligned.
70  *	The remaining space can be used by upstream modules to prepend
71  *	any headers required.
72  *
73  * Patchable globals:
74  *
75  *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
76  *		Setting READ_MULTIPLE in this register seems to cause
77  *		the chip to generate a READ LINE command with a parity
78  *		error!  Don't do it!
79  *
80  *	dmfe_setup_desc1: the value to be put into descriptor word 1
81  *		when sending a SETUP packet.
82  *
83  *		Setting TX_LAST_DESC in desc1 in a setup packet seems
84  *		to make the chip spontaneously reset internally - it
85  *		attempts to give back the setup packet descriptor by
86  *		writing to PCI address 00000000 - which may or may not
87  *		get a MASTER ABORT - after which most of its registers
88  *		seem to have either default values or garbage!
89  *
90  *		TX_FIRST_DESC doesn't seem to have the same effect but
91  *		it isn't needed on a setup packet so we'll leave it out
92  *		too, just in case it has some other wierd side-effect.
93  *
94  *		The default hardware packet filtering mode is now
95  *		HASH_AND_PERFECT (imperfect filtering of multicast
96  *		packets and perfect filtering of unicast packets).
97  *		If this is found not to work reliably, setting the
98  *		TX_FILTER_TYPE1 bit will cause a switchover to using
99  *		HASH_ONLY mode (imperfect filtering of *all* packets).
100  *		Software will then perform the additional filtering
101  *		as required.
102  */
103 
104 #define	DMFE_PCI_RNUMBER	2
105 #define	DMFE_SLOP		(8*sizeof (uint32_t))
106 #define	DMFE_BUF_SIZE		2048
107 #define	DMFE_BUF_SIZE_1		2000
108 #define	DMFE_DMA_MODE		DDI_DMA_STREAMING
109 #define	DMFE_HEADROOM		34
110 
111 static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
112 static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
113 					TX_FILTER_TYPE0;
114 
115 /*
116  * Some tunable parameters ...
117  *	Number of RX/TX ring entries (128/128)
118  *	Minimum number of TX ring slots to keep free (1)
119  *	Low-water mark at which to try to reclaim TX ring slots (1)
120  *	How often to take a TX-done interrupt (twice per ring cycle)
121  *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
122  */
123 
124 #define	DMFE_TX_DESC		128	/* Should be a multiple of 4 <= 256 */
125 #define	DMFE_RX_DESC		128	/* Should be a multiple of 4 <= 256 */
126 
127 static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
128 static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
129 static uint32_t dmfe_tx_min_free = 1;
130 static uint32_t dmfe_tx_reclaim_level = 1;
131 static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
132 static boolean_t dmfe_reclaim_on_done = B_FALSE;
133 
134 /*
135  * Time-related parameters:
136  *
137  *	We use a cyclic to provide a periodic callback; this is then used
138  * 	to check for TX-stall and poll the link status register.
139  *
140  *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
141  *
142  *	TX_STALL_TIME_100 is the timeout in microseconds between passing
143  *	a packet to the chip for transmission and seeing that it's gone,
144  *	when running at 100Mb/s.  If we haven't reclaimed at least one
145  *	descriptor in this time we assume the transmitter has stalled
146  *	and reset the chip.
147  *
148  *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
149  *
150  *	LINK_POLL_TIME is the interval between checks on the link state
151  *	when nothing appears to have happened (this is in addition to the
152  *	case where we think we've detected a link change, and serves as a
153  *	backup in case the quick link check doesn't work properly).
154  *
155  * Patchable globals:
156  *
157  *	dmfe_tick_us:		DMFE_TICK
158  *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
159  *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
160  *	dmfe_link_poll_us:	LINK_POLL_TIME
161  *
162  * These are then used in _init() to calculate:
163  *
164  *	stall_100_tix[]: number of consecutive cyclic callbacks without a
165  *			 reclaim before the TX process is considered stalled,
166  *			 when running at 100Mb/s.  The elements are indexed
167  *			 by transmit-engine-state.
168  *	stall_10_tix[]:	 number of consecutive cyclic callbacks without a
169  *			 reclaim before the TX process is considered stalled,
170  *			 when running at 10Mb/s.  The elements are indexed
171  *			 by transmit-engine-state.
172  *	factotum_tix:	 number of consecutive cyclic callbacks before waking
173  *			 up the factotum even though there doesn't appear to
174  *			 be anything for it to do
175  */
176 
177 #define	DMFE_TICK		25000		/* microseconds		*/
178 #define	TX_STALL_TIME_100	50000		/* microseconds		*/
179 #define	TX_STALL_TIME_10	200000		/* microseconds		*/
180 #define	LINK_POLL_TIME		5000000		/* microseconds		*/
181 
182 static uint32_t dmfe_tick_us = DMFE_TICK;
183 static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
184 static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
185 static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;
186 
187 /*
188  * Calculated from above in _init()
189  */
190 
191 static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
192 static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
193 static uint32_t factotum_tix;
194 static uint32_t factotum_fast_tix;
195 static uint32_t factotum_start_tix;
196 
197 /*
198  * Property names
199  */
200 static char localmac_propname[] = "local-mac-address";
201 static char opmode_propname[] = "opmode-reg-value";
202 static char debug_propname[] = "dmfe-debug-flags";
203 
204 static int		dmfe_m_start(void *);
205 static void		dmfe_m_stop(void *);
206 static int		dmfe_m_promisc(void *, boolean_t);
207 static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
208 static int		dmfe_m_unicst(void *, const uint8_t *);
209 static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
210 static boolean_t	dmfe_m_getcapab(void *, mac_capab_t, void *);
211 static mblk_t		*dmfe_m_tx(void *, mblk_t *);
212 static int 		dmfe_m_stat(void *, uint_t, uint64_t *);
213 
214 static mac_callbacks_t dmfe_m_callbacks = {
215 	(MC_IOCTL | MC_GETCAPAB),
216 	dmfe_m_stat,
217 	dmfe_m_start,
218 	dmfe_m_stop,
219 	dmfe_m_promisc,
220 	dmfe_m_multicst,
221 	dmfe_m_unicst,
222 	dmfe_m_tx,
223 	NULL,
224 	dmfe_m_ioctl,
225 	dmfe_m_getcapab,
226 };
227 
228 
229 /*
230  * Describes the chip's DMA engine
231  */
232 static ddi_dma_attr_t dma_attr = {
233 	DMA_ATTR_V0,		/* dma_attr version */
234 	0,			/* dma_attr_addr_lo */
235 	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
236 	0x0FFFFFF,		/* dma_attr_count_max */
237 	0x20,			/* dma_attr_align */
238 	0x7F,			/* dma_attr_burstsizes */
239 	1,			/* dma_attr_minxfer */
240 	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
241 	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
242 	1,			/* dma_attr_sgllen */
243 	1,			/* dma_attr_granular */
244 	0			/* dma_attr_flags */
245 };
246 
247 /*
248  * DMA access attributes for registers and descriptors
249  */
250 static ddi_device_acc_attr_t dmfe_reg_accattr = {
251 	DDI_DEVICE_ATTR_V0,
252 	DDI_STRUCTURE_LE_ACC,
253 	DDI_STRICTORDER_ACC
254 };
255 
256 /*
257  * DMA access attributes for data: NOT to be byte swapped.
258  */
259 static ddi_device_acc_attr_t dmfe_data_accattr = {
260 	DDI_DEVICE_ATTR_V0,
261 	DDI_NEVERSWAP_ACC,
262 	DDI_STRICTORDER_ACC
263 };
264 
265 static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
266 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
267 };
268 
269 
270 /*
271  * ========== Lowest-level chip register & ring access routines ==========
272  */
273 
274 /*
275  * I/O register get/put routines
276  */
277 uint32_t
278 dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
279 {
280 	uint32_t *addr;
281 
282 	addr = (void *)(dmfep->io_reg + offset);
283 	return (ddi_get32(dmfep->io_handle, addr));
284 }
285 
286 void
287 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
288 {
289 	uint32_t *addr;
290 
291 	addr = (void *)(dmfep->io_reg + offset);
292 	ddi_put32(dmfep->io_handle, addr, value);
293 }
294 
295 /*
296  * TX/RX ring get/put routines
297  */
298 static uint32_t
299 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
300 {
301 	uint32_t *addr;
302 
303 	addr = (void *)dma_p->mem_va;
304 	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
305 }
306 
307 static void
308 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
309 {
310 	uint32_t *addr;
311 
312 	addr = (void *)dma_p->mem_va;
313 	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
314 }
315 
316 /*
317  * Setup buffer get/put routines
318  */
319 static uint32_t
320 dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
321 {
322 	uint32_t *addr;
323 
324 	addr = (void *)dma_p->setup_va;
325 	return (ddi_get32(dma_p->acc_hdl, addr + index));
326 }
327 
328 static void
329 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
330 {
331 	uint32_t *addr;
332 
333 	addr = (void *)dma_p->setup_va;
334 	ddi_put32(dma_p->acc_hdl, addr + index, value);
335 }
336 
337 
338 /*
339  * ========== Low-level chip & ring buffer manipulation ==========
340  */
341 
342 #define	DMFE_DBG	DMFE_DBG_REGS	/* debug flag for this code	*/
343 
344 /*
345  * dmfe_set_opmode() -- function to set operating mode
346  */
347 static void
348 dmfe_set_opmode(dmfe_t *dmfep)
349 {
350 	DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));
351 
352 	ASSERT(mutex_owned(dmfep->oplock));
353 
354 	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
355 	drv_usecwait(10);
356 }
357 
358 /*
359  * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
360  */
361 static void
362 dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
363 {
364 	ASSERT(mutex_owned(dmfep->oplock));
365 
366 	/*
367 	 * Stop the chip:
368 	 *	disable all interrupts
369 	 *	stop TX/RX processes
370 	 *	clear the status bits for TX/RX stopped
371 	 * If required, reset the chip
372 	 * Record the new state
373 	 */
374 	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
375 	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
376 	dmfe_set_opmode(dmfep);
377 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
378 
379 	switch (newstate) {
380 	default:
381 		ASSERT(!"can't get here");
382 		return;
383 
384 	case CHIP_STOPPED:
385 	case CHIP_ERROR:
386 		break;
387 
388 	case CHIP_RESET:
389 		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
390 		drv_usecwait(10);
391 		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
392 		drv_usecwait(10);
393 		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
394 		break;
395 	}
396 
397 	dmfep->chip_state = newstate;
398 }
399 
400 /*
401  * Initialize transmit and receive descriptor rings, and
402  * set the chip to point to the first entry in each ring
403  */
404 static void
405 dmfe_init_rings(dmfe_t *dmfep)
406 {
407 	dma_area_t *descp;
408 	uint32_t pstart;
409 	uint32_t pnext;
410 	uint32_t pbuff;
411 	uint32_t desc1;
412 	int i;
413 
414 	/*
415 	 * You need all the locks in order to rewrite the descriptor rings
416 	 */
417 	ASSERT(mutex_owned(dmfep->oplock));
418 	ASSERT(mutex_owned(dmfep->rxlock));
419 	ASSERT(mutex_owned(dmfep->txlock));
420 
421 	/*
422 	 * Program the RX ring entries
423 	 */
424 	descp = &dmfep->rx_desc;
425 	pstart = descp->mem_dvma;
426 	pnext = pstart + sizeof (struct rx_desc_type);
427 	pbuff = dmfep->rx_buff.mem_dvma;
428 	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;
429 
430 	for (i = 0; i < dmfep->rx.n_desc; ++i) {
431 		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
432 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
433 		dmfe_ring_put32(descp, i, DESC1, desc1);
434 		dmfe_ring_put32(descp, i, DESC0, RX_OWN);
435 
436 		pnext += sizeof (struct rx_desc_type);
437 		pbuff += DMFE_BUF_SIZE;
438 	}
439 
440 	/*
441 	 * Fix up last entry & sync
442 	 */
443 	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
444 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
445 	dmfep->rx.next_free = 0;
446 
447 	/*
448 	 * Set the base address of the RX descriptor list in CSR3
449 	 */
450 	DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
451 	    descp->mem_va, descp->mem_dvma));
452 	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);
453 
454 	/*
455 	 * Program the TX ring entries
456 	 */
457 	descp = &dmfep->tx_desc;
458 	pstart = descp->mem_dvma;
459 	pnext = pstart + sizeof (struct tx_desc_type);
460 	pbuff = dmfep->tx_buff.mem_dvma;
461 	desc1 = TX_CHAINING;
462 
463 	for (i = 0; i < dmfep->tx.n_desc; ++i) {
464 		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
465 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
466 		dmfe_ring_put32(descp, i, DESC1, desc1);
467 		dmfe_ring_put32(descp, i, DESC0, 0);
468 
469 		pnext += sizeof (struct tx_desc_type);
470 		pbuff += DMFE_BUF_SIZE;
471 	}
472 
473 	/*
474 	 * Fix up last entry & sync
475 	 */
476 	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
477 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
478 	dmfep->tx.n_free = dmfep->tx.n_desc;
479 	dmfep->tx.next_free = dmfep->tx.next_busy = 0;
480 
481 	/*
482 	 * Set the base address of the TX descrptor list in CSR4
483 	 */
484 	DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
485 	    descp->mem_va, descp->mem_dvma));
486 	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
487 }
488 
489 /*
490  * dmfe_start_chip() -- start the chip transmitting and/or receiving
491  */
492 static void
493 dmfe_start_chip(dmfe_t *dmfep, int mode)
494 {
495 	ASSERT(mutex_owned(dmfep->oplock));
496 
497 	dmfep->opmode |= mode;
498 	dmfe_set_opmode(dmfep);
499 
500 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
501 	/*
502 	 * Enable VLAN length mode (allows packets to be 4 bytes Longer).
503 	 */
504 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);
505 
506 	/*
507 	 * Clear any pending process-stopped interrupts
508 	 */
509 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
510 	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
511 	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
512 }
513 
514 /*
515  * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
516  *
517  * Normal interrupts:
518  *	We always enable:
519  *		RX_PKTDONE_INT		(packet received)
520  *		TX_PKTDONE_INT		(TX complete)
521  *	We never enable:
522  *		TX_ALLDONE_INT		(next TX buffer not ready)
523  *
524  * Abnormal interrupts:
525  *	We always enable:
526  *		RX_STOPPED_INT
527  *		TX_STOPPED_INT
528  *		SYSTEM_ERR_INT
529  *		RX_UNAVAIL_INT
530  *	We never enable:
531  *		RX_EARLY_INT
532  *		RX_WATCHDOG_INT
533  *		TX_JABBER_INT
534  *		TX_EARLY_INT
535  *		TX_UNDERFLOW_INT
536  *		GP_TIMER_INT		(not valid in -9 chips)
537  *		LINK_STATUS_INT		(not valid in -9 chips)
538  */
539 static void
540 dmfe_enable_interrupts(dmfe_t *dmfep)
541 {
542 	ASSERT(mutex_owned(dmfep->oplock));
543 
544 	/*
545 	 * Put 'the standard set of interrupts' in the interrupt mask register
546 	 */
547 	dmfep->imask =	RX_PKTDONE_INT | TX_PKTDONE_INT |
548 	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;
549 
550 	dmfe_chip_put32(dmfep, INT_MASK_REG,
551 	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
552 	dmfep->chip_state = CHIP_RUNNING;
553 
554 	DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
555 }
556 
557 #undef	DMFE_DBG
558 
559 
560 /*
561  * ========== RX side routines ==========
562  */
563 
564 #define	DMFE_DBG	DMFE_DBG_RECV	/* debug flag for this code	*/
565 
566 /*
567  * Function to update receive statistics on various errors
568  */
569 static void
570 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
571 {
572 	ASSERT(mutex_owned(dmfep->rxlock));
573 
574 	/*
575 	 * The error summary bit and the error bits that it summarises
576 	 * are only valid if this is the last fragment.  Therefore, a
577 	 * fragment only contributes to the error statistics if both
578 	 * the last-fragment and error summary bits are set.
579 	 */
580 	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
581 		dmfep->rx_stats_ierrors += 1;
582 
583 		/*
584 		 * There are some other error bits in the descriptor for
585 		 * which there don't seem to be appropriate MAC statistics,
586 		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
587 		 * latter may not be possible if it is supposed to indicate
588 		 * that one buffer has been filled with a partial packet
589 		 * and the next buffer required for the rest of the packet
590 		 * was not available, as all our buffers are more than large
591 		 * enough for a whole packet without fragmenting.
592 		 */
593 
594 		if (desc0 & RX_OVERFLOW) {
595 			dmfep->rx_stats_overflow += 1;
596 
597 		} else if (desc0 & RX_RUNT_FRAME)
598 			dmfep->rx_stats_short += 1;
599 
600 		if (desc0 & RX_CRC)
601 			dmfep->rx_stats_fcs += 1;
602 
603 		if (desc0 & RX_FRAME2LONG)
604 			dmfep->rx_stats_toolong += 1;
605 	}
606 
607 	/*
608 	 * A receive watchdog timeout is counted as a MAC-level receive
609 	 * error.  Strangely, it doesn't set the packet error summary bit,
610 	 * according to the chip data sheet :-?
611 	 */
612 	if (desc0 & RX_RCV_WD_TO)
613 		dmfep->rx_stats_macrcv_errors += 1;
614 
615 	if (desc0 & RX_DRIBBLING)
616 		dmfep->rx_stats_align += 1;
617 
618 	if (desc0 & RX_MII_ERR)
619 		dmfep->rx_stats_macrcv_errors += 1;
620 }
621 
622 /*
623  * Receive incoming packet(s) and pass them up ...
624  */
625 static mblk_t *
626 dmfe_getp(dmfe_t *dmfep)
627 {
628 	dma_area_t *descp;
629 	mblk_t **tail;
630 	mblk_t *head;
631 	mblk_t *mp;
632 	char *rxb;
633 	uchar_t *dp;
634 	uint32_t desc0;
635 	uint32_t misses;
636 	int packet_length;
637 	int index;
638 
639 	mutex_enter(dmfep->rxlock);
640 
641 	/*
642 	 * Update the missed frame statistic from the on-chip counter.
643 	 */
644 	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
645 	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);
646 
647 	/*
648 	 * sync (all) receive descriptors before inspecting them
649 	 */
650 	descp = &dmfep->rx_desc;
651 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
652 
653 	/*
654 	 * We should own at least one RX entry, since we've had a
655 	 * receive interrupt, but let's not be dogmatic about it.
656 	 */
657 	index = dmfep->rx.next_free;
658 	desc0 = dmfe_ring_get32(descp, index, DESC0);
659 	if (desc0 & RX_OWN)
660 		DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));
661 
662 	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
663 		/*
664 		 * Maintain statistics for every descriptor returned
665 		 * to us by the chip ...
666 		 */
667 		DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
668 		dmfe_update_rx_stats(dmfep, desc0);
669 
670 		/*
671 		 * Check that the entry has both "packet start" and
672 		 * "packet end" flags.  We really shouldn't get packet
673 		 * fragments, 'cos all the RX buffers are bigger than
674 		 * the largest valid packet.  So we'll just drop any
675 		 * fragments we find & skip on to the next entry.
676 		 */
677 		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
678 			DMFE_DEBUG(("dmfe_getp: dropping fragment"));
679 			goto skip;
680 		}
681 
682 		/*
683 		 * A whole packet in one buffer.  We have to check error
684 		 * status and packet length before forwarding it upstream.
685 		 */
686 		if (desc0 & RX_ERR_SUMMARY) {
687 			DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
688 			goto skip;
689 		}
690 
691 		packet_length = (desc0 >> 16) & 0x3fff;
692 		if (packet_length > DMFE_MAX_PKT_SIZE) {
693 			DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
694 			    "length %d", packet_length));
695 			goto skip;
696 		} else if (packet_length < ETHERMIN) {
697 			/*
698 			 * Note that VLAN packet would be even larger,
699 			 * but we don't worry about dropping runt VLAN
700 			 * frames.
701 			 *
702 			 * This check is probably redundant, as well,
703 			 * since the hardware should drop RUNT frames.
704 			 */
705 			DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
706 			    "length %d", packet_length));
707 			goto skip;
708 		}
709 
710 		/*
711 		 * Sync the data, so we can examine it; then check that
712 		 * the packet is really intended for us (remember that
713 		 * if we're using Imperfect Filtering, then the chip will
714 		 * receive unicast packets sent to stations whose addresses
715 		 * just happen to hash to the same value as our own; we
716 		 * discard these here so they don't get sent upstream ...)
717 		 */
718 		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
719 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
720 		    DDI_DMA_SYNC_FORKERNEL);
721 		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
722 
723 
724 		/*
725 		 * We do not bother to check that the packet is really for
726 		 * us, we let the MAC framework make that check instead.
727 		 * This is especially important if we ever want to support
728 		 * multiple MAC addresses.
729 		 */
730 
731 		/*
732 		 * Packet looks good; get a buffer to copy it into.  We
733 		 * allow some space at the front of the allocated buffer
734 		 * (HEADROOM) in case any upstream modules want to prepend
735 		 * some sort of header.  The value has been carefully chosen
736 		 * So that it also has the side-effect of making the packet
737 		 * *contents* 4-byte aligned, as required by NCA!
738 		 */
739 		mp = allocb(DMFE_HEADROOM + packet_length, 0);
740 		if (mp == NULL) {
741 			DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
742 			dmfep->rx_stats_norcvbuf += 1;
743 			goto skip;
744 		}
745 
746 		/*
747 		 * Account for statistics of good packets.
748 		 */
749 		dmfep->rx_stats_ipackets += 1;
750 		dmfep->rx_stats_rbytes += packet_length;
751 		if (desc0 & RX_MULTI_FRAME) {
752 			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
753 				dmfep->rx_stats_multi += 1;
754 			} else {
755 				dmfep->rx_stats_bcast += 1;
756 			}
757 		}
758 
759 		/*
760 		 * Copy the packet into the STREAMS buffer
761 		 */
762 		dp = mp->b_rptr += DMFE_HEADROOM;
763 		mp->b_cont = mp->b_next = NULL;
764 
765 		/*
766 		 * Don't worry about stripping the vlan tag, the MAC
767 		 * layer will take care of that for us.
768 		 */
769 		bcopy(rxb, dp, packet_length);
770 
771 		/*
772 		 * Fix up the packet length, and link it to the chain
773 		 */
774 		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
775 		*tail = mp;
776 		tail = &mp->b_next;
777 
778 	skip:
779 		/*
780 		 * Return ownership of ring entry & advance to next
781 		 */
782 		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
783 		index = NEXT(index, dmfep->rx.n_desc);
784 		desc0 = dmfe_ring_get32(descp, index, DESC0);
785 	}
786 
787 	/*
788 	 * Remember where to start looking next time ...
789 	 */
790 	dmfep->rx.next_free = index;
791 
792 	/*
793 	 * sync the receive descriptors that we've given back
794 	 * (actually, we sync all of them for simplicity), and
795 	 * wake the chip in case it had suspended receive
796 	 */
797 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
798 	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);
799 
800 	mutex_exit(dmfep->rxlock);
801 	return (head);
802 }
803 
804 #undef	DMFE_DBG
805 
806 
807 /*
808  * ========== Primary TX side routines ==========
809  */
810 
811 #define	DMFE_DBG	DMFE_DBG_SEND	/* debug flag for this code	*/
812 
813 /*
814  *	TX ring management:
815  *
816  *	There are <tx.n_desc> entries in the ring, of which those from
817  *	<tx.next_free> round to but not including <tx.next_busy> must
818  *	be owned by the CPU.  The number of such entries should equal
819  *	<tx.n_free>; but there may also be some more entries which the
820  *	chip has given back but which we haven't yet accounted for.
821  *	The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
822  *	as it discovers such entries.
823  *
824  *	Initially, or when the ring is entirely free:
825  *		C = Owned by CPU
826  *		D = Owned by Davicom (DMFE) chip
827  *
828  *	tx.next_free					tx.n_desc = 16
829  *	  |
830  *	  v
831  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
832  *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
833  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
834  *	  ^
835  *	  |
836  *	tx.next_busy					tx.n_free = 16
837  *
838  *	On entry to reclaim() during normal use:
839  *
840  *					tx.next_free	tx.n_desc = 16
841  *					      |
842  *					      v
843  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
844  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
845  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
846  *		  ^
847  *		  |
848  *		tx.next_busy				tx.n_free = 9
849  *
850  *	On exit from reclaim():
851  *
852  *					tx.next_free	tx.n_desc = 16
853  *					      |
854  *					      v
855  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
856  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
857  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
858  *				  ^
859  *				  |
860  *			     tx.next_busy		tx.n_free = 13
861  *
862  *	The ring is considered "full" when only one entry is owned by
863  *	the CPU; thus <tx.n_free> should always be >= 1.
864  *
865  *			tx.next_free			tx.n_desc = 16
866  *			      |
867  *			      v
868  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
869  *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
870  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
871  *				  ^
872  *				  |
873  *			     tx.next_busy		tx.n_free = 1
874  */
875 
876 /*
877  * Function to update transmit statistics on various errors
878  */
879 static void
880 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
881 {
882 	uint32_t collisions;
883 	uint32_t errbits;
884 	uint32_t errsum;
885 
886 	ASSERT(mutex_owned(dmfep->txlock));
887 
888 	collisions = ((desc0 >> 3) & 0x0f);
889 	errsum = desc0 & TX_ERR_SUMMARY;
890 	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
891 	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
892 	if ((errsum == 0) != (errbits == 0)) {
893 		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
894 		desc0 |= TX_ERR_SUMMARY;
895 	}
896 
897 	if (desc0 & TX_ERR_SUMMARY) {
898 		dmfep->tx_stats_oerrors += 1;
899 
900 		/*
901 		 * If we ever see a transmit jabber timeout, we count it
902 		 * as a MAC-level transmit error; but we probably won't
903 		 * see it as it causes an Abnormal interrupt and we reset
904 		 * the chip in order to recover
905 		 */
906 		if (desc0 & TX_JABBER_TO) {
907 			dmfep->tx_stats_macxmt_errors += 1;
908 			dmfep->tx_stats_jabber += 1;
909 		}
910 
911 		if (desc0 & TX_UNDERFLOW)
912 			dmfep->tx_stats_underflow += 1;
913 		else if (desc0 & TX_LATE_COLL)
914 			dmfep->tx_stats_xmtlatecoll += 1;
915 
916 		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
917 			dmfep->tx_stats_nocarrier += 1;
918 
919 		if (desc0 & TX_EXCESS_COLL) {
920 			dmfep->tx_stats_excoll += 1;
921 			collisions = 16;
922 		}
923 	} else {
924 		int	bit = index % NBBY;
925 		int	byt = index / NBBY;
926 
927 		if (dmfep->tx_mcast[byt] & bit) {
928 			dmfep->tx_mcast[byt] &= ~bit;
929 			dmfep->tx_stats_multi += 1;
930 
931 		} else if (dmfep->tx_bcast[byt] & bit) {
932 			dmfep->tx_bcast[byt] &= ~bit;
933 			dmfep->tx_stats_bcast += 1;
934 		}
935 
936 		dmfep->tx_stats_opackets += 1;
937 		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
938 	}
939 
940 	if (collisions == 1)
941 		dmfep->tx_stats_first_coll += 1;
942 	else if (collisions != 0)
943 		dmfep->tx_stats_multi_coll += 1;
944 	dmfep->tx_stats_collisions += collisions;
945 
946 	if (desc0 & TX_DEFERRED)
947 		dmfep->tx_stats_defer += 1;
948 }
949 
950 /*
951  * Reclaim all the ring entries that the chip has returned to us ...
952  *
953  * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
954  * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
955  */
956 static boolean_t
957 dmfe_reclaim_tx_desc(dmfe_t *dmfep)
958 {
959 	dma_area_t *descp;
960 	uint32_t desc0;
961 	uint32_t desc1;
962 	int i;
963 
964 	ASSERT(mutex_owned(dmfep->txlock));
965 
966 	/*
967 	 * sync transmit descriptor ring before looking at it
968 	 */
969 	descp = &dmfep->tx_desc;
970 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
971 
972 	/*
973 	 * Early exit if there are no descriptors to reclaim, either
974 	 * because they're all reclaimed already, or because the next
975 	 * one is still owned by the chip ...
976 	 */
977 	i = dmfep->tx.next_busy;
978 	if (i == dmfep->tx.next_free)
979 		return (B_FALSE);
980 	desc0 = dmfe_ring_get32(descp, i, DESC0);
981 	if (desc0 & TX_OWN)
982 		return (B_FALSE);
983 
984 	/*
985 	 * Reclaim as many descriptors as possible ...
986 	 */
987 	for (;;) {
988 		desc1 = dmfe_ring_get32(descp, i, DESC1);
989 		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);
990 
991 		if (desc1 & TX_SETUP_PACKET) {
992 			/*
993 			 * Setup packet - restore buffer address
994 			 */
995 			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
996 			    descp->setup_dvma);
997 			dmfe_ring_put32(descp, i, BUFFER1,
998 			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
999 		} else {
1000 			/*
1001 			 * Regular packet - just update stats
1002 			 */
1003 			ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
1004 			    dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
1005 			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
1006 		}
1007 
1008 #if	DMFEDEBUG
1009 		/*
1010 		 * We can use one of the SPARE bits in the TX descriptor
1011 		 * to track when a ring buffer slot is reclaimed.  Then
1012 		 * we can deduce the last operation on a slot from the
1013 		 * top half of DESC0:
1014 		 *
1015 		 *	0x8000 xxxx	given to DMFE chip (TX_OWN)
1016 		 *	0x7fff xxxx	returned but not yet reclaimed
1017 		 *	0x3fff xxxx	reclaimed
1018 		 */
1019 #define	TX_PEND_RECLAIM		(1UL<<30)
1020 		dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
1021 #endif	/* DMFEDEBUG */
1022 
1023 		/*
1024 		 * Update count & index; we're all done if the ring is
1025 		 * now fully reclaimed, or the next entry if still owned
1026 		 * by the chip ...
1027 		 */
1028 		dmfep->tx.n_free += 1;
1029 		i = NEXT(i, dmfep->tx.n_desc);
1030 		if (i == dmfep->tx.next_free)
1031 			break;
1032 		desc0 = dmfe_ring_get32(descp, i, DESC0);
1033 		if (desc0 & TX_OWN)
1034 			break;
1035 	}
1036 
1037 	dmfep->tx.next_busy = i;
1038 	dmfep->tx_pending_tix = 0;
1039 	return (B_TRUE);
1040 }
1041 
1042 /*
1043  * Send the message in the message block chain <mp>.
1044  *
1045  * The message is freed if and only if its contents are successfully copied
1046  * and queued for transmission (so that the return value is B_TRUE).
1047  * If we can't queue the message, the return value is B_FALSE and
1048  * the message is *not* freed.
1049  *
1050  * This routine handles the special case of <mp> == NULL, which indicates
1051  * that we want to "send" the special "setup packet" allocated during
1052  * startup.  We have to use some different flags in the packet descriptor
1053  * to say its a setup packet (from the global <dmfe_setup_desc1>), and the
1054  * setup packet *isn't* freed after use.
1055  */
1056 static boolean_t
1057 dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
1058 {
1059 	dma_area_t *descp;
1060 	mblk_t *bp;
1061 	char *txb;
1062 	uint32_t desc1;
1063 	uint32_t index;
1064 	size_t totlen;
1065 	size_t mblen;
1066 
1067 	/*
1068 	 * If the number of free slots is below the reclaim threshold
1069 	 * (soft limit), we'll try to reclaim some.  If we fail, and
1070 	 * the number of free slots is also below the minimum required
1071 	 * (the hard limit, usually 1), then we can't send the packet.
1072 	 */
1073 	mutex_enter(dmfep->txlock);
1074 	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
1075 	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
1076 	    dmfep->tx.n_free <= dmfe_tx_min_free) {
1077 		/*
1078 		 * Resource shortage - return B_FALSE so the packet
1079 		 * will be queued for retry after the next TX-done
1080 		 * interrupt.
1081 		 */
1082 		mutex_exit(dmfep->txlock);
1083 		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
1084 		return (B_FALSE);
1085 	}
1086 
1087 	/*
1088 	 * There's a slot available, so claim it by incrementing
1089 	 * the next-free index and decrementing the free count.
1090 	 * If the ring is currently empty, we also restart the
1091 	 * stall-detect timer.  The ASSERTions check that our
1092 	 * invariants still hold:
1093 	 *	the next-free index must not match the next-busy index
1094 	 *	there must still be at least one free entry
1095 	 * After this, we now have exclusive ownership of the ring
1096 	 * entry (and matching buffer) indicated by <index>, so we
1097 	 * don't need to hold the TX lock any longer
1098 	 */
1099 	index = dmfep->tx.next_free;
1100 	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
1101 	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
1102 	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
1103 		dmfep->tx_pending_tix = 0;
1104 	ASSERT(dmfep->tx.n_free >= 1);
1105 	mutex_exit(dmfep->txlock);
1106 
1107 	/*
1108 	 * Check the ownership of the ring entry ...
1109 	 */
1110 	descp = &dmfep->tx_desc;
1111 	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);
1112 
1113 	if (mp == NULL) {
1114 		/*
1115 		 * Indicates we should send a SETUP packet, which we do by
1116 		 * temporarily switching the BUFFER1 pointer in the ring
1117 		 * entry.  The reclaim routine will restore BUFFER1 to its
1118 		 * usual value.
1119 		 *
1120 		 * Note that as the setup packet is tagged on the end of
1121 		 * the TX ring, when we sync the descriptor we're also
1122 		 * implicitly syncing the setup packet - hence, we don't
1123 		 * need a separate ddi_dma_sync() call here.
1124 		 */
1125 		desc1 = dmfe_setup_desc1;
1126 		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
1127 	} else {
1128 		/*
1129 		 * A regular packet; we copy the data into a pre-mapped
1130 		 * buffer, which avoids the overhead (and complication)
1131 		 * of mapping/unmapping STREAMS buffers and keeping hold
1132 		 * of them until the DMA has completed.
1133 		 *
1134 		 * Because all buffers are the same size, and larger
1135 		 * than the longest single valid message, we don't have
1136 		 * to bother about splitting the message across multiple
1137 		 * buffers.
1138 		 */
1139 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
1140 		totlen = 0;
1141 		bp = mp;
1142 
1143 		/*
1144 		 * Copy all (remaining) mblks in the message ...
1145 		 */
1146 		for (; bp != NULL; bp = bp->b_cont) {
1147 			mblen = MBLKL(bp);
1148 			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
1149 				bcopy(bp->b_rptr, txb, mblen);
1150 				txb += mblen;
1151 			}
1152 		}
1153 
1154 		/*
1155 		 * Is this a multicast or broadcast packet?  We do
1156 		 * this so that we can track statistics accurately
1157 		 * when we reclaim it.
1158 		 */
1159 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
1160 		if (txb[0] & 0x1) {
1161 			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
1162 				dmfep->tx_bcast[index / NBBY] |=
1163 				    (1 << (index % NBBY));
1164 			} else {
1165 				dmfep->tx_mcast[index / NBBY] |=
1166 				    (1 << (index % NBBY));
1167 			}
1168 		}
1169 
1170 		/*
1171 		 * We'e reached the end of the chain; and we should have
1172 		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
1173 		 * buffer.  Note that the <size> field in the descriptor is
1174 		 * only 11 bits, so bigger packets would be a problem!
1175 		 */
1176 		ASSERT(bp == NULL);
1177 		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
1178 		totlen &= TX_BUFFER_SIZE1;
1179 		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
1180 
1181 		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
1182 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
1183 	}
1184 
1185 	/*
1186 	 * Update ring descriptor entries, sync them, and wake up the
1187 	 * transmit process
1188 	 */
1189 	if ((index & dmfe_tx_int_factor) == 0)
1190 		desc1 |= TX_INT_ON_COMP;
1191 	desc1 |= TX_CHAINING;
1192 	dmfe_ring_put32(descp, index, DESC1, desc1);
1193 	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
1194 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
1195 	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);
1196 
1197 	/*
1198 	 * Finally, free the message & return success
1199 	 */
1200 	if (mp)
1201 		freemsg(mp);
1202 	return (B_TRUE);
1203 }
1204 
1205 /*
1206  *	dmfe_m_tx() -- send a chain of packets
1207  *
1208  *	Called when packet(s) are ready to be transmitted. A pointer to an
1209  *	M_DATA message that contains the packet is passed to this routine.
1210  *	The complete LLC header is contained in the message's first message
1211  *	block, and the remainder of the packet is contained within
1212  *	additional M_DATA message blocks linked to the first message block.
1213  *
1214  *	Additional messages may be passed by linking with b_next.
1215  */
1216 static mblk_t *
1217 dmfe_m_tx(void *arg, mblk_t *mp)
1218 {
1219 	dmfe_t *dmfep = arg;			/* private device info	*/
1220 	mblk_t *next;
1221 
1222 	ASSERT(mp != NULL);
1223 	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);
1224 
1225 	if (dmfep->chip_state != CHIP_RUNNING)
1226 		return (mp);
1227 
1228 	while (mp != NULL) {
1229 		next = mp->b_next;
1230 		mp->b_next = NULL;
1231 		if (!dmfe_send_msg(dmfep, mp)) {
1232 			mp->b_next = next;
1233 			break;
1234 		}
1235 		mp = next;
1236 	}
1237 
1238 	return (mp);
1239 }
1240 
1241 #undef	DMFE_DBG
1242 
1243 
1244 /*
1245  * ========== Address-setting routines (TX-side) ==========
1246  */
1247 
1248 #define	DMFE_DBG	DMFE_DBG_ADDR	/* debug flag for this code	*/
1249 
1250 /*
1251  * Find the index of the relevant bit in the setup packet.
1252  * This must mirror the way the hardware will actually calculate it!
1253  */
1254 static uint32_t
1255 dmfe_hash_index(const uint8_t *address)
1256 {
1257 	uint32_t const POLY = HASH_POLY;
1258 	uint32_t crc = HASH_CRC;
1259 	uint32_t index;
1260 	uint32_t msb;
1261 	uchar_t currentbyte;
1262 	int byteslength;
1263 	int shift;
1264 	int bit;
1265 
1266 	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
1267 		currentbyte = address[byteslength];
1268 		for (bit = 0; bit < 8; ++bit) {
1269 			msb = crc >> 31;
1270 			crc <<= 1;
1271 			if (msb ^ (currentbyte & 1)) {
1272 				crc ^= POLY;
1273 				crc |= 0x00000001;
1274 			}
1275 			currentbyte >>= 1;
1276 		}
1277 	}
1278 
1279 	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
1280 		index |= (((crc >> bit) & 1) << shift);
1281 
1282 	return (index);
1283 }
1284 
1285 /*
1286  * Find and set/clear the relevant bit in the setup packet hash table
1287  * This must mirror the way the hardware will actually interpret it!
1288  */
1289 static void
1290 dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
1291 {
1292 	dma_area_t *descp;
1293 	uint32_t tmp;
1294 
1295 	ASSERT(mutex_owned(dmfep->oplock));
1296 
1297 	descp = &dmfep->tx_desc;
1298 	tmp = dmfe_setup_get32(descp, index/16);
1299 	if (val)
1300 		tmp |= 1 << (index%16);
1301 	else
1302 		tmp &= ~(1 << (index%16));
1303 	dmfe_setup_put32(descp, index/16, tmp);
1304 }
1305 
1306 /*
1307  * Update the refcount for the bit in the setup packet corresponding
1308  * to the specified address; if it changes between zero & nonzero,
1309  * also update the bitmap itself & return B_TRUE, so that the caller
1310  * knows to re-send the setup packet.  Otherwise (only the refcount
1311  * changed), return B_FALSE
1312  */
1313 static boolean_t
1314 dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
1315 {
1316 	uint32_t index;
1317 	uint8_t *refp;
1318 	boolean_t change;
1319 
1320 	index = dmfe_hash_index(mca);
1321 	refp = &dmfep->mcast_refs[index];
1322 	change = (val ? (*refp)++ : --(*refp)) == 0;
1323 
1324 	if (change)
1325 		dmfe_update_hash(dmfep, index, val);
1326 
1327 	return (change);
1328 }
1329 
1330 /*
1331  * "Transmit" the (possibly updated) magic setup packet
1332  */
1333 static int
1334 dmfe_send_setup(dmfe_t *dmfep)
1335 {
1336 	int status;
1337 
1338 	ASSERT(mutex_owned(dmfep->oplock));
1339 
1340 	/*
1341 	 * If the chip isn't running, we can't really send the setup frame
1342 	 * now but it doesn't matter, 'cos it will be sent when the transmit
1343 	 * process is restarted (see dmfe_start()).
1344 	 */
1345 	if ((dmfep->opmode & START_TRANSMIT) == 0)
1346 		return (0);
1347 
1348 	/*
1349 	 * "Send" the setup frame.  If it fails (e.g. no resources),
1350 	 * set a flag; then the factotum will retry the "send".  Once
1351 	 * it works, we can clear the flag no matter how many attempts
1352 	 * had previously failed.  We tell the caller that it worked
1353 	 * whether it did or not; after all, it *will* work eventually.
1354 	 */
1355 	status = dmfe_send_msg(dmfep, NULL);
1356 	dmfep->need_setup = status ? B_FALSE : B_TRUE;
1357 	return (0);
1358 }
1359 
1360 /*
1361  *	dmfe_m_unicst() -- set the physical network address
1362  */
1363 static int
1364 dmfe_m_unicst(void *arg, const uint8_t *macaddr)
1365 {
1366 	dmfe_t *dmfep = arg;
1367 	int status;
1368 	int index;
1369 
1370 	/*
1371 	 * Update our current address and send out a new setup packet
1372 	 *
1373 	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
1374 	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
1375 	 *
1376 	 * It is said that there is a bug in the 21140 where it fails to
1377 	 * receive packes addresses to the specified perfect filter address.
1378 	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
1379 	 * bit should be set in the module variable dmfe_setup_desc1.
1380 	 *
1381 	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
1382 	 * In this mode, *all* incoming addresses are hashed and looked
1383 	 * up in the bitmap described by the setup packet.  Therefore,
1384 	 * the bit representing the station address has to be added to
1385 	 * the table before sending it out.  If the address is changed,
1386 	 * the old entry should be removed before the new entry is made.
1387 	 *
1388 	 * NOTE: in this mode, unicast packets that are not intended for
1389 	 * this station may be received; it is up to software to filter
1390 	 * them out afterwards!
1391 	 *
1392 	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
1393 	 * filtering.  In this mode, multicast addresses are hashed and
1394 	 * checked against the bitmap, while unicast addresses are simply
1395 	 * matched against the one physical address specified in the setup
1396 	 * packet.  This means that we shouldn't receive unicast packets
1397 	 * that aren't intended for us (but software still has to filter
1398 	 * multicast packets just the same).
1399 	 *
1400 	 * Whichever mode we're using, we have to enter the broadcast
1401 	 * address into the multicast filter map too, so we do this on
1402 	 * the first time through after attach or reset.
1403 	 */
1404 	mutex_enter(dmfep->oplock);
1405 
1406 	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
1407 		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
1408 	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
1409 		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
1410 	if (!dmfep->addr_set)
1411 		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);
1412 
1413 	/*
1414 	 * Remember the new current address
1415 	 */
1416 	ethaddr_copy(macaddr, dmfep->curr_addr);
1417 	dmfep->addr_set = B_TRUE;
1418 
1419 	/*
1420 	 * Install the new physical address into the proper position in
1421 	 * the setup frame; this is only used if we select hash+perfect
1422 	 * filtering, but we'll put it in anyway.  The ugliness here is
1423 	 * down to the usual war of the egg :(
1424 	 */
1425 	for (index = 0; index < ETHERADDRL; index += 2)
1426 		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
1427 		    (macaddr[index+1] << 8) | macaddr[index]);
1428 
1429 	/*
1430 	 * Finally, we're ready to "transmit" the setup frame
1431 	 */
1432 	status = dmfe_send_setup(dmfep);
1433 	mutex_exit(dmfep->oplock);
1434 
1435 	return (status);
1436 }
1437 
1438 /*
1439  *	dmfe_m_multicst() -- enable or disable a multicast address
1440  *
1441  *	Program the hardware to enable/disable the multicast address
1442  *	in "mca" (enable if add is true, otherwise disable it.)
1443  *	We keep a refcount for each bit in the map, so that it still
1444  *	works out properly if multiple addresses hash to the same bit.
1445  *	dmfe_update_mcast() tells us whether the map actually changed;
1446  *	if so, we have to re-"transmit" the magic setup packet.
1447  */
1448 static int
1449 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1450 {
1451 	dmfe_t *dmfep = arg;			/* private device info	*/
1452 	int status = 0;
1453 
1454 	mutex_enter(dmfep->oplock);
1455 	if (dmfe_update_mcast(dmfep, mca, add))
1456 		status = dmfe_send_setup(dmfep);
1457 	mutex_exit(dmfep->oplock);
1458 
1459 	return (status);
1460 }
1461 
1462 #undef	DMFE_DBG
1463 
1464 
1465 /*
1466  * ========== Internal state management entry points ==========
1467  */
1468 
1469 #define	DMFE_DBG	DMFE_DBG_GLD	/* debug flag for this code	*/
1470 
1471 /*
1472  * These routines provide all the functionality required by the
1473  * corresponding MAC layer entry points, but don't update the MAC layer state
1474  * so they can be called internally without disturbing our record
1475  * of what MAC layer thinks we should be doing ...
1476  */
1477 
1478 /*
1479  *	dmfe_stop() -- stop processing, don't reset h/w or rings
1480  */
1481 static void
1482 dmfe_stop(dmfe_t *dmfep)
1483 {
1484 	ASSERT(mutex_owned(dmfep->oplock));
1485 
1486 	dmfe_stop_chip(dmfep, CHIP_STOPPED);
1487 }
1488 
1489 /*
1490  *	dmfe_reset() -- stop processing, reset h/w & rings to initial state
1491  */
1492 static void
1493 dmfe_reset(dmfe_t *dmfep)
1494 {
1495 	ASSERT(mutex_owned(dmfep->oplock));
1496 	ASSERT(mutex_owned(dmfep->rxlock));
1497 	ASSERT(mutex_owned(dmfep->txlock));
1498 
1499 	dmfe_stop_chip(dmfep, CHIP_RESET);
1500 	dmfe_init_rings(dmfep);
1501 }
1502 
1503 /*
1504  *	dmfe_start() -- start transmitting/receiving
1505  */
1506 static void
1507 dmfe_start(dmfe_t *dmfep)
1508 {
1509 	uint32_t gpsr;
1510 
1511 	ASSERT(mutex_owned(dmfep->oplock));
1512 
1513 	ASSERT(dmfep->chip_state == CHIP_RESET ||
1514 	    dmfep->chip_state == CHIP_STOPPED);
1515 
1516 	/*
1517 	 * Make opmode consistent with PHY duplex setting
1518 	 */
1519 	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
1520 	if (gpsr & GPS_FULL_DUPLEX)
1521 		dmfep->opmode |= FULL_DUPLEX;
1522 	else
1523 		dmfep->opmode &= ~FULL_DUPLEX;
1524 
1525 	/*
1526 	 * Start transmit processing
1527 	 * Set up the address filters
1528 	 * Start receive processing
1529 	 * Enable interrupts
1530 	 */
1531 	dmfe_start_chip(dmfep, START_TRANSMIT);
1532 	(void) dmfe_send_setup(dmfep);
1533 	drv_usecwait(10);
1534 	dmfe_start_chip(dmfep, START_RECEIVE);
1535 	dmfe_enable_interrupts(dmfep);
1536 }
1537 
1538 /*
1539  * dmfe_restart - restart transmitting/receiving after error or suspend
1540  */
1541 static void
1542 dmfe_restart(dmfe_t *dmfep)
1543 {
1544 	ASSERT(mutex_owned(dmfep->oplock));
1545 
1546 	/*
1547 	 * You need not only <oplock>, but also <rxlock> AND <txlock>
1548 	 * in order to reset the rings, but then <txlock> *mustn't*
1549 	 * be held across the call to dmfe_start()
1550 	 */
1551 	mutex_enter(dmfep->rxlock);
1552 	mutex_enter(dmfep->txlock);
1553 	dmfe_reset(dmfep);
1554 	mutex_exit(dmfep->txlock);
1555 	mutex_exit(dmfep->rxlock);
1556 	if (dmfep->mac_state == DMFE_MAC_STARTED)
1557 		dmfe_start(dmfep);
1558 }
1559 
1560 
1561 /*
1562  * ========== MAC-required management entry points ==========
1563  */
1564 
1565 /*
1566  *	dmfe_m_stop() -- stop transmitting/receiving
1567  */
1568 static void
1569 dmfe_m_stop(void *arg)
1570 {
1571 	dmfe_t *dmfep = arg;			/* private device info	*/
1572 
1573 	/*
1574 	 * Just stop processing, then record new MAC state
1575 	 */
1576 	mutex_enter(dmfep->oplock);
1577 	dmfe_stop(dmfep);
1578 	dmfep->mac_state = DMFE_MAC_STOPPED;
1579 	mutex_exit(dmfep->oplock);
1580 }
1581 
1582 /*
1583  *	dmfe_m_start() -- start transmitting/receiving
1584  */
1585 static int
1586 dmfe_m_start(void *arg)
1587 {
1588 	dmfe_t *dmfep = arg;			/* private device info	*/
1589 
1590 	/*
1591 	 * Start processing and record new MAC state
1592 	 */
1593 	mutex_enter(dmfep->oplock);
1594 	dmfe_start(dmfep);
1595 	dmfep->mac_state = DMFE_MAC_STARTED;
1596 	mutex_exit(dmfep->oplock);
1597 
1598 	return (0);
1599 }
1600 
1601 /*
1602  * dmfe_m_promisc() -- set or reset promiscuous mode on the board
1603  *
1604  *	Program the hardware to enable/disable promiscuous and/or
1605  *	receive-all-multicast modes.  Davicom don't document this
1606  *	clearly, but it looks like we can do this on-the-fly (i.e.
1607  *	without stopping & restarting the TX/RX processes).
1608  */
1609 static int
1610 dmfe_m_promisc(void *arg, boolean_t on)
1611 {
1612 	dmfe_t *dmfep = arg;
1613 
1614 	mutex_enter(dmfep->oplock);
1615 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
1616 	if (on)
1617 		dmfep->opmode |= PROMISC_MODE;
1618 	dmfe_set_opmode(dmfep);
1619 	mutex_exit(dmfep->oplock);
1620 
1621 	return (0);
1622 }
1623 
1624 /*ARGSUSED*/
1625 static boolean_t
1626 dmfe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1627 {
1628 	/*
1629 	 * Note that the chip could support some form of polling and
1630 	 * multiaddress support.  We should look into adding polling
1631 	 * support later, once Solaris is better positioned to take
1632 	 * advantage of it, although it may be of little use since
1633 	 * even a lowly 500MHz US-IIe should be able to keep up with
1634 	 * 100Mbps.  (Esp. if the packets are not unreasonably sized.)
1635 	 *
1636 	 * Multiaddress support, however, is likely to be of more
1637 	 * utility with crossbow and virtualized NICs.  Although, the
1638 	 * fact that dmfe is only supported on low-end US-IIe hardware
1639 	 * makes one wonder whether VNICs are likely to be used on
1640 	 * such platforms.  The chip certainly supports the notion,
1641 	 * since it can be run in HASH-ONLY mode.  (Though this would
1642 	 * require software to drop unicast packets that are
1643 	 * incorrectly received due to hash collision of the
1644 	 * destination mac address.)
1645 	 *
1646 	 * Interestingly enough, modern Davicom chips (the 9102D)
1647 	 * support full IP checksum offload, though its unclear
1648 	 * whether any of these chips are used on any systems that can
1649 	 * run Solaris.
1650 	 *
1651 	 * If this driver is ever supported on x86 hardware, then
1652 	 * these assumptions should be revisited.
1653 	 */
1654 	switch (cap) {
1655 	case MAC_CAPAB_POLL:
1656 	case MAC_CAPAB_MULTIADDRESS:
1657 	case MAC_CAPAB_HCKSUM:
1658 	default:
1659 		return (B_FALSE);
1660 	}
1661 }
1662 
1663 
1664 #undef	DMFE_DBG
1665 
1666 
1667 /*
1668  * ========== Factotum, implemented as a softint handler ==========
1669  */
1670 
1671 #define	DMFE_DBG	DMFE_DBG_FACT	/* debug flag for this code	*/
1672 
1673 /*
1674  * The factotum is woken up when there's something to do that we'd rather
1675  * not do from inside a (high-level?) hardware interrupt handler.  Its
1676  * two main tasks are:
1677  *	reset & restart the chip after an error
1678  *	update & restart the chip after a link status change
1679  */
1680 static uint_t
1681 dmfe_factotum(caddr_t arg)
1682 {
1683 	dmfe_t *dmfep;
1684 
1685 	dmfep = (void *)arg;
1686 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
1687 
1688 	mutex_enter(dmfep->oplock);
1689 
1690 	dmfep->factotum_flag = 0;
1691 	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);
1692 
1693 	/*
1694 	 * Check for chip error ...
1695 	 */
1696 	if (dmfep->chip_state == CHIP_ERROR) {
1697 		/*
1698 		 * Error recovery required: reset the chip and the rings,
1699 		 * then, if it's supposed to be running, kick it off again.
1700 		 */
1701 		DRV_KS_INC(dmfep, KS_RECOVERY);
1702 		dmfe_restart(dmfep);
1703 	} else if (dmfep->need_setup) {
1704 		(void) dmfe_send_setup(dmfep);
1705 	}
1706 	mutex_exit(dmfep->oplock);
1707 
1708 	/*
1709 	 * Then, check the link state.  We need <milock> but not <oplock>
1710 	 * to do this, but if something's changed, we need <oplock> as well
1711 	 * in order to stop/restart the chip!  Note: we could simply hold
1712 	 * <oplock> right through here, but we'd rather not 'cos checking
1713 	 * the link state involves reading over the bit-serial MII bus,
1714 	 * which takes ~500us even when nothing's changed.  Holding <oplock>
1715 	 * would lock out the interrupt handler for the duration, so it's
1716 	 * better to release it first and reacquire it only if needed.
1717 	 */
1718 	mutex_enter(dmfep->milock);
1719 	if (dmfe_check_link(dmfep)) {
1720 		mutex_enter(dmfep->oplock);
1721 		dmfe_stop(dmfep);
1722 		DRV_KS_INC(dmfep, KS_LINK_CHECK);
1723 		if (dmfep->update_phy) {
1724 			/*
1725 			 *  The chip may reset itself for some unknown
1726 			 * reason.  If this happens, the chip will use
1727 			 * default settings (for speed, duplex, and autoneg),
1728 			 * which possibly aren't the user's desired settings.
1729 			 */
1730 			dmfe_update_phy(dmfep);
1731 			dmfep->update_phy = B_FALSE;
1732 		}
1733 		dmfe_recheck_link(dmfep, B_FALSE);
1734 		if (dmfep->mac_state == DMFE_MAC_STARTED)
1735 			dmfe_start(dmfep);
1736 		mutex_exit(dmfep->oplock);
1737 	}
1738 	mutex_exit(dmfep->milock);
1739 
1740 	/*
1741 	 * Keep MAC up-to-date about the state of the link ...
1742 	 */
1743 	mac_link_update(dmfep->mh, dmfep->link_state);
1744 
1745 	return (DDI_INTR_CLAIMED);
1746 }
1747 
1748 static void
1749 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
1750 {
1751 	DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
1752 	    why, ks_id, dmfep->factotum_flag));
1753 
1754 	ASSERT(mutex_owned(dmfep->oplock));
1755 	DRV_KS_INC(dmfep, ks_id);
1756 
1757 	if (dmfep->factotum_flag++ == 0)
1758 		ddi_trigger_softintr(dmfep->factotum_id);
1759 }
1760 
1761 #undef	DMFE_DBG
1762 
1763 
1764 /*
1765  * ========== Periodic Tasks (Cyclic handler & friends) ==========
1766  */
1767 
1768 #define	DMFE_DBG	DMFE_DBG_TICK	/* debug flag for this code	*/
1769 
1770 /*
1771  * Periodic tick tasks, run from the cyclic handler
1772  *
1773  * Check the state of the link and wake the factotum if necessary
1774  */
1775 static void
1776 dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
1777 {
1778 	link_state_t phy_state;
1779 	link_state_t utp_state;
1780 	const char *why;
1781 	int ks_id;
1782 
1783 	_NOTE(ARGUNUSED(istat))
1784 
1785 	ASSERT(mutex_owned(dmfep->oplock));
1786 
1787 	/*
1788 	 * Is it time to wake the factotum?  We do so periodically, in
1789 	 * case the fast check below doesn't always reveal a link change
1790 	 */
1791 	if (dmfep->link_poll_tix-- == 0) {
1792 		dmfep->link_poll_tix = factotum_tix;
1793 		why = "tick (link poll)";
1794 		ks_id = KS_TICK_LINK_POLL;
1795 	} else {
1796 		why = NULL;
1797 		ks_id = KS_TICK_LINK_STATE;
1798 	}
1799 
1800 	/*
1801 	 * Has the link status changed?  If so, we might want to wake
1802 	 * the factotum to deal with it.
1803 	 */
1804 	phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN;
1805 	utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN;
1806 	if (phy_state != utp_state)
1807 		why = "tick (phy <> utp)";
1808 	else if ((dmfep->link_state == LINK_STATE_UP) &&
1809 	    (phy_state == LINK_STATE_DOWN))
1810 		why = "tick (UP -> DOWN)";
1811 	else if (phy_state != dmfep->link_state) {
1812 		if (dmfep->link_poll_tix > factotum_fast_tix)
1813 			dmfep->link_poll_tix = factotum_fast_tix;
1814 	}
1815 
1816 	if (why != NULL) {
1817 		DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
1818 		    why, dmfep->link_state, phy_state, utp_state));
1819 		dmfe_wake_factotum(dmfep, ks_id, why);
1820 	}
1821 }
1822 
1823 /*
1824  * Periodic tick tasks, run from the cyclic handler
1825  *
1826  * Check for TX stall; flag an error and wake the factotum if so.
1827  */
1828 static void
1829 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
1830 {
1831 	boolean_t tx_stall;
1832 	uint32_t tx_state;
1833 	uint32_t limit;
1834 
1835 	ASSERT(mutex_owned(dmfep->oplock));
1836 
1837 	/*
1838 	 * Check for transmit stall ...
1839 	 *
1840 	 * IF there's at least one packet in the ring, AND the timeout
1841 	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
1842 	 * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
1843 	 *
1844 	 * Note that the timeout limit is based on the transmit engine
1845 	 * state; we allow the transmitter longer to make progress in
1846 	 * some states than in others, based on observations of this
1847 	 * chip's actual behaviour in the lab.
1848 	 *
1849 	 * By observation, we find that on about 1 in 10000 passes through
1850 	 * here, the TX lock is already held.  In that case, we'll skip
1851 	 * the check on this pass rather than wait.  Most likely, the send
1852 	 * routine was holding the lock when the interrupt happened, and
1853 	 * we'll succeed next time through.  In the event of a real stall,
1854 	 * the TX ring will fill up, after which the send routine won't be
1855 	 * called any more and then we're sure to get in.
1856 	 */
1857 	tx_stall = B_FALSE;
1858 	if (mutex_tryenter(dmfep->txlock)) {
1859 		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
1860 			tx_state = TX_PROCESS_STATE(istat);
1861 			if (gpsr & GPS_LINK_100)
1862 				limit = stall_100_tix[tx_state];
1863 			else
1864 				limit = stall_10_tix[tx_state];
1865 			if (++dmfep->tx_pending_tix >= limit &&
1866 			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
1867 				dmfe_log(dmfep, "TX stall detected "
1868 				    "after %d ticks in state %d; "
1869 				    "automatic recovery initiated",
1870 				    dmfep->tx_pending_tix, tx_state);
1871 				tx_stall = B_TRUE;
1872 			}
1873 		}
1874 		mutex_exit(dmfep->txlock);
1875 	}
1876 
1877 	if (tx_stall) {
1878 		dmfe_stop_chip(dmfep, CHIP_ERROR);
1879 		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
1880 	}
1881 }
1882 
1883 /*
1884  * Cyclic callback handler
1885  */
1886 static void
1887 dmfe_cyclic(void *arg)
1888 {
1889 	dmfe_t *dmfep = arg;			/* private device info */
1890 	uint32_t istat;
1891 	uint32_t gpsr;
1892 
1893 	/*
1894 	 * If the chip's not RUNNING, there's nothing to do.
1895 	 * If we can't get the mutex straight away, we'll just
1896 	 * skip this pass; we'll back back soon enough anyway.
1897 	 */
1898 	if (dmfep->chip_state != CHIP_RUNNING)
1899 		return;
1900 	if (mutex_tryenter(dmfep->oplock) == 0)
1901 		return;
1902 
1903 	/*
1904 	 * Recheck chip state (it might have been stopped since we
1905 	 * checked above).  If still running, call each of the *tick*
1906 	 * tasks.  They will check for link change, TX stall, etc ...
1907 	 */
1908 	if (dmfep->chip_state == CHIP_RUNNING) {
1909 		istat = dmfe_chip_get32(dmfep, STATUS_REG);
1910 		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
1911 		dmfe_tick_link_check(dmfep, gpsr, istat);
1912 		dmfe_tick_stall_check(dmfep, gpsr, istat);
1913 	}
1914 
1915 	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
1916 	mutex_exit(dmfep->oplock);
1917 }
1918 
1919 #undef	DMFE_DBG
1920 
1921 
1922 /*
1923  * ========== Hardware interrupt handler ==========
1924  */
1925 
1926 #define	DMFE_DBG	DMFE_DBG_INT	/* debug flag for this code	*/
1927 
1928 /*
1929  *	dmfe_interrupt() -- handle chip interrupts
1930  */
1931 static uint_t
1932 dmfe_interrupt(caddr_t arg)
1933 {
1934 	dmfe_t *dmfep;			/* private device info */
1935 	uint32_t interrupts;
1936 	uint32_t istat;
1937 	const char *msg;
1938 	mblk_t *mp;
1939 	boolean_t warning_msg = B_TRUE;
1940 
1941 	dmfep = (void *)arg;
1942 
1943 	/*
1944 	 * A quick check as to whether the interrupt was from this
1945 	 * device, before we even finish setting up all our local
1946 	 * variables.  Note that reading the interrupt status register
1947 	 * doesn't have any unpleasant side effects such as clearing
1948 	 * the bits read, so it's quite OK to re-read it once we have
1949 	 * determined that we are going to service this interrupt and
1950 	 * grabbed the mutexen.
1951 	 */
1952 	istat = dmfe_chip_get32(dmfep, STATUS_REG);
1953 	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
1954 		return (DDI_INTR_UNCLAIMED);
1955 
1956 	/*
1957 	 * Unfortunately, there can be a race condition between attach()
1958 	 * adding the interrupt handler and initialising the mutexen,
1959 	 * and the handler itself being called because of a pending
1960 	 * interrupt.  So, we check <imask>; if it shows that interrupts
1961 	 * haven't yet been enabled (and therefore we shouldn't really
1962 	 * be here at all), we will just write back the value read from
1963 	 * the status register, thus acknowledging (and clearing) *all*
1964 	 * pending conditions without really servicing them, and claim
1965 	 * the interrupt.
1966 	 */
1967 	if (dmfep->imask == 0) {
1968 		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
1969 		dmfe_chip_put32(dmfep, STATUS_REG, istat);
1970 		return (DDI_INTR_CLAIMED);
1971 	}
1972 
1973 	/*
1974 	 * We're committed to servicing this interrupt, but we
1975 	 * need to get the lock before going any further ...
1976 	 */
1977 	mutex_enter(dmfep->oplock);
1978 	DRV_KS_INC(dmfep, KS_INTERRUPT);
1979 
1980 	/*
1981 	 * Identify bits that represent enabled interrupts ...
1982 	 */
1983 	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
1984 	interrupts = istat & dmfep->imask;
1985 	ASSERT(interrupts != 0);
1986 
1987 	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));
1988 
1989 	/*
1990 	 * Check for any interrupts other than TX/RX done.
1991 	 * If there are any, they are considered Abnormal
1992 	 * and will cause the chip to be reset.
1993 	 */
1994 	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
1995 		if (istat & ABNORMAL_SUMMARY_INT) {
1996 			/*
1997 			 * Any Abnormal interrupts will lead to us
1998 			 * resetting the chip, so we don't bother
1999 			 * to clear each interrupt individually.
2000 			 *
2001 			 * Our main task here is to identify the problem,
2002 			 * by pointing out the most significant unexpected
2003 			 * bit.  Additional bits may well be consequences
2004 			 * of the first problem, so we consider the possible
2005 			 * causes in order of severity.
2006 			 */
2007 			if (interrupts & SYSTEM_ERR_INT) {
2008 				switch (istat & SYSTEM_ERR_BITS) {
2009 				case SYSTEM_ERR_M_ABORT:
2010 					msg = "Bus Master Abort";
2011 					break;
2012 
2013 				case SYSTEM_ERR_T_ABORT:
2014 					msg = "Bus Target Abort";
2015 					break;
2016 
2017 				case SYSTEM_ERR_PARITY:
2018 					msg = "Parity Error";
2019 					break;
2020 
2021 				default:
2022 					msg = "Unknown System Bus Error";
2023 					break;
2024 				}
2025 			} else if (interrupts & RX_STOPPED_INT) {
2026 				msg = "RX process stopped";
2027 			} else if (interrupts & RX_UNAVAIL_INT) {
2028 				msg = "RX buffer unavailable";
2029 				warning_msg = B_FALSE;
2030 			} else if (interrupts & RX_WATCHDOG_INT) {
2031 				msg = "RX watchdog timeout?";
2032 			} else if (interrupts & RX_EARLY_INT) {
2033 				msg = "RX early interrupt?";
2034 			} else if (interrupts & TX_STOPPED_INT) {
2035 				msg = "TX process stopped";
2036 			} else if (interrupts & TX_JABBER_INT) {
2037 				msg = "TX jabber timeout";
2038 			} else if (interrupts & TX_UNDERFLOW_INT) {
2039 				msg = "TX underflow?";
2040 			} else if (interrupts & TX_EARLY_INT) {
2041 				msg = "TX early interrupt?";
2042 
2043 			} else if (interrupts & LINK_STATUS_INT) {
2044 				msg = "Link status change?";
2045 			} else if (interrupts & GP_TIMER_INT) {
2046 				msg = "Timer expired?";
2047 			}
2048 
2049 			if (warning_msg)
2050 				dmfe_warning(dmfep, "abnormal interrupt, "
2051 				    "status 0x%x: %s", istat, msg);
2052 
2053 			/*
2054 			 * We don't want to run the entire reinitialisation
2055 			 * code out of this (high-level?) interrupt, so we
2056 			 * simply STOP the chip, and wake up the factotum
2057 			 * to reinitalise it ...
2058 			 */
2059 			dmfe_stop_chip(dmfep, CHIP_ERROR);
2060 			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
2061 			    "interrupt (error)");
2062 		} else {
2063 			/*
2064 			 * We shouldn't really get here (it would mean
2065 			 * there were some unprocessed enabled bits but
2066 			 * they weren't Abnormal?), but we'll check just
2067 			 * in case ...
2068 			 */
2069 			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
2070 		}
2071 	}
2072 
2073 	/*
2074 	 * Acknowledge all the original bits - except in the case of an
2075 	 * error, when we leave them unacknowledged so that the recovery
2076 	 * code can see what was going on when the problem occurred ...
2077 	 */
2078 	if (dmfep->chip_state != CHIP_ERROR) {
2079 		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
2080 		/*
2081 		 * Read-after-write forces completion on PCI bus.
2082 		 *
2083 		 */
2084 		(void) dmfe_chip_get32(dmfep, STATUS_REG);
2085 	}
2086 
2087 
2088 	/*
2089 	 * We've finished talking to the chip, so we can drop <oplock>
2090 	 * before handling the normal interrupts, which only involve
2091 	 * manipulation of descriptors ...
2092 	 */
2093 	mutex_exit(dmfep->oplock);
2094 
2095 	if (interrupts & RX_PKTDONE_INT)
2096 		if ((mp = dmfe_getp(dmfep)) != NULL)
2097 			mac_rx(dmfep->mh, NULL, mp);
2098 
2099 	if (interrupts & TX_PKTDONE_INT) {
2100 		/*
2101 		 * The only reason for taking this interrupt is to give
2102 		 * MAC a chance to schedule queued packets after a
2103 		 * ring-full condition.  To minimise the number of
2104 		 * redundant TX-Done interrupts, we only mark two of the
2105 		 * ring descriptors as 'interrupt-on-complete' - all the
2106 		 * others are simply handed back without an interrupt.
2107 		 */
2108 		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
2109 			(void) dmfe_reclaim_tx_desc(dmfep);
2110 			mutex_exit(dmfep->txlock);
2111 		}
2112 		mac_tx_update(dmfep->mh);
2113 	}
2114 
2115 	return (DDI_INTR_CLAIMED);
2116 }
2117 
2118 #undef	DMFE_DBG
2119 
2120 
2121 /*
2122  * ========== Statistics update handler ==========
2123  */
2124 
2125 #define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/
2126 
2127 static int
2128 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
2129 {
2130 	dmfe_t *dmfep = arg;
2131 	int rv = 0;
2132 
2133 	mutex_enter(dmfep->milock);
2134 	mutex_enter(dmfep->oplock);
2135 	mutex_enter(dmfep->rxlock);
2136 	mutex_enter(dmfep->txlock);
2137 
2138 	/* make sure we have all the stats collected */
2139 	(void) dmfe_reclaim_tx_desc(dmfep);
2140 
2141 	switch (stat) {
2142 	case MAC_STAT_IFSPEED:
2143 		*val = dmfep->op_stats_speed;
2144 		break;
2145 
2146 	case MAC_STAT_IPACKETS:
2147 		*val = dmfep->rx_stats_ipackets;
2148 		break;
2149 
2150 	case MAC_STAT_MULTIRCV:
2151 		*val = dmfep->rx_stats_multi;
2152 		break;
2153 
2154 	case MAC_STAT_BRDCSTRCV:
2155 		*val = dmfep->rx_stats_bcast;
2156 		break;
2157 
2158 	case MAC_STAT_RBYTES:
2159 		*val = dmfep->rx_stats_rbytes;
2160 		break;
2161 
2162 	case MAC_STAT_IERRORS:
2163 		*val = dmfep->rx_stats_ierrors;
2164 		break;
2165 
2166 	case MAC_STAT_NORCVBUF:
2167 		*val = dmfep->rx_stats_norcvbuf;
2168 		break;
2169 
2170 	case MAC_STAT_COLLISIONS:
2171 		*val = dmfep->tx_stats_collisions;
2172 		break;
2173 
2174 	case MAC_STAT_OERRORS:
2175 		*val = dmfep->tx_stats_oerrors;
2176 		break;
2177 
2178 	case MAC_STAT_OPACKETS:
2179 		*val = dmfep->tx_stats_opackets;
2180 		break;
2181 
2182 	case MAC_STAT_MULTIXMT:
2183 		*val = dmfep->tx_stats_multi;
2184 		break;
2185 
2186 	case MAC_STAT_BRDCSTXMT:
2187 		*val = dmfep->tx_stats_bcast;
2188 		break;
2189 
2190 	case MAC_STAT_OBYTES:
2191 		*val = dmfep->tx_stats_obytes;
2192 		break;
2193 
2194 	case MAC_STAT_OVERFLOWS:
2195 		*val = dmfep->rx_stats_overflow;
2196 		break;
2197 
2198 	case MAC_STAT_UNDERFLOWS:
2199 		*val = dmfep->tx_stats_underflow;
2200 		break;
2201 
2202 	case ETHER_STAT_ALIGN_ERRORS:
2203 		*val = dmfep->rx_stats_align;
2204 		break;
2205 
2206 	case ETHER_STAT_FCS_ERRORS:
2207 		*val = dmfep->rx_stats_fcs;
2208 		break;
2209 
2210 	case ETHER_STAT_TOOLONG_ERRORS:
2211 		*val = dmfep->rx_stats_toolong;
2212 		break;
2213 
2214 	case ETHER_STAT_TOOSHORT_ERRORS:
2215 		*val = dmfep->rx_stats_short;
2216 		break;
2217 
2218 	case ETHER_STAT_MACRCV_ERRORS:
2219 		*val = dmfep->rx_stats_macrcv_errors;
2220 		break;
2221 
2222 	case ETHER_STAT_MACXMT_ERRORS:
2223 		*val = dmfep->tx_stats_macxmt_errors;
2224 		break;
2225 
2226 	case ETHER_STAT_JABBER_ERRORS:
2227 		*val = dmfep->tx_stats_jabber;
2228 		break;
2229 
2230 	case ETHER_STAT_CARRIER_ERRORS:
2231 		*val = dmfep->tx_stats_nocarrier;
2232 		break;
2233 
2234 	case ETHER_STAT_TX_LATE_COLLISIONS:
2235 		*val = dmfep->tx_stats_xmtlatecoll;
2236 		break;
2237 
2238 	case ETHER_STAT_EX_COLLISIONS:
2239 		*val = dmfep->tx_stats_excoll;
2240 		break;
2241 
2242 	case ETHER_STAT_DEFER_XMTS:
2243 		*val = dmfep->tx_stats_defer;
2244 		break;
2245 
2246 	case ETHER_STAT_FIRST_COLLISIONS:
2247 		*val = dmfep->tx_stats_first_coll;
2248 		break;
2249 
2250 	case ETHER_STAT_MULTI_COLLISIONS:
2251 		*val = dmfep->tx_stats_multi_coll;
2252 		break;
2253 
2254 	case ETHER_STAT_XCVR_INUSE:
2255 		*val = dmfep->phy_inuse;
2256 		break;
2257 
2258 	case ETHER_STAT_XCVR_ID:
2259 		*val = dmfep->phy_id;
2260 		break;
2261 
2262 	case ETHER_STAT_XCVR_ADDR:
2263 		*val = dmfep->phy_addr;
2264 		break;
2265 
2266 	case ETHER_STAT_LINK_DUPLEX:
2267 		*val = dmfep->op_stats_duplex;
2268 		break;
2269 
2270 	case ETHER_STAT_CAP_100T4:
2271 		*val = dmfep->param_bmsr_100T4;
2272 		break;
2273 
2274 	case ETHER_STAT_CAP_100FDX:
2275 		*val = dmfep->param_bmsr_100fdx;
2276 		break;
2277 
2278 	case ETHER_STAT_CAP_100HDX:
2279 		*val = dmfep->param_bmsr_100hdx;
2280 		break;
2281 
2282 	case ETHER_STAT_CAP_10FDX:
2283 		*val = dmfep->param_bmsr_10fdx;
2284 		break;
2285 
2286 	case ETHER_STAT_CAP_10HDX:
2287 		*val = dmfep->param_bmsr_10hdx;
2288 		break;
2289 
2290 	case ETHER_STAT_CAP_AUTONEG:
2291 		*val = dmfep->param_bmsr_autoneg;
2292 		break;
2293 
2294 	case ETHER_STAT_CAP_REMFAULT:
2295 		*val = dmfep->param_bmsr_remfault;
2296 		break;
2297 
2298 	case ETHER_STAT_ADV_CAP_AUTONEG:
2299 		*val = dmfep->param_autoneg;
2300 		break;
2301 
2302 	case ETHER_STAT_ADV_CAP_100T4:
2303 		*val = dmfep->param_anar_100T4;
2304 		break;
2305 
2306 	case ETHER_STAT_ADV_CAP_100FDX:
2307 		*val = dmfep->param_anar_100fdx;
2308 		break;
2309 
2310 	case ETHER_STAT_ADV_CAP_100HDX:
2311 		*val = dmfep->param_anar_100hdx;
2312 		break;
2313 
2314 	case ETHER_STAT_ADV_CAP_10FDX:
2315 		*val = dmfep->param_anar_10fdx;
2316 		break;
2317 
2318 	case ETHER_STAT_ADV_CAP_10HDX:
2319 		*val = dmfep->param_anar_10hdx;
2320 		break;
2321 
2322 	case ETHER_STAT_ADV_REMFAULT:
2323 		*val = dmfep->param_anar_remfault;
2324 		break;
2325 
2326 	case ETHER_STAT_LP_CAP_AUTONEG:
2327 		*val = dmfep->param_lp_autoneg;
2328 		break;
2329 
2330 	case ETHER_STAT_LP_CAP_100T4:
2331 		*val = dmfep->param_lp_100T4;
2332 		break;
2333 
2334 	case ETHER_STAT_LP_CAP_100FDX:
2335 		*val = dmfep->param_lp_100fdx;
2336 		break;
2337 
2338 	case ETHER_STAT_LP_CAP_100HDX:
2339 		*val = dmfep->param_lp_100hdx;
2340 		break;
2341 
2342 	case ETHER_STAT_LP_CAP_10FDX:
2343 		*val = dmfep->param_lp_10fdx;
2344 		break;
2345 
2346 	case ETHER_STAT_LP_CAP_10HDX:
2347 		*val = dmfep->param_lp_10hdx;
2348 		break;
2349 
2350 	case ETHER_STAT_LP_REMFAULT:
2351 		*val = dmfep->param_lp_remfault;
2352 		break;
2353 
2354 	default:
2355 		rv = ENOTSUP;
2356 	}
2357 
2358 	mutex_exit(dmfep->txlock);
2359 	mutex_exit(dmfep->rxlock);
2360 	mutex_exit(dmfep->oplock);
2361 	mutex_exit(dmfep->milock);
2362 
2363 	return (rv);
2364 }
2365 
2366 #undef	DMFE_DBG
2367 
2368 
2369 /*
2370  * ========== Ioctl handler & subfunctions ==========
2371  */
2372 
2373 #define	DMFE_DBG	DMFE_DBG_IOCTL	/* debug flag for this code	*/
2374 
2375 /*
2376  * Loopback operation
2377  *
2378  * Support access to the internal loopback and external loopback
2379  * functions selected via the Operation Mode Register (OPR).
2380  * These will be used by netlbtest (see BugId 4370609)
2381  *
2382  * Note that changing the loopback mode causes a stop/restart cycle
2383  *
2384  * It would be nice to evolve this to support the ioctls in sys/netlb.h,
2385  * but then it would be even better to use Brussels to configure this.
2386  */
2387 static enum ioc_reply
2388 dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
2389 {
2390 	loopback_t *loop_req_p;
2391 	uint32_t loopmode;
2392 
2393 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
2394 		return (IOC_INVAL);
2395 
2396 	loop_req_p = (void *)mp->b_cont->b_rptr;
2397 
2398 	switch (cmd) {
2399 	default:
2400 		/*
2401 		 * This should never happen ...
2402 		 */
2403 		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
2404 		return (IOC_INVAL);
2405 
2406 	case DMFE_GET_LOOP_MODE:
2407 		/*
2408 		 * This doesn't return the current loopback mode - it
2409 		 * returns a bitmask :-( of all possible loopback modes
2410 		 */
2411 		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
2412 		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
2413 		miocack(wq, mp, sizeof (loopback_t), 0);
2414 		return (IOC_DONE);
2415 
2416 	case DMFE_SET_LOOP_MODE:
2417 		/*
2418 		 * Select any of the various loopback modes
2419 		 */
2420 		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
2421 		    loop_req_p->loopback));
2422 		switch (loop_req_p->loopback) {
2423 		default:
2424 			return (IOC_INVAL);
2425 
2426 		case DMFE_LOOPBACK_OFF:
2427 			loopmode = LOOPBACK_OFF;
2428 			break;
2429 
2430 		case DMFE_PHY_A_LOOPBACK_ON:
2431 			loopmode = LOOPBACK_PHY_A;
2432 			break;
2433 
2434 		case DMFE_PHY_D_LOOPBACK_ON:
2435 			loopmode = LOOPBACK_PHY_D;
2436 			break;
2437 
2438 		case DMFE_INT_LOOPBACK_ON:
2439 			loopmode = LOOPBACK_INTERNAL;
2440 			break;
2441 		}
2442 
2443 		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
2444 			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
2445 			dmfep->opmode |= loopmode;
2446 			return (IOC_RESTART_ACK);
2447 		}
2448 
2449 		return (IOC_ACK);
2450 	}
2451 }
2452 
2453 /*
2454  * Specific dmfe IOCTLs, the mac module handles the generic ones.
2455  */
2456 static void
2457 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2458 {
2459 	dmfe_t *dmfep = arg;
2460 	struct iocblk *iocp;
2461 	enum ioc_reply status;
2462 	int cmd;
2463 
2464 	/*
2465 	 * Validate the command before bothering with the mutexen ...
2466 	 */
2467 	iocp = (void *)mp->b_rptr;
2468 	cmd = iocp->ioc_cmd;
2469 	switch (cmd) {
2470 	default:
2471 		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
2472 		miocnak(wq, mp, 0, EINVAL);
2473 		return;
2474 
2475 	case DMFE_SET_LOOP_MODE:
2476 	case DMFE_GET_LOOP_MODE:
2477 	case ND_GET:
2478 	case ND_SET:
2479 		break;
2480 	}
2481 
2482 	mutex_enter(dmfep->milock);
2483 	mutex_enter(dmfep->oplock);
2484 
2485 	switch (cmd) {
2486 	default:
2487 		_NOTE(NOTREACHED)
2488 		status = IOC_INVAL;
2489 		break;
2490 
2491 	case DMFE_SET_LOOP_MODE:
2492 	case DMFE_GET_LOOP_MODE:
2493 		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
2494 		break;
2495 
2496 	case ND_GET:
2497 	case ND_SET:
2498 		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
2499 		break;
2500 	}
2501 
2502 	/*
2503 	 * Do we need to restart?
2504 	 */
2505 	switch (status) {
2506 	default:
2507 		break;
2508 
2509 	case IOC_RESTART_ACK:
2510 	case IOC_RESTART:
2511 		/*
2512 		 * PHY parameters changed; we need to stop, update the
2513 		 * PHY layer and restart before sending the reply or ACK
2514 		 */
2515 		dmfe_stop(dmfep);
2516 		dmfe_update_phy(dmfep);
2517 		dmfep->update_phy = B_FALSE;
2518 
2519 		/*
2520 		 * The link will now most likely go DOWN and UP, because
2521 		 * we've changed the loopback state or the link parameters
2522 		 * or autonegotiation.  So we have to check that it's
2523 		 * settled down before we restart the TX/RX processes.
2524 		 * The ioctl code will have planted some reason strings
2525 		 * to explain what's happening, so the link state change
2526 		 * messages won't be printed on the console . We wake the
2527 		 * factotum to deal with link notifications, if any ...
2528 		 */
2529 		if (dmfe_check_link(dmfep)) {
2530 			dmfe_recheck_link(dmfep, B_TRUE);
2531 			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
2532 		}
2533 
2534 		if (dmfep->mac_state == DMFE_MAC_STARTED)
2535 			dmfe_start(dmfep);
2536 		break;
2537 	}
2538 
2539 	/*
2540 	 * The 'reasons-for-link-change', if any, don't apply any more
2541 	 */
2542 	mutex_exit(dmfep->oplock);
2543 	mutex_exit(dmfep->milock);
2544 
2545 	/*
2546 	 * Finally, decide how to reply
2547 	 */
2548 	switch (status) {
2549 	default:
2550 		/*
2551 		 * Error, reply with a NAK and EINVAL
2552 		 */
2553 		miocnak(wq, mp, 0, EINVAL);
2554 		break;
2555 
2556 	case IOC_RESTART_ACK:
2557 	case IOC_ACK:
2558 		/*
2559 		 * OK, reply with an ACK
2560 		 */
2561 		miocack(wq, mp, 0, 0);
2562 		break;
2563 
2564 	case IOC_RESTART:
2565 	case IOC_REPLY:
2566 		/*
2567 		 * OK, send prepared reply
2568 		 */
2569 		qreply(wq, mp);
2570 		break;
2571 
2572 	case IOC_DONE:
2573 		/*
2574 		 * OK, reply already sent
2575 		 */
2576 		break;
2577 	}
2578 }
2579 
2580 #undef	DMFE_DBG
2581 
2582 
2583 /*
2584  * ========== Per-instance setup/teardown code ==========
2585  */
2586 
2587 #define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/
2588 
2589 /*
2590  * Determine local MAC address & broadcast address for this interface
2591  */
2592 static void
2593 dmfe_find_mac_address(dmfe_t *dmfep)
2594 {
2595 	uchar_t *prop;
2596 	uint_t propsize;
2597 	int err;
2598 
2599 	/*
2600 	 * We have to find the "vendor's factory-set address".  This is
2601 	 * the value of the property "local-mac-address", as set by OBP
2602 	 * (or a .conf file!)
2603 	 *
2604 	 * If the property is not there, then we try to find the factory
2605 	 * mac address from the devices serial EEPROM.
2606 	 */
2607 	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
2608 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
2609 	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
2610 	if (err == DDI_PROP_SUCCESS) {
2611 		if (propsize == ETHERADDRL)
2612 			ethaddr_copy(prop, dmfep->curr_addr);
2613 		ddi_prop_free(prop);
2614 	} else {
2615 		/* no property set... check eeprom */
2616 		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
2617 		    ETHERADDRL);
2618 	}
2619 
2620 	DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
2621 	    ether_sprintf((void *)dmfep->curr_addr)));
2622 }
2623 
2624 static int
2625 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
2626 	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
2627 	uint_t dma_flags, dma_area_t *dma_p)
2628 {
2629 	ddi_dma_cookie_t dma_cookie;
2630 	uint_t ncookies;
2631 	int err;
2632 
2633 	/*
2634 	 * Allocate handle
2635 	 */
2636 	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
2637 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
2638 	if (err != DDI_SUCCESS)
2639 		return (DDI_FAILURE);
2640 
2641 	/*
2642 	 * Allocate memory
2643 	 */
2644 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
2645 	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2646 	    DDI_DMA_SLEEP, NULL,
2647 	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
2648 	if (err != DDI_SUCCESS)
2649 		return (DDI_FAILURE);
2650 
2651 	/*
2652 	 * Bind the two together
2653 	 */
2654 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2655 	    dma_p->mem_va, dma_p->alength, dma_flags,
2656 	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
2657 	if (err != DDI_DMA_MAPPED)
2658 		return (DDI_FAILURE);
2659 	if ((dma_p->ncookies = ncookies) != 1)
2660 		return (DDI_FAILURE);
2661 
2662 	dma_p->mem_dvma = dma_cookie.dmac_address;
2663 	if (setup > 0) {
2664 		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
2665 		dma_p->setup_va = dma_p->mem_va + memsize;
2666 	} else {
2667 		dma_p->setup_dvma = 0;
2668 		dma_p->setup_va = NULL;
2669 	}
2670 
2671 	return (DDI_SUCCESS);
2672 }
2673 
2674 /*
2675  * This function allocates the transmit and receive buffers and descriptors.
2676  */
2677 static int
2678 dmfe_alloc_bufs(dmfe_t *dmfep)
2679 {
2680 	size_t memsize;
2681 	int err;
2682 
2683 	/*
2684 	 * Allocate memory & handles for TX descriptor ring
2685 	 */
2686 	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
2687 	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
2688 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2689 	    &dmfep->tx_desc);
2690 	if (err != DDI_SUCCESS)
2691 		return (DDI_FAILURE);
2692 
2693 	/*
2694 	 * Allocate memory & handles for TX buffers
2695 	 */
2696 	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
2697 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2698 	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
2699 	    &dmfep->tx_buff);
2700 	if (err != DDI_SUCCESS)
2701 		return (DDI_FAILURE);
2702 
2703 	/*
2704 	 * Allocate memory & handles for RX descriptor ring
2705 	 */
2706 	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
2707 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
2708 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2709 	    &dmfep->rx_desc);
2710 	if (err != DDI_SUCCESS)
2711 		return (DDI_FAILURE);
2712 
2713 	/*
2714 	 * Allocate memory & handles for RX buffers
2715 	 */
2716 	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
2717 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2718 	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
2719 	if (err != DDI_SUCCESS)
2720 		return (DDI_FAILURE);
2721 
2722 	/*
2723 	 * Allocate bitmasks for tx packet type tracking
2724 	 */
2725 	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2726 	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2727 
2728 	return (DDI_SUCCESS);
2729 }
2730 
2731 static void
2732 dmfe_free_dma_mem(dma_area_t *dma_p)
2733 {
2734 	if (dma_p->dma_hdl != NULL) {
2735 		if (dma_p->ncookies) {
2736 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2737 			dma_p->ncookies = 0;
2738 		}
2739 		ddi_dma_free_handle(&dma_p->dma_hdl);
2740 		dma_p->dma_hdl = NULL;
2741 		dma_p->mem_dvma = 0;
2742 		dma_p->setup_dvma = 0;
2743 	}
2744 
2745 	if (dma_p->acc_hdl != NULL) {
2746 		ddi_dma_mem_free(&dma_p->acc_hdl);
2747 		dma_p->acc_hdl = NULL;
2748 		dma_p->mem_va = NULL;
2749 		dma_p->setup_va = NULL;
2750 	}
2751 }
2752 
2753 /*
2754  * This routine frees the transmit and receive buffers and descriptors.
2755  * Make sure the chip is stopped before calling it!
2756  */
2757 static void
2758 dmfe_free_bufs(dmfe_t *dmfep)
2759 {
2760 	dmfe_free_dma_mem(&dmfep->rx_buff);
2761 	dmfe_free_dma_mem(&dmfep->rx_desc);
2762 	dmfe_free_dma_mem(&dmfep->tx_buff);
2763 	dmfe_free_dma_mem(&dmfep->tx_desc);
2764 	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
2765 	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
2766 }
2767 
2768 static void
2769 dmfe_unattach(dmfe_t *dmfep)
2770 {
2771 	/*
2772 	 * Clean up and free all DMFE data structures
2773 	 */
2774 	if (dmfep->cycid != NULL) {
2775 		ddi_periodic_delete(dmfep->cycid);
2776 		dmfep->cycid = NULL;
2777 	}
2778 
2779 	if (dmfep->ksp_drv != NULL)
2780 		kstat_delete(dmfep->ksp_drv);
2781 	if (dmfep->progress & PROGRESS_HWINT) {
2782 		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
2783 		mutex_destroy(dmfep->txlock);
2784 		mutex_destroy(dmfep->rxlock);
2785 		mutex_destroy(dmfep->oplock);
2786 	}
2787 	if (dmfep->progress & PROGRESS_SOFTINT)
2788 		ddi_remove_softintr(dmfep->factotum_id);
2789 	if (dmfep->progress & PROGRESS_BUFS)
2790 		dmfe_free_bufs(dmfep);
2791 	if (dmfep->progress & PROGRESS_REGS)
2792 		ddi_regs_map_free(&dmfep->io_handle);
2793 	if (dmfep->progress & PROGRESS_NDD)
2794 		dmfe_nd_cleanup(dmfep);
2795 
2796 	kmem_free(dmfep, sizeof (*dmfep));
2797 }
2798 
2799 static int
2800 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
2801 {
2802 	ddi_acc_handle_t handle;
2803 	uint32_t regval;
2804 
2805 	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
2806 		return (DDI_FAILURE);
2807 
2808 	/*
2809 	 * Get vendor/device/revision.  We expect (but don't check) that
2810 	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
2811 	 */
2812 	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
2813 	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
2814 	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
2815 
2816 	/*
2817 	 * Turn on Bus Master Enable bit and ensure the device is not asleep
2818 	 */
2819 	regval = pci_config_get32(handle, PCI_CONF_COMM);
2820 	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
2821 
2822 	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
2823 	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
2824 	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
2825 
2826 	pci_config_teardown(&handle);
2827 	return (DDI_SUCCESS);
2828 }
2829 
2830 struct ks_index {
2831 	int index;
2832 	char *name;
2833 };
2834 
2835 static const struct ks_index ks_drv_names[] = {
2836 	{	KS_INTERRUPT,			"intr"			},
2837 	{	KS_CYCLIC_RUN,			"cyclic_run"		},
2838 
2839 	{	KS_TICK_LINK_STATE,		"link_state_change"	},
2840 	{	KS_TICK_LINK_POLL,		"link_state_poll"	},
2841 	{	KS_TX_STALL,			"tx_stall_detect"	},
2842 	{	KS_CHIP_ERROR,			"chip_error_interrupt"	},
2843 
2844 	{	KS_FACTOTUM_RUN,		"factotum_run"		},
2845 	{	KS_RECOVERY,			"factotum_recover"	},
2846 	{	KS_LINK_CHECK,			"factotum_link_check"	},
2847 
2848 	{	KS_LINK_UP_CNT,			"link_up_cnt"		},
2849 	{	KS_LINK_DROP_CNT,		"link_drop_cnt"		},
2850 
2851 	{	KS_MIIREG_BMSR,			"mii_status"		},
2852 	{	KS_MIIREG_ANAR,			"mii_advert_cap"	},
2853 	{	KS_MIIREG_ANLPAR,		"mii_partner_cap"	},
2854 	{	KS_MIIREG_ANER,			"mii_expansion_cap"	},
2855 	{	KS_MIIREG_DSCSR,		"mii_dscsr"		},
2856 
2857 	{	-1,				NULL			}
2858 };
2859 
2860 static void
2861 dmfe_init_kstats(dmfe_t *dmfep, int instance)
2862 {
2863 	kstat_t *ksp;
2864 	kstat_named_t *knp;
2865 	const struct ks_index *ksip;
2866 
2867 	/* no need to create MII stats, the mac module already does it */
2868 
2869 	/* Create and initialise driver-defined kstats */
2870 	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
2871 	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
2872 	if (ksp != NULL) {
2873 		for (knp = ksp->ks_data, ksip = ks_drv_names;
2874 		    ksip->name != NULL; ++ksip) {
2875 			kstat_named_init(&knp[ksip->index], ksip->name,
2876 			    KSTAT_DATA_UINT64);
2877 		}
2878 		dmfep->ksp_drv = ksp;
2879 		dmfep->knp_drv = knp;
2880 		kstat_install(ksp);
2881 	} else {
2882 		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
2883 	}
2884 }
2885 
2886 static int
2887 dmfe_resume(dev_info_t *devinfo)
2888 {
2889 	dmfe_t *dmfep;				/* Our private data	*/
2890 	chip_id_t chipid;
2891 
2892 	dmfep = ddi_get_driver_private(devinfo);
2893 	if (dmfep == NULL)
2894 		return (DDI_FAILURE);
2895 
2896 	/*
2897 	 * Refuse to resume if the data structures aren't consistent
2898 	 */
2899 	if (dmfep->devinfo != devinfo)
2900 		return (DDI_FAILURE);
2901 
2902 	/*
2903 	 * Refuse to resume if the chip's changed its identity (*boggle*)
2904 	 */
2905 	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
2906 		return (DDI_FAILURE);
2907 	if (chipid.vendor != dmfep->chipid.vendor)
2908 		return (DDI_FAILURE);
2909 	if (chipid.device != dmfep->chipid.device)
2910 		return (DDI_FAILURE);
2911 	if (chipid.revision != dmfep->chipid.revision)
2912 		return (DDI_FAILURE);
2913 
2914 	/*
2915 	 * All OK, reinitialise h/w & kick off MAC scheduling
2916 	 */
2917 	mutex_enter(dmfep->oplock);
2918 	dmfe_restart(dmfep);
2919 	mutex_exit(dmfep->oplock);
2920 	mac_tx_update(dmfep->mh);
2921 	return (DDI_SUCCESS);
2922 }
2923 
2924 /*
2925  * attach(9E) -- Attach a device to the system
2926  *
2927  * Called once for each board successfully probed.
2928  */
2929 static int
2930 dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2931 {
2932 	mac_register_t *macp;
2933 	dmfe_t *dmfep;				/* Our private data	*/
2934 	uint32_t csr6;
2935 	int instance;
2936 	int err;
2937 
2938 	instance = ddi_get_instance(devinfo);
2939 
2940 	switch (cmd) {
2941 	default:
2942 		return (DDI_FAILURE);
2943 
2944 	case DDI_RESUME:
2945 		return (dmfe_resume(devinfo));
2946 
2947 	case DDI_ATTACH:
2948 		break;
2949 	}
2950 
2951 	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
2952 	ddi_set_driver_private(devinfo, dmfep);
2953 	dmfep->devinfo = devinfo;
2954 	dmfep->dmfe_guard = DMFE_GUARD;
2955 
2956 	/*
2957 	 * Initialize more fields in DMFE private data
2958 	 * Determine the local MAC address
2959 	 */
2960 #if	DMFEDEBUG
2961 	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
2962 	    debug_propname, dmfe_debug);
2963 #endif	/* DMFEDEBUG */
2964 	dmfep->cycid = NULL;
2965 	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
2966 	    instance);
2967 
2968 	/*
2969 	 * Check for custom "opmode-reg-value" property;
2970 	 * if none, use the defaults below for CSR6 ...
2971 	 */
2972 	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
2973 	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2974 	    DDI_PROP_DONTPASS, opmode_propname, csr6);
2975 
2976 	/*
2977 	 * Read chip ID & set up config space command register(s)
2978 	 */
2979 	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
2980 		dmfe_error(dmfep, "dmfe_config_init() failed");
2981 		goto attach_fail;
2982 	}
2983 	dmfep->progress |= PROGRESS_CONFIG;
2984 
2985 	/*
2986 	 * Register NDD-tweakable parameters
2987 	 */
2988 	if (dmfe_nd_init(dmfep)) {
2989 		dmfe_error(dmfep, "dmfe_nd_init() failed");
2990 		goto attach_fail;
2991 	}
2992 	dmfep->progress |= PROGRESS_NDD;
2993 
2994 	/*
2995 	 * Map operating registers
2996 	 */
2997 	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
2998 	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
2999 	if (err != DDI_SUCCESS) {
3000 		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
3001 		goto attach_fail;
3002 	}
3003 	dmfep->progress |= PROGRESS_REGS;
3004 
3005 	/*
3006 	 * Get our MAC address.
3007 	 */
3008 	dmfe_find_mac_address(dmfep);
3009 
3010 	/*
3011 	 * Allocate the TX and RX descriptors/buffers.
3012 	 */
3013 	dmfep->tx.n_desc = dmfe_tx_desc;
3014 	dmfep->rx.n_desc = dmfe_rx_desc;
3015 	err = dmfe_alloc_bufs(dmfep);
3016 	if (err != DDI_SUCCESS) {
3017 		dmfe_error(dmfep, "DMA buffer allocation failed");
3018 		goto attach_fail;
3019 	}
3020 	dmfep->progress |= PROGRESS_BUFS;
3021 
3022 	/*
3023 	 * Add the softint handler
3024 	 */
3025 	dmfep->link_poll_tix = factotum_start_tix;
3026 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
3027 	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
3028 		dmfe_error(dmfep, "ddi_add_softintr() failed");
3029 		goto attach_fail;
3030 	}
3031 	dmfep->progress |= PROGRESS_SOFTINT;
3032 
3033 	/*
3034 	 * Add the h/w interrupt handler & initialise mutexen
3035 	 */
3036 	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
3037 	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
3038 		dmfe_error(dmfep, "ddi_add_intr() failed");
3039 		goto attach_fail;
3040 	}
3041 	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
3042 	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
3043 	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
3044 	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
3045 	dmfep->progress |= PROGRESS_HWINT;
3046 
3047 	/*
3048 	 * Create & initialise named kstats
3049 	 */
3050 	dmfe_init_kstats(dmfep, instance);
3051 
3052 	/*
3053 	 * Reset & initialise the chip and the ring buffers
3054 	 * Initialise the (internal) PHY
3055 	 */
3056 	mutex_enter(dmfep->oplock);
3057 	mutex_enter(dmfep->rxlock);
3058 	mutex_enter(dmfep->txlock);
3059 
3060 	dmfe_reset(dmfep);
3061 
3062 	/*
3063 	 * Prepare the setup packet
3064 	 */
3065 	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
3066 	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
3067 	dmfep->addr_set = B_FALSE;
3068 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
3069 	dmfep->mac_state = DMFE_MAC_RESET;
3070 
3071 	mutex_exit(dmfep->txlock);
3072 	mutex_exit(dmfep->rxlock);
3073 	mutex_exit(dmfep->oplock);
3074 
3075 	dmfep->link_state = LINK_STATE_UNKNOWN;
3076 	if (dmfe_init_phy(dmfep) != B_TRUE)
3077 		goto attach_fail;
3078 	dmfep->update_phy = B_TRUE;
3079 
3080 	/*
3081 	 * Send a reasonable setup frame.  This configures our starting
3082 	 * address and the broadcast address.
3083 	 */
3084 	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);
3085 
3086 	/*
3087 	 * Initialize pointers to device specific functions which
3088 	 * will be used by the generic layer.
3089 	 */
3090 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3091 		goto attach_fail;
3092 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3093 	macp->m_driver = dmfep;
3094 	macp->m_dip = devinfo;
3095 	macp->m_src_addr = dmfep->curr_addr;
3096 	macp->m_callbacks = &dmfe_m_callbacks;
3097 	macp->m_min_sdu = 0;
3098 	macp->m_max_sdu = ETHERMTU;
3099 	macp->m_margin = VLAN_TAGSZ;
3100 
3101 	/*
3102 	 * Finally, we're ready to register ourselves with the MAC layer
3103 	 * interface; if this succeeds, we're all ready to start()
3104 	 */
3105 	err = mac_register(macp, &dmfep->mh);
3106 	mac_free(macp);
3107 	if (err != 0)
3108 		goto attach_fail;
3109 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
3110 
3111 	/*
3112 	 * Install the cyclic callback that we use to check for link
3113 	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
3114 	 * is invoked in kernel context then.
3115 	 */
3116 	ASSERT(dmfep->cycid == NULL);
3117 	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
3118 	    dmfe_tick_us * 1000, DDI_IPL_0);
3119 	return (DDI_SUCCESS);
3120 
3121 attach_fail:
3122 	dmfe_unattach(dmfep);
3123 	return (DDI_FAILURE);
3124 }
3125 
3126 /*
3127  *	dmfe_suspend() -- suspend transmit/receive for powerdown
3128  */
3129 static int
3130 dmfe_suspend(dmfe_t *dmfep)
3131 {
3132 	/*
3133 	 * Just stop processing ...
3134 	 */
3135 	mutex_enter(dmfep->oplock);
3136 	dmfe_stop(dmfep);
3137 	mutex_exit(dmfep->oplock);
3138 
3139 	return (DDI_SUCCESS);
3140 }
3141 
3142 /*
3143  * detach(9E) -- Detach a device from the system
3144  */
3145 static int
3146 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3147 {
3148 	dmfe_t *dmfep;
3149 
3150 	dmfep = ddi_get_driver_private(devinfo);
3151 
3152 	switch (cmd) {
3153 	default:
3154 		return (DDI_FAILURE);
3155 
3156 	case DDI_SUSPEND:
3157 		return (dmfe_suspend(dmfep));
3158 
3159 	case DDI_DETACH:
3160 		break;
3161 	}
3162 
3163 	/*
3164 	 * Unregister from the MAC subsystem.  This can fail, in
3165 	 * particular if there are DLPI style-2 streams still open -
3166 	 * in which case we just return failure without shutting
3167 	 * down chip operations.
3168 	 */
3169 	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
3170 		return (DDI_FAILURE);
3171 
3172 	/*
3173 	 * All activity stopped, so we can clean up & exit
3174 	 */
3175 	dmfe_unattach(dmfep);
3176 	return (DDI_SUCCESS);
3177 }
3178 
3179 
3180 /*
3181  * ========== Module Loading Data & Entry Points ==========
3182  */
3183 
3184 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
3185 	nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
3186 
3187 static struct modldrv dmfe_modldrv = {
3188 	&mod_driverops,		/* Type of module.  This one is a driver */
3189 	dmfe_ident,		/* short description */
3190 	&dmfe_dev_ops		/* driver specific ops */
3191 };
3192 
3193 static struct modlinkage modlinkage = {
3194 	MODREV_1, (void *)&dmfe_modldrv, NULL
3195 };
3196 
3197 int
3198 _info(struct modinfo *modinfop)
3199 {
3200 	return (mod_info(&modlinkage, modinfop));
3201 }
3202 
3203 int
3204 _init(void)
3205 {
3206 	uint32_t tmp100;
3207 	uint32_t tmp10;
3208 	int i;
3209 	int status;
3210 
3211 	/* Calculate global timing parameters */
3212 	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
3213 	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
3214 
3215 	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
3216 		switch (i) {
3217 		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
3218 		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
3219 			/*
3220 			 * The chip doesn't spontaneously recover from
3221 			 * a stall in these states, so we reset early
3222 			 */
3223 			stall_100_tix[i] = tmp100;
3224 			stall_10_tix[i] = tmp10;
3225 			break;
3226 
3227 		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
3228 		default:
3229 			/*
3230 			 * The chip has been seen to spontaneously recover
3231 			 * after an apparent stall in the SUSPEND state,
3232 			 * so we'll allow it rather longer to do so.  As
3233 			 * stalls in other states have not been observed,
3234 			 * we'll use long timeouts for them too ...
3235 			 */
3236 			stall_100_tix[i] = tmp100 * 20;
3237 			stall_10_tix[i] = tmp10 * 20;
3238 			break;
3239 		}
3240 	}
3241 
3242 	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
3243 	factotum_fast_tix = 1+(factotum_tix/5);
3244 	factotum_start_tix = 1+(factotum_tix*2);
3245 
3246 	mac_init_ops(&dmfe_dev_ops, "dmfe");
3247 	status = mod_install(&modlinkage);
3248 	if (status == DDI_SUCCESS)
3249 		dmfe_log_init();
3250 
3251 	return (status);
3252 }
3253 
3254 int
3255 _fini(void)
3256 {
3257 	int status;
3258 
3259 	status = mod_remove(&modlinkage);
3260 	if (status == DDI_SUCCESS) {
3261 		mac_fini_ops(&dmfe_dev_ops);
3262 		dmfe_log_fini();
3263 	}
3264 
3265 	return (status);
3266 }
3267 
3268 #undef	DMFE_DBG
3269