xref: /illumos-gate/usr/src/uts/common/io/bfe/bfe.c (revision 46b592853d0f4f11781b6b0a7533f267c6aee132)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #include <sys/stream.h>
27 #include <sys/strsun.h>
28 #include <sys/stat.h>
29 #include <sys/pci.h>
30 #include <sys/modctl.h>
31 #include <sys/kstat.h>
32 #include <sys/ethernet.h>
33 #include <sys/devops.h>
34 #include <sys/debug.h>
35 #include <sys/conf.h>
36 #include <sys/sysmacros.h>
37 #include <sys/dditypes.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/miiregs.h>
41 #include <sys/byteorder.h>
42 #include <sys/cyclic.h>
43 #include <sys/note.h>
44 #include <sys/crc32.h>
45 #include <sys/mac_provider.h>
46 #include <sys/mac_ether.h>
47 #include <sys/vlan.h>
48 #include <sys/errno.h>
49 #include <sys/sdt.h>
50 #include <sys/strsubr.h>
51 
52 #include "bfe.h"
53 #include "bfe_hw.h"
54 
55 
56 /*
57  * Broadcom BCM4401 chipsets use two rings :
58  *
59  * - One TX : For sending packets down the wire.
60  * - One RX : For receiving packets.
61  *
62  * Each ring can have any number of descriptors (configured during attach).
63  * As of now we configure only 128 descriptor per ring (TX/RX). Each descriptor
64  * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for
65  * the packet and control information (like start/end of frame or end of table).
66  * The descriptor table is allocated first and then a DMA buffer (for a packet)
67  * is allocated and linked to each descriptor.
68  *
69  * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX
70  * interrupt, the stat register will point to current descriptor being
71  * processed.
72  *
73  * Here's an example of TX and RX ring :
74  *
75  * TX:
76  *
77  *   Base of the descriptor table is programmed using BFE_DMATX_CTRL control
78  *   register. Each 'addr' points to DMA buffer (or packet data buffer) to
79  *   be transmitted and 'ctl' has the length of the packet (usually MTU).
80  *
81  *  ----------------------|
82  *  | addr |Descriptor 0  |
83  *  | ctl  |              |
84  *  ----------------------|
85  *  | addr |Descriptor 1  |    SOF (start of the frame)
86  *  | ctl  |              |
87  *  ----------------------|
88  *  | ...  |Descriptor... |    EOF (end of the frame)
89  *  | ...  |              |
90  *  ----------------------|
91  *  | addr |Descriptor 127|
92  *  | ctl  | EOT          |    EOT (End of Table)
93  *  ----------------------|
94  *
95  * 'r_curr_desc'  : pointer to current descriptor which can be used to transmit
96  *                  a packet.
97  * 'r_avail_desc' : decremented whenever a packet is being sent.
98  * 'r_cons_desc'  : incremented whenever a packet is sent down the wire and
99  *                  notified by an interrupt to bfe driver.
100  *
101  * RX:
102  *
103  *   Base of the descriptor table is programmed using BFE_DMARX_CTRL control
104  *   register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
105  *   contains the size of the DMA buffer and all the DMA buffers are
106  *   pre-allocated during attach and hence the maximum size of the packet is
107  *   also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
108  *   the packet length is embedded in bfe_header_t which is added by the
109  *   chip in the beginning of the packet.
110  *
111  *  ----------------------|
112  *  | addr |Descriptor 0  |
113  *  | ctl  |              |
114  *  ----------------------|
115  *  | addr |Descriptor 1  |
116  *  | ctl  |              |
117  *  ----------------------|
118  *  | ...  |Descriptor... |
119  *  | ...  |              |
120  *  ----------------------|
121  *  | addr |Descriptor 127|
122  *  | ctl  | EOT          |    EOT (End of Table)
123  *  ----------------------|
124  *
125  * 'r_curr_desc'  : pointer to current descriptor while receiving a packet.
126  *
127  */
128 
129 #define	MODULE_NAME	"bfe"
130 
131 /*
132  * Used for checking PHY (link state, speed)
133  */
134 #define	BFE_TIMEOUT_INTERVAL	(1000 * 1000 * 1000)
135 
136 
137 /*
138  * Chip restart action and reason for restart
139  */
140 #define	BFE_ACTION_RESTART		0x1	/* For restarting the chip */
141 #define	BFE_ACTION_RESTART_SETPROP	0x2	/* restart due to setprop */
142 #define	BFE_ACTION_RESTART_FAULT	0x4	/* restart due to fault */
143 #define	BFE_ACTION_RESTART_PKT		0x8	/* restart due to pkt timeout */
144 
145 static	char	bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
146 
147 /*
148  * Function Prototypes for bfe driver.
149  */
150 static	int	bfe_check_link(bfe_t *);
151 static	void	bfe_report_link(bfe_t *);
152 static	void	bfe_chip_halt(bfe_t *);
153 static	void	bfe_chip_reset(bfe_t *);
154 static	void	bfe_tx_desc_init(bfe_ring_t *);
155 static	void	bfe_rx_desc_init(bfe_ring_t *);
156 static	void	bfe_set_rx_mode(bfe_t *);
157 static	void	bfe_enable_chip_intrs(bfe_t *);
158 static	void	bfe_chip_restart(bfe_t *);
159 static	void	bfe_init_vars(bfe_t *);
160 static	void	bfe_clear_stats(bfe_t *);
161 static	void	bfe_gather_stats(bfe_t *);
162 static	void	bfe_error(dev_info_t *, char *, ...);
163 static	int	bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
164     uint_t, void *, uint_t *);
165 static	int	bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
166     const void *);
167 static	int	bfe_tx_reclaim(bfe_ring_t *);
168 int	bfe_mac_set_ether_addr(void *, const uint8_t *);
169 
170 
171 /*
172  * Macros for ddi_dma_sync().
173  */
174 #define	SYNC_DESC(r, s, l, d)	\
175 	(void) ddi_dma_sync(r->r_desc_dma_handle, \
176 	    (off_t)(s * sizeof (bfe_desc_t)), \
177 	    (size_t)(l * sizeof (bfe_desc_t)), \
178 	    d)
179 
180 #define	SYNC_BUF(r, s, b, l, d) \
181 	(void) ddi_dma_sync(r->r_buf_dma[s].handle, \
182 	    (off_t)(b), (size_t)(l), d)
183 
184 /*
185  * Supported Broadcom BCM4401 Cards.
186  */
187 static bfe_cards_t bfe_cards[] = {
188 	{ 0x14e4, 0x170c, "BCM4401 100Base-TX"},
189 };
190 
191 
192 /*
193  * DMA attributes for device registers, packet data (buffer) and
194  * descriptor table.
195  */
196 static struct ddi_device_acc_attr bfe_dev_attr = {
197 	DDI_DEVICE_ATTR_V0,
198 	DDI_STRUCTURE_LE_ACC,
199 	DDI_STRICTORDER_ACC
200 };
201 
202 static struct ddi_device_acc_attr bfe_buf_attr = {
203 	DDI_DEVICE_ATTR_V0,
204 	DDI_NEVERSWAP_ACC,	/* native endianness */
205 	DDI_STRICTORDER_ACC
206 };
207 
208 static ddi_dma_attr_t bfe_dma_attr_buf = {
209 	DMA_ATTR_V0,		/* dma_attr_version */
210 	0,			/* dma_attr_addr_lo */
211 	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
212 	0x1fff,			/* dma_attr_count_max */
213 	8,			/* dma_attr_align */
214 	0,			/* dma_attr_burstsizes */
215 	1,			/* dma_attr_minxfer */
216 	0x1fff,			/* dma_attr_maxxfer */
217 	BFE_PCI_DMA - 1,	/* dma_attr_seg */
218 	1,			/* dma_attr_sgllen */
219 	1,			/* dma_attr_granular */
220 	0			/* dma_attr_flags */
221 };
222 
223 static ddi_dma_attr_t bfe_dma_attr_desc = {
224 	DMA_ATTR_V0,		/* dma_attr_version */
225 	0,			/* dma_attr_addr_lo */
226 	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
227 	BFE_PCI_DMA - 1,	/* dma_attr_count_max */
228 	BFE_DESC_ALIGN,		/* dma_attr_align */
229 	0,			/* dma_attr_burstsizes */
230 	1,			/* dma_attr_minxfer */
231 	BFE_PCI_DMA - 1,	/* dma_attr_maxxfer */
232 	BFE_PCI_DMA - 1,	/* dma_attr_seg */
233 	1,			/* dma_attr_sgllen */
234 	1,			/* dma_attr_granular */
235 	0			/* dma_attr_flags */
236 };
237 
238 /*
239  * Ethernet broadcast addresses.
240  */
241 static uchar_t bfe_broadcast[ETHERADDRL] = {
242 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
243 };
244 
245 #define	ASSERT_ALL_LOCKS(bfe) {	\
246 	ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock));	\
247 	ASSERT(rw_write_held(&bfe->bfe_rwlock));	\
248 }
249 
250 /*
251  * Debugging and error reporting code.
252  */
253 static void
254 bfe_error(dev_info_t *dip, char *fmt, ...)
255 {
256 	va_list ap;
257 	char	buf[256];
258 
259 	va_start(ap, fmt);
260 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
261 	va_end(ap);
262 
263 	if (dip) {
264 		cmn_err(CE_WARN, "%s%d: %s",
265 		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
266 	} else {
267 		cmn_err(CE_WARN, "bfe: %s", buf);
268 	}
269 }
270 
271 /*
272  * Grabs all necessary locks to block any other operation on the chip.
273  */
274 static void
275 bfe_grab_locks(bfe_t *bfe)
276 {
277 	bfe_ring_t *tx = &bfe->bfe_tx_ring;
278 
279 	/*
280 	 * Grab all the locks.
281 	 * - bfe_rwlock : locks down whole chip including RX.
282 	 * - tx's r_lock : locks down only TX side.
283 	 */
284 	rw_enter(&bfe->bfe_rwlock, RW_WRITER);
285 	mutex_enter(&tx->r_lock);
286 
287 	/*
288 	 * Note that we don't use RX's r_lock.
289 	 */
290 }
291 
292 /*
293  * Release locks on chip/driver.
294  */
295 static void
296 bfe_release_locks(bfe_t *bfe)
297 {
298 	bfe_ring_t *tx = &bfe->bfe_tx_ring;
299 
300 	/*
301 	 * Release all the locks in the order in which they were grabbed.
302 	 */
303 	mutex_exit(&tx->r_lock);
304 	rw_exit(&bfe->bfe_rwlock);
305 }
306 
307 
308 /*
309  * It's used to make sure that the write to device register was successful.
310  */
311 static int
312 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit,
313     ulong_t t, const int clear)
314 {
315 	ulong_t i;
316 	uint32_t v;
317 
318 	for (i = 0; i < t; i++) {
319 		v = INL(bfe, reg);
320 
321 		if (clear && !(v & bit))
322 			break;
323 
324 		if (!clear && (v & bit))
325 			break;
326 
327 		drv_usecwait(10);
328 	}
329 
330 	/* if device still didn't see the value */
331 	if (i == t)
332 		return (-1);
333 
334 	return (0);
335 }
336 
337 /*
338  * PHY functions (read, write, stop, reset and startup)
339  */
/*
 * Reads a PHY register over the MII/MDIO interface and returns the
 * 16-bit data field of the completed transaction.
 */
static int
bfe_read_phy(bfe_t *bfe, uint32_t reg)
{
	/*
	 * Clear any stale MII-done indication first (the same bit is
	 * polled below to detect completion).
	 */
	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	/* Compose the MDIO frame: start, read op, PHY address, reg, TA. */
	OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));

	/* Poll for completion; a timeout is deliberately ignored here. */
	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);

	return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA));
}
354 
/*
 * Writes 'val' to a PHY register over the MII/MDIO interface.
 */
static void
bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val)
{
	/* Clear any stale MII-done indication before starting. */
	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	/* Compose the MDIO frame: start, write op, PHY address, reg, TA, data. */
	OUTL(bfe,  BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
	    (val & BFE_MDIO_DATA_DATA)));

	/* Poll for completion; a timeout is deliberately ignored here. */
	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
}
368 
369 /*
370  * It resets the PHY layer.
371  */
372 static int
373 bfe_reset_phy(bfe_t *bfe)
374 {
375 	uint32_t i;
376 
377 	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
378 	drv_usecwait(100);
379 	for (i = 0; i < 10; i++) {
380 		if (bfe_read_phy(bfe, MII_CONTROL) &
381 		    MII_CONTROL_RESET) {
382 			drv_usecwait(500);
383 			continue;
384 		}
385 
386 		break;
387 	}
388 
389 	if (i == 10) {
390 		bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
391 		bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
392 		return (BFE_FAILURE);
393 	}
394 
395 	bfe->bfe_phy_state = BFE_PHY_RESET_DONE;
396 
397 	return (BFE_SUCCESS);
398 }
399 
400 /*
401  * Make sure timer function is out of our way and especially during
402  * detach.
403  */
404 static void
405 bfe_stop_timer(bfe_t *bfe)
406 {
407 	if (bfe->bfe_periodic_id) {
408 		ddi_periodic_delete(bfe->bfe_periodic_id);
409 		bfe->bfe_periodic_id = NULL;
410 	}
411 }
412 
/*
 * Stops the PHY: powers it down and isolates it, then records the link
 * as unknown in the soft state.
 */
static void
bfe_stop_phy(bfe_t *bfe)
{
	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
	    MII_CONTROL_ISOLATE);

	/* Link state is now indeterminate until the PHY is restarted. */
	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
	bfe->bfe_chip.speed = 0;
	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;

	bfe->bfe_phy_state = BFE_PHY_STOPPED;

	/*
	 * Report the link status to MAC layer.  The handle may still be
	 * NULL if this is called before mac_register() has completed.
	 */
	if (bfe->bfe_machdl != NULL)
		(void) bfe_report_link(bfe);
}
434 
/*
 * Locates a responding PHY.  First tries the currently recorded
 * bfe_phy_addr (if non-zero); failing that, scans all 32 MII addresses.
 * A PHY is considered present when its status register reads neither
 * all-ones nor zero.  On success the PHY's control register is cleared
 * and BFE_SUCCESS is returned.
 *
 * NOTE(review): on total failure bfe_phy_addr is left at the last
 * probed address (31) rather than restored — callers appear to treat
 * the value as meaningless after BFE_FAILURE; confirm before relying
 * on it.
 */
static int
bfe_probe_phy(bfe_t *bfe)
{
	int phy;
	uint32_t status;

	if (bfe->bfe_phy_addr) {
		status = bfe_read_phy(bfe, MII_STATUS);
		if (status != 0xffff && status != 0) {
			bfe_write_phy(bfe, MII_CONTROL, 0);
			return (BFE_SUCCESS);
		}
	}

	/* Scan every possible MII address for a live PHY. */
	for (phy = 0; phy < 32; phy++) {
		bfe->bfe_phy_addr = phy;
		status = bfe_read_phy(bfe, MII_STATUS);
		if (status != 0xffff && status != 0) {
			bfe_write_phy(bfe, MII_CONTROL, 0);
			return (BFE_SUCCESS);
		}
	}

	return (BFE_FAILURE);
}
460 
/*
 * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
 * status.  It also serves as the deferred-restart point: when another
 * context has flagged BFE_ACTION_RESTART, the actual chip restart is
 * performed here under the full lock set.
 */
static void
bfe_timeout(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;
	int resched = 0;	/* set when MAC TX should be kicked */

	/*
	 * We don't grab any lock because bfe can't go away.
	 * untimeout() will wait for this timeout instance to complete.
	 */
	if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
		/*
		 * Restart the chip.  All restart-reason flags are
		 * cleared together so a single restart services every
		 * pending cause.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_restart(bfe);
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
		bfe_release_locks(bfe);
		mac_tx_update(bfe->bfe_machdl);
		/* Restart will register a new timeout */
		return;
	}

	/* Reader access is enough for the status checks below. */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
		hrtime_t hr;

		/*
		 * TX watchdog: if a packet has been pending past its
		 * stall deadline, schedule a chip restart (handled on
		 * the next firing of this timeout).
		 */
		hr = gethrtime();
		if (bfe->bfe_tx_stall_time != 0 &&
		    hr > bfe->bfe_tx_stall_time) {
			DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
			    char *, "pkt timeout");
			bfe->bfe_chip_action |=
			    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
			bfe->bfe_tx_stall_time = 0;
		}
	}

	if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
		/*
		 * Report the link status to MAC layer if link status changed.
		 */
		if (bfe_check_link(bfe)) {
			bfe_report_link(bfe);
			if (bfe->bfe_chip.link == LINK_STATE_UP) {
				uint32_t val, flow;

				/* Reprogram duplex to match negotiation. */
				val = INL(bfe, BFE_TX_CTRL);
				val &= ~BFE_TX_DUPLEX;
				if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
					val |= BFE_TX_DUPLEX;
					/* Full duplex: disable RX flow control. */
					flow = INL(bfe, BFE_RXCONF);
					flow &= ~BFE_RXCONF_FLOW;
					OUTL(bfe, BFE_RXCONF, flow);

					flow = INL(bfe, BFE_MAC_FLOW);
					flow &= ~(BFE_FLOW_RX_HIWAT);
					OUTL(bfe, BFE_MAC_FLOW, flow);
				}

				/* Link came up: kick any queued TX. */
				resched = 1;

				OUTL(bfe, BFE_TX_CTRL, val);
				DTRACE_PROBE1(link__up,
				    int, bfe->bfe_unit);
			}
		}
	}

	rw_exit(&bfe->bfe_rwlock);

	/* mac_tx_update() is called only after the rwlock is dropped. */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);
}
542 
/*
 * Starts the PHY layer: probes and resets the PHY, derives the
 * hardware's capabilities from MII_STATUS, builds the advertisement
 * register (from defaults, or from the bfe_adv_* properties if
 * bfe_mac_setprop() has been used), kicks off autonegotiation or
 * forced mode, and arms the periodic link-check timer.
 *
 * Returns BFE_SUCCESS, or BFE_FAILURE when no PHY is found or no valid
 * link mode could be selected.
 */
static int
bfe_startup_phy(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar;
	int	prog, s;
	int phyid1, phyid2;

	if (bfe_probe_phy(bfe) == BFE_FAILURE) {
		bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
		return (BFE_FAILURE);
	}

	(void) bfe_reset_phy(bfe);

	/* Record the 32-bit PHY identifier (high word | low word). */
	phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
	phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
	bfe->bfe_phy_id = (phyid1 << 16) | phyid2;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	anar = bfe_read_phy(bfe, MII_AN_ADVERT);

again:
	/* Start from a clean advertisement; bits are re-added below. */
	anar &= ~(MII_ABILITY_100BASE_T4 |
	    MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
	    MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);

	/*
	 * Supported hardware modes are in bmsr.
	 */
	bfe->bfe_chip.bmsr = bmsr;

	/*
	 * Assume no capabilities are supported in the hardware.
	 */
	bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
	    bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
	    bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;

	/*
	 * Assume property is set.
	 */
	s = 1;
	if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
		/*
		 * Property is not set which means bfe_mac_setprop()
		 * is not called on us.
		 */
		s = 0;
	}

	bmcr = prog = 0;

	/*
	 * For each mode the hardware supports: record the capability,
	 * and advertise it either by default (s == 0) or when the
	 * corresponding bfe_adv_* property requests it.  'prog' counts
	 * the modes actually advertised.
	 */
	if (bmsr & MII_STATUS_100_BASEX_FD) {
		bfe->bfe_cap_100fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			bfe->bfe_adv_100fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100fdx) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASE_T4) {
		bfe->bfe_cap_100T4 = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_T4;
			bfe->bfe_adv_100T4 = 1;
			prog++;
		} else if (bfe->bfe_adv_100T4) {
			anar |= MII_ABILITY_100BASE_T4;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASEX) {
		bfe->bfe_cap_100hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX;
			bfe->bfe_adv_100hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100hdx) {
			anar |= MII_ABILITY_100BASE_TX;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10_FD) {
		bfe->bfe_cap_10fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T_FD;
			bfe->bfe_adv_10fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10fdx) {
			anar |= MII_ABILITY_10BASE_T_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10) {
		bfe->bfe_cap_10hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T;
			bfe->bfe_adv_10hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10hdx) {
			anar |= MII_ABILITY_10BASE_T;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_CANAUTONEG) {
		bfe->bfe_cap_aneg = 1;
		if (s == 0) {
			bfe->bfe_adv_aneg = 1;
		}
	}

	if (prog == 0) {
		if (s == 0) {
			/* Even the defaults produced nothing usable. */
			bfe_error(bfe->bfe_dip,
			    "No valid link mode selected. Powering down PHY");
			bfe_stop_phy(bfe);
			bfe_report_link(bfe);
			return (BFE_FAILURE);
		}

		/*
		 * If property is set then user would have goofed up. So we
		 * go back to default properties.
		 */
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
		goto again;
	}

	/* Choose autonegotiation when possible, else the best forced mode. */
	if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
		bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
	} else {
		if (bfe->bfe_adv_100fdx)
			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
		else if (bfe->bfe_adv_100hdx)
			bmcr = MII_CONTROL_100MB;
		else if (bfe->bfe_adv_10fdx)
			bmcr = MII_CONTROL_FDUPLEX;
		else
			bmcr = 0;		/* 10HDX */
	}

	if (prog)
		bfe_write_phy(bfe, MII_AN_ADVERT, anar);

	if (bmcr)
		bfe_write_phy(bfe, MII_CONTROL, bmcr);

	bfe->bfe_mii_anar = anar;
	bfe->bfe_mii_bmcr = bmcr;
	bfe->bfe_phy_state = BFE_PHY_STARTED;

	/* Arm the periodic link-check timer if not already running. */
	if (bfe->bfe_periodic_id == NULL) {
		bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
		    (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);

		DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
	}

	DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
	    int, bmsr, int, bmcr, int, anar);

	return (BFE_SUCCESS);
}
717 
/*
 * Reports the current link state (bfe_chip.link) back to the MAC layer.
 * Caller must ensure bfe_machdl is valid.
 */
static void
bfe_report_link(bfe_t *bfe)
{
	mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
}
726 
/*
 * Reads PHY/MII registers and refreshes the cached link state
 * (bfe_chip.link/.speed/.duplex).  Returns 1 when speed, duplex, or
 * link state changed since the previous call (so the caller can notify
 * the MAC layer), 0 otherwise.
 */
static int
bfe_check_link(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar, anlpar;
	int speed, duplex, link;	/* previous values, for comparison */

	speed = bfe->bfe_chip.speed;
	duplex = bfe->bfe_chip.duplex;
	link = bfe->bfe_chip.link;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	bfe->bfe_mii_bmsr = bmsr;

	bmcr = bfe_read_phy(bfe, MII_CONTROL);

	anar = bfe_read_phy(bfe, MII_AN_ADVERT);
	bfe->bfe_mii_anar = anar;

	anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
	bfe->bfe_mii_anlpar = anlpar;

	bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);

	/*
	 * If exp register is not present in PHY.
	 */
	if (bfe->bfe_mii_exp == 0xffff) {
		bfe->bfe_mii_exp = 0;
	}

	/* No link: clear speed/duplex and report down. */
	if ((bmsr & MII_STATUS_LINKUP) == 0) {
		bfe->bfe_chip.link = LINK_STATE_DOWN;
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
		goto done;
	}

	bfe->bfe_chip.link = LINK_STATE_UP;

	if (!(bmcr & MII_CONTROL_ANE)) {
		/* Forced mode: speed/duplex come straight from BMCR. */
		if (bmcr & MII_CONTROL_100MB)
			bfe->bfe_chip.speed = 100000000;
		else
			bfe->bfe_chip.speed = 10000000;

		if (bmcr & MII_CONTROL_FDUPLEX)
			bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
		else
			bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;

	} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
	    (!(bmsr & MII_STATUS_ANDONE))) {
		/* Autoneg requested but unsupported or not finished yet. */
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
		/*
		 * Resolve the common mode from the intersection of our
		 * advertisement and the link partner's, best mode first.
		 */
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else {
		/* No common ability bits at all. */
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	}

done:
	/*
	 * If speed or link status or duplex mode changed then report to
	 * MAC layer which is done by the caller.
	 */
	if (speed != bfe->bfe_chip.speed ||
	    duplex != bfe->bfe_chip.duplex ||
	    link != bfe->bfe_chip.link) {
		return (1);
	}

	return (0);
}
818 
819 static void
820 bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
821 {
822 	uint32_t v;
823 
824 	v = ((uint32_t)d[2] << 24);
825 	v |= ((uint32_t)d[3] << 16);
826 	v |= ((uint32_t)d[4] << 8);
827 	v |= (uint32_t)d[5];
828 
829 	OUTL(bfe, BFE_CAM_DATA_LO, v);
830 	v = (BFE_CAM_HI_VALID |
831 	    (((uint32_t)d[0]) << 8) |
832 	    (((uint32_t)d[1])));
833 
834 	OUTL(bfe, BFE_CAM_DATA_HI, v);
835 	OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
836 	    ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
837 	(void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
838 }
839 
840 /*
841  * Chip related functions (halt, reset, start).
842  */
/*
 * Halts the chip: masks interrupts, disables the ethernet core, waits
 * for in-flight TX/RX to drain, then stops both DMA engines.
 */
static void
bfe_chip_halt(bfe_t *bfe)
{
	/*
	 * Disables interrupts.
	 */
	OUTL(bfe, BFE_INTR_MASK, 0);
	FLUSH(bfe, BFE_INTR_MASK);

	OUTL(bfe,  BFE_ENET_CTRL, BFE_ENET_DISABLE);

	/*
	 * Wait until TX and RX finish their job.
	 */
	(void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);

	/*
	 * Disables DMA engine.
	 */
	OUTL(bfe, BFE_DMARX_CTRL, 0);
	OUTL(bfe, BFE_DMATX_CTRL, 0);

	drv_usecwait(10);

	bfe->bfe_chip_state = BFE_CHIP_HALT;
}
869 
/*
 * Full chip restart: halt chip and PHY, reinitialize soft state, reset
 * the chip (which also restarts the PHY), rebuild both descriptor
 * rings, and re-enable RX filtering and interrupts.  Caller must hold
 * all locks (see bfe_grab_locks()).
 */
static void
bfe_chip_restart(bfe_t *bfe)
{
	DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
	    int, bfe->bfe_chip_action);

	/*
	 * Halt chip and PHY.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * Init variables.
	 */
	bfe_init_vars(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * DMA descriptor rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);
}
903 
/*
 * Disables core by stopping the clock.  The sequence (reject, wait for
 * reject/busy, forced-gated-clock with reset, then reset without clock)
 * follows the backplane's required order — do not reorder these writes.
 */
static void
bfe_core_disable(bfe_t *bfe)
{
	/* Already held in reset: nothing to do. */
	if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
		return;

	/* Reject new backplane transactions and wait for quiesce. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	(void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
	(void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
	OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(10);
	/* Finally: reset asserted with the clock stopped. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	drv_usecwait(10);
}
922 
/*
 * Resets core: disables it, clocks it while held in reset, clears any
 * latched error state, then releases reset.  Each stage is flushed and
 * given a 1us settle time — do not reorder these writes.
 */
static void
bfe_core_reset(bfe_t *bfe)
{
	uint32_t val;

	/*
	 * First disable the core.
	 */
	bfe_core_disable(bfe);

	/* Re-enable the clock with reset and forced-gated-clock asserted. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	/* Clear a latched serror, if any. */
	if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
		OUTL(bfe, BFE_SBTMSHIGH, 0);

	/* Clear inband-error/timeout state. */
	val = INL(bfe, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Drop reset, keeping forced-gated-clock for one more step. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	/* Normal clocking: core is now out of reset. */
	OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);
}
955 
/*
 * Routes backplane interrupts for the given core(s) to PCI and enables
 * PCI prefetch/burst, by temporarily remapping the BAR0 window onto the
 * sbtopci (PCI bridge) register space.
 */
static void
bfe_setup_config(bfe_t *bfe, uint32_t cores)
{
	uint32_t bar_orig, val;

	/*
	 * Change bar0 window to map sbtopci registers.
	 */
	bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN);
	pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI);

	/*
	 * Just read it and don't do anything (deliberate throwaway read;
	 * the result is overwritten immediately below).
	 */
	val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE;

	/* Enable interrupt forwarding for the requested core(s). */
	val = INL(bfe, BFE_SBINTVEC);
	val |= cores;
	OUTL(bfe, BFE_SBINTVEC, val);

	/* Turn on PCI prefetch and burst for better throughput. */
	val = INL(bfe, BFE_SSB_PCI_TRANS_2);
	val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
	OUTL(bfe, BFE_SSB_PCI_TRANS_2, val);

	/*
	 * Restore bar0 window mapping.
	 */
	pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig);
}
983 
984 /*
985  * Resets chip and starts PHY.
986  */
987 static void
988 bfe_chip_reset(bfe_t *bfe)
989 {
990 	uint32_t val;
991 
992 	/* Set the interrupt vector for the enet core */
993 	bfe_setup_config(bfe, BFE_INTVEC_ENET0);
994 
995 	/* check if core is up */
996 	val = INL(bfe, BFE_SBTMSLOW) &
997 	    (BFE_RESET | BFE_REJECT | BFE_CLOCK);
998 
999 	if (val == BFE_CLOCK) {
1000 		OUTL(bfe, BFE_RCV_LAZY, 0);
1001 		OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
1002 		(void) bfe_wait_bit(bfe, BFE_ENET_CTRL,
1003 		    BFE_ENET_DISABLE, 10, 1);
1004 		OUTL(bfe, BFE_DMATX_CTRL, 0);
1005 		FLUSH(bfe, BFE_DMARX_STAT);
1006 		drv_usecwait(20000);	/* 20 milli seconds */
1007 		if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) {
1008 			(void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE,
1009 			    10, 0);
1010 		}
1011 		OUTL(bfe, BFE_DMARX_CTRL, 0);
1012 	}
1013 
1014 	bfe_core_reset(bfe);
1015 	bfe_clear_stats(bfe);
1016 
1017 	OUTL(bfe, BFE_MDIO_CTRL, 0x8d);
1018 	val = INL(bfe, BFE_DEVCTRL);
1019 	if (!(val & BFE_IPP))
1020 		OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL);
1021 	else if (INL(bfe, BFE_DEVCTRL & BFE_EPR)) {
1022 		OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR);
1023 		drv_usecwait(20000);    /* 20 milli seconds */
1024 	}
1025 
1026 	OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
1027 
1028 	OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
1029 
1030 	OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
1031 	    BFE_LAZY_FC_MASK));
1032 
1033 	OUTL_OR(bfe, BFE_RCV_LAZY, 0);
1034 
1035 	OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
1036 	OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);
1037 
1038 	OUTL(bfe, BFE_TX_WMARK, 56);
1039 
1040 	/* Program DMA channels */
1041 	OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
1042 
1043 	/*
1044 	 * DMA addresses need to be added to BFE_PCI_DMA
1045 	 */
1046 	OUTL(bfe, BFE_DMATX_ADDR,
1047 	    bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1048 
1049 	OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
1050 	    | BFE_RX_CTRL_ENABLE);
1051 
1052 	OUTL(bfe, BFE_DMARX_ADDR,
1053 	    bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1054 
1055 	(void) bfe_startup_phy(bfe);
1056 
1057 	bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
1058 }
1059 
/*
 * It enables interrupts. Should be the last step while starting chip,
 * so interrupts only arrive once everything else is programmed.
 */
static void
bfe_enable_chip_intrs(bfe_t *bfe)
{
	/* Enable the chip and core */
	OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);

	/* Enable interrupts */
	OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
}
1072 
1073 /*
1074  * Common code to take care of setting RX side mode (filter).
1075  */
1076 static void
1077 bfe_set_rx_mode(bfe_t *bfe)
1078 {
1079 	uint32_t val;
1080 	int i;
1081 	ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
1082 
1083 	/*
1084 	 * We don't touch RX filter if we were asked to suspend. It's fine
1085 	 * if chip is not active (no interface is plumbed on us).
1086 	 */
1087 	if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
1088 		return;
1089 
1090 	val = INL(bfe, BFE_RXCONF);
1091 
1092 	val &= ~BFE_RXCONF_PROMISC;
1093 	val &= ~BFE_RXCONF_DBCAST;
1094 
1095 	if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
1096 		OUTL(bfe, BFE_CAM_CTRL, 0);
1097 		FLUSH(bfe, BFE_CAM_CTRL);
1098 	} else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
1099 		val |= BFE_RXCONF_PROMISC;
1100 		val &= ~BFE_RXCONF_DBCAST;
1101 	} else {
1102 		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1103 			/* Flush everything */
1104 			OUTL(bfe, BFE_RXCONF, val |
1105 			    BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
1106 			FLUSH(bfe, BFE_RXCONF);
1107 		}
1108 
1109 		/* Disable CAM */
1110 		OUTL(bfe, BFE_CAM_CTRL, 0);
1111 		FLUSH(bfe, BFE_CAM_CTRL);
1112 
1113 		/*
1114 		 * We receive all multicast packets.
1115 		 */
1116 		val |= BFE_RXCONF_ALLMULTI;
1117 
1118 		for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
1119 			bfe_cam_write(bfe, (uchar_t *)mac, i);
1120 		}
1121 
1122 		bfe_cam_write(bfe, bfe->bfe_ether_addr, i);
1123 
1124 		/* Enable CAM */
1125 		OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
1126 		FLUSH(bfe, BFE_CAM_CTRL);
1127 	}
1128 
1129 	DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
1130 	    int, val);
1131 
1132 	OUTL(bfe, BFE_RXCONF, val);
1133 	FLUSH(bfe, BFE_RXCONF);
1134 }
1135 
1136 /*
1137  * Reset various variable values to initial state.
1138  */
1139 static void
1140 bfe_init_vars(bfe_t *bfe)
1141 {
1142 	bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;
1143 
1144 	/* Initial assumption */
1145 	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
1146 	bfe->bfe_chip.speed = 0;
1147 	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
1148 
1149 	bfe->bfe_periodic_id = NULL;
1150 	bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;
1151 
1152 	bfe->bfe_tx_stall_time = 0;
1153 }
1154 
1155 /*
1156  * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
1157  * has control (desc_ctl) and address (desc_addr) member.
1158  */
static void
bfe_tx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	/* Program every descriptor with its buffer's length and address. */
	for (i = 0; i < r->r_ndesc; i++) {
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len & BFE_DESC_LEN));

		/*
		 * DMA addresses need to be added to BFE_PCI_DMA
		 */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
	}

	/* Flag the last descriptor as end-of-table so the ring wraps. */
	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	/* Make the descriptor table visible to the device. */
	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	/* Reset producer (r_curr_desc) / consumer (r_cons_desc) indices. */
	r->r_curr_desc = 0;
	r->r_avail_desc = TX_NUM_DESC;
	r->r_cons_desc = 0;
}
1186 
1187 /*
1188  * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
1189  * has control (desc_ctl) and address (desc_addr) member.
1190  */
static void
bfe_rx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	/* Program every descriptor with its buffer's length and address. */
	for (i = 0; i < r->r_ndesc; i++) {
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len& BFE_DESC_LEN));

		/* DMA addresses need to be added to BFE_PCI_DMA. */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Initialize rx header (len, flags) */
		bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));

		(void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Flag the last descriptor as end-of-table so the ring wraps. */
	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	/*
	 * TAIL of RX Descriptor: i == r_ndesc here, i.e. one past the
	 * last slot, which tells the chip the whole ring is available.
	 */
	OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));

	r->r_curr_desc = 0;
	r->r_avail_desc = RX_NUM_DESC;
}
1223 
/*
 * Brings the chip into the ACTIVE state: halt, reset, program both
 * descriptor rings, restore the RX filter and re-enable interrupts.
 * Caller must hold all driver locks (asserted below).  Always returns
 * DDI_SUCCESS in the current implementation.
 */
static int
bfe_chip_start(bfe_t *bfe)
{
	ASSERT_ALL_LOCKS(bfe);

	/*
	 * Stop the chip first & then Reset the chip. At last enable interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Initialize Descriptor Rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);

	/* Check link, speed and duplex mode */
	(void) bfe_check_link(bfe);

	return (DDI_SUCCESS);
}
1256 
1257 
1258 /*
1259  * Clear chip statistics.
1260  */
1261 static void
1262 bfe_clear_stats(bfe_t *bfe)
1263 {
1264 	ulong_t r;
1265 
1266 	OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
1267 
1268 	/*
1269 	 * Stat registers are cleared by reading.
1270 	 */
1271 	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
1272 		(void) INL(bfe, r);
1273 
1274 	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
1275 		(void) INL(bfe, r);
1276 }
1277 
1278 /*
1279  * Collect chip statistics.
1280  */
static void
bfe_gather_stats(bfe_t *bfe)
{
	ulong_t r;
	uint32_t *v;
	uint32_t txerr = 0, rxerr = 0, coll = 0;

	/*
	 * Walk the hardware MIB registers and accumulate each one into the
	 * consecutive uint32_t fields of bfe_hw_stats, starting at
	 * tx_good_octets (TX) and rx_good_octets (RX).
	 *
	 * NOTE(review): this assumes the field order in bfe_hw_stats exactly
	 * matches the register order listed in the comment below -- confirm
	 * against the structure declaration in bfe.h.
	 */
	v = &bfe->bfe_hw_stats.tx_good_octets;
	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	v = &bfe->bfe_hw_stats.rx_good_octets;
	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/*
	 * TX :
	 * -------
	 * tx_good_octets, tx_good_pkts, tx_octets
	 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
	 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
	 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
	 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
	 * tx_underruns, tx_total_cols, tx_single_cols
	 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
	 * tx_defered, tx_carrier_lost, tx_pause_pkts
	 *
	 * RX :
	 * -------
	 * rx_good_octets, rx_good_pkts, rx_octets
	 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
	 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
	 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
	 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
	 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
	 * rx_crc_errs, rx_align_errs, rx_symbol_errs
	 * rx_pause_pkts, rx_nonpause_pkts
	 */

	/* Fold selected hardware counters into the GLDv3 kstat view. */
	bfe->bfe_stats.ether_stat_carrier_errors =
	    bfe->bfe_hw_stats.tx_carrier_lost;

	/* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */

	bfe->bfe_stats.ether_stat_ex_collisions =
	    bfe->bfe_hw_stats.tx_excessive_cols;
	txerr += bfe->bfe_hw_stats.tx_excessive_cols;
	coll += bfe->bfe_hw_stats.tx_excessive_cols;

	bfe->bfe_stats.ether_stat_fcs_errors =
	    bfe->bfe_hw_stats.rx_crc_errs;
	rxerr += bfe->bfe_hw_stats.rx_crc_errs;

	bfe->bfe_stats.ether_stat_first_collisions =
	    bfe->bfe_hw_stats.tx_single_cols;
	coll += bfe->bfe_hw_stats.tx_single_cols;
	bfe->bfe_stats.ether_stat_multi_collisions =
	    bfe->bfe_hw_stats.tx_multiple_cols;
	coll += bfe->bfe_hw_stats.tx_multiple_cols;

	bfe->bfe_stats.ether_stat_toolong_errors =
	    bfe->bfe_hw_stats.rx_oversize_pkts;
	rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;

	bfe->bfe_stats.ether_stat_tooshort_errors =
	    bfe->bfe_hw_stats.rx_undersize;
	rxerr += bfe->bfe_hw_stats.rx_undersize;

	bfe->bfe_stats.ether_stat_tx_late_collisions +=
	    bfe->bfe_hw_stats.tx_late_cols;

	bfe->bfe_stats.ether_stat_defer_xmts +=
	    bfe->bfe_hw_stats.tx_defered;

	/* Aggregate error and collision totals. */
	bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
	bfe->bfe_stats.ether_stat_macxmt_errors += txerr;

	bfe->bfe_stats.collisions += coll;
}
1364 
1365 /*
1366  * Gets the state for dladm command and all.
1367  */
1368 int
1369 bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
1370 {
1371 	bfe_t *bfe = (bfe_t *)arg;
1372 	uint64_t	v;
1373 	int err = 0;
1374 
1375 	rw_enter(&bfe->bfe_rwlock, RW_READER);
1376 
1377 
1378 	switch (stat) {
1379 	default:
1380 		err = ENOTSUP;
1381 		break;
1382 
1383 	case MAC_STAT_IFSPEED:
1384 		/*
1385 		 * MAC layer will ask for IFSPEED first and hence we
1386 		 * collect it only once.
1387 		 */
1388 		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1389 			/*
1390 			 * Update stats from the hardware.
1391 			 */
1392 			bfe_gather_stats(bfe);
1393 		}
1394 		v = bfe->bfe_chip.speed;
1395 		break;
1396 
1397 	case ETHER_STAT_ADV_CAP_100T4:
1398 		v = bfe->bfe_adv_100T4;
1399 		break;
1400 
1401 	case ETHER_STAT_ADV_CAP_100FDX:
1402 		v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0;
1403 		break;
1404 
1405 	case ETHER_STAT_ADV_CAP_100HDX:
1406 		v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0;
1407 		break;
1408 
1409 	case ETHER_STAT_ADV_CAP_10FDX:
1410 		v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0;
1411 		break;
1412 
1413 	case ETHER_STAT_ADV_CAP_10HDX:
1414 		v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0;
1415 		break;
1416 
1417 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1418 		v = 0;
1419 		break;
1420 
1421 	case ETHER_STAT_ADV_CAP_AUTONEG:
1422 		v = bfe->bfe_adv_aneg;
1423 		break;
1424 
1425 	case ETHER_STAT_ADV_CAP_PAUSE:
1426 		v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0;
1427 		break;
1428 
1429 	case ETHER_STAT_ADV_REMFAULT:
1430 		v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0;
1431 		break;
1432 
1433 	case ETHER_STAT_ALIGN_ERRORS:
1434 		/* MIB */
1435 		v = bfe->bfe_stats.ether_stat_align_errors;
1436 		break;
1437 
1438 	case ETHER_STAT_CAP_100T4:
1439 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0;
1440 		break;
1441 
1442 	case ETHER_STAT_CAP_100FDX:
1443 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0;
1444 		break;
1445 
1446 	case ETHER_STAT_CAP_100HDX:
1447 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0;
1448 		break;
1449 
1450 	case ETHER_STAT_CAP_10FDX:
1451 		v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0;
1452 		break;
1453 
1454 	case ETHER_STAT_CAP_10HDX:
1455 		v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0;
1456 		break;
1457 
1458 	case ETHER_STAT_CAP_ASMPAUSE:
1459 		v = 0;
1460 		break;
1461 
1462 	case ETHER_STAT_CAP_AUTONEG:
1463 		v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0);
1464 		break;
1465 
1466 	case ETHER_STAT_CAP_PAUSE:
1467 		v = 1;
1468 		break;
1469 
1470 	case ETHER_STAT_CAP_REMFAULT:
1471 		v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0;
1472 		break;
1473 
1474 	case ETHER_STAT_CARRIER_ERRORS:
1475 		v = bfe->bfe_stats.ether_stat_carrier_errors;
1476 		break;
1477 
1478 	case ETHER_STAT_JABBER_ERRORS:
1479 		err = ENOTSUP;
1480 		break;
1481 
1482 	case ETHER_STAT_DEFER_XMTS:
1483 		v = bfe->bfe_stats.ether_stat_defer_xmts;
1484 		break;
1485 
1486 	case ETHER_STAT_EX_COLLISIONS:
1487 		/* MIB */
1488 		v = bfe->bfe_stats.ether_stat_ex_collisions;
1489 		break;
1490 
1491 	case ETHER_STAT_FCS_ERRORS:
1492 		/* MIB */
1493 		v = bfe->bfe_stats.ether_stat_fcs_errors;
1494 		break;
1495 
1496 	case ETHER_STAT_FIRST_COLLISIONS:
1497 		/* MIB */
1498 		v = bfe->bfe_stats.ether_stat_first_collisions;
1499 		break;
1500 
1501 	case ETHER_STAT_LINK_ASMPAUSE:
1502 		v = 0;
1503 		break;
1504 
1505 	case ETHER_STAT_LINK_AUTONEG:
1506 		v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 &&
1507 		    (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0;
1508 		break;
1509 
1510 	case ETHER_STAT_LINK_DUPLEX:
1511 		v = bfe->bfe_chip.duplex;
1512 		break;
1513 
1514 	case ETHER_STAT_LP_CAP_100T4:
1515 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0;
1516 		break;
1517 
1518 	case ETHER_STAT_LP_CAP_100FDX:
1519 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0;
1520 		break;
1521 
1522 	case ETHER_STAT_LP_CAP_100HDX:
1523 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0;
1524 		break;
1525 
1526 	case ETHER_STAT_LP_CAP_10FDX:
1527 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0;
1528 		break;
1529 
1530 	case ETHER_STAT_LP_CAP_10HDX:
1531 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0;
1532 		break;
1533 
1534 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1535 		v = 0;
1536 		break;
1537 
1538 	case ETHER_STAT_LP_CAP_AUTONEG:
1539 		v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0;
1540 		break;
1541 
1542 	case ETHER_STAT_LP_CAP_PAUSE:
1543 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0;
1544 		break;
1545 
1546 	case ETHER_STAT_LP_REMFAULT:
1547 		v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0;
1548 		break;
1549 
1550 	case ETHER_STAT_MACRCV_ERRORS:
1551 		v = bfe->bfe_stats.ether_stat_macrcv_errors;
1552 		break;
1553 
1554 	case ETHER_STAT_MACXMT_ERRORS:
1555 		v = bfe->bfe_stats.ether_stat_macxmt_errors;
1556 		break;
1557 
1558 	case ETHER_STAT_MULTI_COLLISIONS:
1559 		v = bfe->bfe_stats.ether_stat_multi_collisions;
1560 		break;
1561 
1562 	case ETHER_STAT_SQE_ERRORS:
1563 		err = ENOTSUP;
1564 		break;
1565 
1566 	case ETHER_STAT_TOOLONG_ERRORS:
1567 		v = bfe->bfe_stats.ether_stat_toolong_errors;
1568 		break;
1569 
1570 	case ETHER_STAT_TOOSHORT_ERRORS:
1571 		v = bfe->bfe_stats.ether_stat_tooshort_errors;
1572 		break;
1573 
1574 	case ETHER_STAT_TX_LATE_COLLISIONS:
1575 		v = bfe->bfe_stats.ether_stat_tx_late_collisions;
1576 		break;
1577 
1578 	case ETHER_STAT_XCVR_ADDR:
1579 		v = bfe->bfe_phy_addr;
1580 		break;
1581 
1582 	case ETHER_STAT_XCVR_ID:
1583 		v = bfe->bfe_phy_id;
1584 		break;
1585 
1586 	case MAC_STAT_BRDCSTRCV:
1587 		v = bfe->bfe_stats.brdcstrcv;
1588 		break;
1589 
1590 	case MAC_STAT_BRDCSTXMT:
1591 		v = bfe->bfe_stats.brdcstxmt;
1592 		break;
1593 
1594 	case MAC_STAT_MULTIXMT:
1595 		v = bfe->bfe_stats.multixmt;
1596 		break;
1597 
1598 	case MAC_STAT_COLLISIONS:
1599 		v = bfe->bfe_stats.collisions;
1600 		break;
1601 
1602 	case MAC_STAT_IERRORS:
1603 		v = bfe->bfe_stats.ierrors;
1604 		break;
1605 
1606 	case MAC_STAT_IPACKETS:
1607 		v = bfe->bfe_stats.ipackets;
1608 		break;
1609 
1610 	case MAC_STAT_MULTIRCV:
1611 		v = bfe->bfe_stats.multircv;
1612 		break;
1613 
1614 	case MAC_STAT_NORCVBUF:
1615 		v = bfe->bfe_stats.norcvbuf;
1616 		break;
1617 
1618 	case MAC_STAT_NOXMTBUF:
1619 		v = bfe->bfe_stats.noxmtbuf;
1620 		break;
1621 
1622 	case MAC_STAT_OBYTES:
1623 		v = bfe->bfe_stats.obytes;
1624 		break;
1625 
1626 	case MAC_STAT_OERRORS:
1627 		/* MIB */
1628 		v = bfe->bfe_stats.ether_stat_macxmt_errors;
1629 		break;
1630 
1631 	case MAC_STAT_OPACKETS:
1632 		v = bfe->bfe_stats.opackets;
1633 		break;
1634 
1635 	case MAC_STAT_RBYTES:
1636 		v = bfe->bfe_stats.rbytes;
1637 		break;
1638 
1639 	case MAC_STAT_UNDERFLOWS:
1640 		v = bfe->bfe_stats.underflows;
1641 		break;
1642 
1643 	case MAC_STAT_OVERFLOWS:
1644 		v = bfe->bfe_stats.overflows;
1645 		break;
1646 	}
1647 
1648 	rw_exit(&bfe->bfe_rwlock);
1649 
1650 	*val = v;
1651 	return (err);
1652 }
1653 
1654 /*ARGSUSED*/
1655 int
1656 bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
1657     uint_t sz, void *val, uint_t *perm)
1658 {
1659 	bfe_t		*bfe = (bfe_t *)arg;
1660 	int		err = 0;
1661 	boolean_t	dfl = flags & MAC_PROP_DEFAULT;
1662 
1663 	if (sz == 0)
1664 		return (EINVAL);
1665 
1666 	*perm = MAC_PROP_PERM_RW;
1667 	switch (num) {
1668 	case MAC_PROP_DUPLEX:
1669 		*perm = MAC_PROP_PERM_READ;
1670 		if (sz >= sizeof (link_duplex_t)) {
1671 			bcopy(&bfe->bfe_chip.duplex, val,
1672 			    sizeof (link_duplex_t));
1673 		} else {
1674 			err = EINVAL;
1675 		}
1676 		break;
1677 
1678 	case MAC_PROP_SPEED:
1679 		*perm = MAC_PROP_PERM_READ;
1680 		if (sz >= sizeof (uint64_t)) {
1681 			bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
1682 		} else {
1683 			err = EINVAL;
1684 		}
1685 		break;
1686 
1687 	case MAC_PROP_AUTONEG:
1688 		*(uint8_t *)val =
1689 		    dfl ? bfe->bfe_cap_aneg : bfe->bfe_adv_aneg;
1690 		break;
1691 
1692 	case MAC_PROP_ADV_100FDX_CAP:
1693 		*perm = MAC_PROP_PERM_READ;
1694 		*(uint8_t *)val =
1695 		    dfl ? bfe->bfe_cap_100fdx : bfe->bfe_adv_100fdx;
1696 		break;
1697 	case MAC_PROP_EN_100FDX_CAP:
1698 		*(uint8_t *)val =
1699 		    dfl ? bfe->bfe_cap_100fdx : bfe->bfe_adv_100fdx;
1700 		break;
1701 
1702 	case MAC_PROP_ADV_100HDX_CAP:
1703 		*perm = MAC_PROP_PERM_READ;
1704 		*(uint8_t *)val =
1705 		    dfl ? bfe->bfe_cap_100hdx : bfe->bfe_adv_100hdx;
1706 		break;
1707 	case MAC_PROP_EN_100HDX_CAP:
1708 		*(uint8_t *)val =
1709 		    dfl ? bfe->bfe_cap_100hdx : bfe->bfe_adv_100hdx;
1710 		break;
1711 
1712 	case MAC_PROP_ADV_10FDX_CAP:
1713 		*perm = MAC_PROP_PERM_READ;
1714 		*(uint8_t *)val =
1715 		    dfl ? bfe->bfe_cap_10fdx : bfe->bfe_adv_10fdx;
1716 		break;
1717 	case MAC_PROP_EN_10FDX_CAP:
1718 		*(uint8_t *)val =
1719 		    dfl ? bfe->bfe_cap_10fdx : bfe->bfe_adv_10fdx;
1720 		break;
1721 
1722 	case MAC_PROP_ADV_10HDX_CAP:
1723 		*perm = MAC_PROP_PERM_READ;
1724 		*(uint8_t *)val =
1725 		    dfl ? bfe->bfe_cap_10hdx : bfe->bfe_adv_10hdx;
1726 		break;
1727 	case MAC_PROP_EN_10HDX_CAP:
1728 		*(uint8_t *)val =
1729 		    dfl ? bfe->bfe_cap_10hdx : bfe->bfe_adv_10hdx;
1730 		break;
1731 
1732 	case MAC_PROP_ADV_100T4_CAP:
1733 		*perm = MAC_PROP_PERM_READ;
1734 		*(uint8_t *)val =
1735 		    dfl ? bfe->bfe_cap_100T4 : bfe->bfe_adv_100T4;
1736 		break;
1737 	case MAC_PROP_EN_100T4_CAP:
1738 		*(uint8_t *)val =
1739 		    dfl ? bfe->bfe_cap_100T4 : bfe->bfe_adv_100T4;
1740 		break;
1741 
1742 	default:
1743 		err = ENOTSUP;
1744 	}
1745 
1746 	return (err);
1747 }
1748 
1749 /*ARGSUSED*/
/*
 * mac(9E) setprop entry point: changes an advertised link capability
 * (or autoneg) and restarts the chip so the PHY renegotiates with the
 * new settings.  Returns ENOTSUP for unknown properties or ones the
 * hardware cannot do; otherwise 0.
 */
int
bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	bfe_t		*bfe = (bfe_t *)arg;
	uint8_t		*advp;	/* currently advertised value */
	uint8_t		*capp;	/* hardware capability; 0 => unsupported */
	int 		r = 0;

	/* Map the property to its advertise/capability field pair. */
	switch (num) {
	case MAC_PROP_EN_100FDX_CAP:
		advp = &bfe->bfe_adv_100fdx;
		capp = &bfe->bfe_cap_100fdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		advp = &bfe->bfe_adv_100hdx;
		capp = &bfe->bfe_cap_100hdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		advp = &bfe->bfe_adv_10fdx;
		capp = &bfe->bfe_cap_10fdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		advp = &bfe->bfe_adv_10hdx;
		capp = &bfe->bfe_cap_10hdx;
		break;

	case MAC_PROP_EN_100T4_CAP:
		advp = &bfe->bfe_adv_100T4;
		capp = &bfe->bfe_cap_100T4;
		break;

	case MAC_PROP_AUTONEG:
		advp = &bfe->bfe_adv_aneg;
		capp = &bfe->bfe_cap_aneg;
		break;

	default:
		return (ENOTSUP);
	}

	if (*capp == 0)
		return (ENOTSUP);

	bfe_grab_locks(bfe);

	/* Only act when the advertised value actually changes. */
	if (*advp != *(const uint8_t *)val) {
		*advp = *(const uint8_t *)val;

		bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP;
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/*
			 * We need to stop the timer before grabbing locks
			 * otherwise we can land-up in deadlock with untimeout.
			 *
			 * NOTE(review): contrary to the sentence above,
			 * bfe_stop_timer() is invoked here AFTER
			 * bfe_grab_locks(); bfe_mac_stop() stops the timer
			 * before taking the locks.  Confirm this ordering
			 * cannot deadlock with untimeout().
			 */
			bfe_stop_timer(bfe);

			bfe->bfe_chip_action |= BFE_ACTION_RESTART;

			bfe_chip_restart(bfe);

			/*
			 * We leave SETPROP because properties can be
			 * temporary.
			 */
			bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART);
			r = 1;
		}
	}

	bfe_release_locks(bfe);

	/* kick-off a potential stopped downstream */
	if (r)
		mac_tx_update(bfe->bfe_machdl);

	return (0);
}
1831 
1832 
1833 int
1834 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea)
1835 {
1836 	bfe_t *bfe = (bfe_t *)arg;
1837 
1838 	bfe_grab_locks(bfe);
1839 	bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL);
1840 	bfe_set_rx_mode(bfe);
1841 	bfe_release_locks(bfe);
1842 	return (0);
1843 }
1844 
1845 int
1846 bfe_mac_start(void *arg)
1847 {
1848 	bfe_t *bfe = (bfe_t *)arg;
1849 
1850 	bfe_grab_locks(bfe);
1851 	if (bfe_chip_start(bfe) == DDI_FAILURE) {
1852 		bfe_release_locks(bfe);
1853 		return (EINVAL);
1854 	}
1855 
1856 	bfe_release_locks(bfe);
1857 
1858 	mac_tx_update(bfe->bfe_machdl);
1859 
1860 	return (0);
1861 }
1862 
/*
 * mac(9E) entry point: takes the chip down.  Interrupts are disabled,
 * the chip is reset (leaving the PHY running) and the RX path is turned
 * off via the filter.
 */
void
bfe_mac_stop(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;

	/*
	 * We need to stop the timer before grabbing locks otherwise
	 * we can land-up in deadlock with untimeout.
	 */
	bfe_stop_timer(bfe);

	bfe_grab_locks(bfe);

	/*
	 * First halt the chip by disabling interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * This will leave the PHY running.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Disable RX register.
	 */
	bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);

	bfe_release_locks(bfe);
}
1897 
1898 /*
1899  * Send a packet down the wire.
1900  */
/*
 * Copies one mblk into the current TX DMA buffer and programs a single
 * descriptor for it (frame start == frame end).  Returns BFE_SUCCESS
 * when the packet was consumed (sent or dropped), BFE_FAILURE when the
 * ring is full so the caller must requeue it.  Caller holds the TX ring
 * lock (asserted below).
 */
static int
bfe_send_a_packet(bfe_t *bfe, mblk_t *mp)
{
	bfe_ring_t *r = &bfe->bfe_tx_ring;
	uint32_t cur = r->r_curr_desc;
	uint32_t next;
	size_t	pktlen = msgsize(mp);
	uchar_t *buf;
	uint32_t v;

	ASSERT(MUTEX_HELD(&r->r_lock));
	ASSERT(mp != NULL);

	/*
	 * Oversize frames are silently dropped but still reported as
	 * SUCCESS so the caller keeps draining its chain.
	 * NOTE(review): no error/drop statistic is bumped here -- confirm
	 * that is intentional.
	 */
	if (pktlen > r->r_buf_len) {
		freemsg(mp);
		return (BFE_SUCCESS);
	}

	/*
	 * There is a big reason why we don't check for '0'. It becomes easy
	 * for us to not roll over the ring since we are based on producer (tx)
	 * and consumer (reclaim by an interrupt) model. Especially when we
	 * run out of TX descriptor, chip will send a single interrupt and
	 * both producer and consumer counter will be same. So we keep a
	 * difference of 1 always.
	 */
	if (r->r_avail_desc <= 1) {
		bfe->bfe_stats.noxmtbuf++;
		bfe->bfe_tx_resched = 1;
		return (BFE_FAILURE);
	}

	/*
	 * Get the DMA buffer to hold packet.
	 */
	buf = (uchar_t *)r->r_buf_dma[cur].addr;

	mcopymsg(mp, buf);	/* it also frees mp */

	/*
	 * Gather statistics: the low bit of the first destination-address
	 * octet marks a group (broadcast/multicast) frame.
	 */
	if (buf[0] & 0x1) {
		if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0)
			bfe->bfe_stats.multixmt++;
		else
			bfe->bfe_stats.brdcstxmt++;
	}
	bfe->bfe_stats.opackets++;
	bfe->bfe_stats.obytes += pktlen;


	/*
	 * Program the DMA descriptor (start and end of frame are same).
	 */
	next = cur;
	v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF |
	    BFE_DESC_EOF;

	/* Last slot also carries the end-of-table (ring wrap) flag. */
	if (cur == (TX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr),
	    (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA));

	/*
	 * Sync the packet data for the device.
	 */
	(void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV);

	/* Move to next descriptor slot */
	BFE_INC_SLOT(next, TX_NUM_DESC);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	r->r_curr_desc = next;

	/*
	 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
	 * descriptor slot are being programmed.
	 */
	OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t));
	FLUSH(bfe, BFE_DMATX_PTR);

	r->r_avail_desc--;

	/*
	 * Let timeout know that it must reset the chip if a
	 * packet is not sent down the wire for more than 5 seconds.
	 */
	bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL);

	return (BFE_SUCCESS);
}
2000 
2001 mblk_t *
2002 bfe_mac_transmit_packet(void *arg, mblk_t *mp)
2003 {
2004 	bfe_t *bfe = (bfe_t *)arg;
2005 	bfe_ring_t *r = &bfe->bfe_tx_ring;
2006 	mblk_t	*nmp;
2007 
2008 	mutex_enter(&r->r_lock);
2009 
2010 	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2011 		DTRACE_PROBE1(tx__chip__not__active, int, bfe->bfe_unit);
2012 
2013 		freemsgchain(mp);
2014 		mutex_exit(&r->r_lock);
2015 		return (NULL);
2016 	}
2017 
2018 
2019 	while (mp != NULL) {
2020 		nmp = mp->b_next;
2021 		mp->b_next = NULL;
2022 
2023 		if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) {
2024 			mp->b_next = nmp;
2025 			break;
2026 		}
2027 		mp = nmp;
2028 	}
2029 
2030 	mutex_exit(&r->r_lock);
2031 
2032 	return (mp);
2033 }
2034 
2035 int
2036 bfe_mac_set_promisc(void *arg, boolean_t promiscflag)
2037 {
2038 	bfe_t *bfe = (bfe_t *)arg;
2039 
2040 	bfe_grab_locks(bfe);
2041 	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2042 		bfe_release_locks(bfe);
2043 		return (EIO);
2044 	}
2045 
2046 	if (promiscflag) {
2047 		/* Set Promiscous on */
2048 		bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC;
2049 	} else {
2050 		bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC;
2051 	}
2052 
2053 	bfe_set_rx_mode(bfe);
2054 	bfe_release_locks(bfe);
2055 
2056 	return (0);
2057 }
2058 
int
bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
{
	/*
	 * It was too much of pain to implement multicast in CAM. Instead
	 * we never disable multicast filter: bfe_set_rx_mode() always sets
	 * BFE_RXCONF_ALLMULTI, so every multicast group is already
	 * received and there is nothing to add or remove here.
	 */
	return (0);
}
2068 
/*
 * mac(9E) callback vector registered with the MAC layer.  The first
 * member is a bitmask announcing which optional callbacks are provided.
 */
static mac_callbacks_t bfe_mac_callbacks = {
	MC_SETPROP | MC_GETPROP,	/* optional callbacks present */
	bfe_mac_getstat,	/* gets stats */
	bfe_mac_start,		/* starts mac */
	bfe_mac_stop,		/* stops mac */
	bfe_mac_set_promisc,	/* sets promisc mode for snoop */
	bfe_mac_set_multicast,	/* multicast implementation */
	bfe_mac_set_ether_addr,	/* sets ethernet address (unicast) */
	bfe_mac_transmit_packet, /* transmits packet */
	NULL,			/* ioctl */
	NULL,			/* getcap */
	NULL,			/* open */
	NULL,			/* close */
	bfe_mac_setprop,	/* sets link properties */
	bfe_mac_getprop,	/* gets link properties */
};
2085 
/*
 * Handles error interrupts (BFE_ISTAT_ERRORS).  For fatal conditions
 * the chip is halted here and restart flags are latched into
 * bfe_chip_action for later recovery.
 */
static void
bfe_error_handler(bfe_t *bfe, int intr_mask)
{
	uint32_t v;

	/* RX FIFO overflow: halt and request a full restart. */
	if (intr_mask & BFE_ISTAT_RFO) {
		bfe->bfe_stats.overflows++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* TX FIFO underflow: only counted; the chip is left running. */
	if (intr_mask & BFE_ISTAT_TFU) {
		bfe->bfe_stats.underflows++;
		return;
	}

	/* Descriptor Protocol Error */
	if (intr_mask & BFE_ISTAT_DPE) {
		bfe_error(bfe->bfe_dip,
		    "Descriptor Protocol Error. Halting Chip");
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/*
	 * Descriptor Error.
	 * NOTE(review): `halt' is not declared in this function and is not
	 * visible in this part of the file -- presumably a file-scope
	 * debug/tuning variable; confirm it exists.  Also note this branch
	 * halts the chip without setting any restart flags in
	 * bfe_chip_action.
	 */
	if (intr_mask & BFE_ISTAT_DSCE && halt == 0) {
		bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
		goto action;
	}

	/* Receive Descr. Underflow */
	if (intr_mask & BFE_ISTAT_RDU) {
		bfe_error(bfe->bfe_dip,
		    "Receive Descriptor Underflow. Restarting Chip");
		bfe->bfe_stats.ether_stat_macrcv_errors++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	v = INL(bfe, BFE_DMATX_STAT);

	/* Error while sending a packet */
	if (v & BFE_STAT_EMASK) {
		bfe->bfe_stats.ether_stat_macxmt_errors++;
		bfe_error(bfe->bfe_dip,
		    "Error while sending a packet. Restarting Chip");
	}

	/* Error while receiving a packet */
	v = INL(bfe, BFE_DMARX_STAT);
	if (v & BFE_RX_FLAG_ERRORS) {
		bfe->bfe_stats.ierrors++;
		bfe_error(bfe->bfe_dip,
		    "Error while receiving a packet. Restarting Chip");
	}

	/* Any remaining DMA error: request a full restart. */
	bfe->bfe_chip_action |=
	    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);

action:
	bfe_chip_halt(bfe);
}
2152 
2153 /*
2154  * It will recycle a RX descriptor slot.
2155  */
2156 static void
2157 bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
2158 {
2159 	bfe_ring_t *r = &bfe->bfe_rx_ring;
2160 	uint32_t v;
2161 
2162 	slot %= RX_NUM_DESC;
2163 
2164 	bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));
2165 
2166 	(void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);
2167 
2168 	v = r->r_buf_dma[slot].len  & BFE_DESC_LEN;
2169 	if (slot == (RX_NUM_DESC - 1))
2170 		v |= BFE_DESC_EOT;
2171 
2172 	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);
2173 
2174 	/*
2175 	 * DMA addresses need to be added to BFE_PCI_DMA
2176 	 */
2177 	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
2178 	    (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
2179 }
2180 
2181 /*
2182  * Gets called from interrupt context to handle RX interrupt.
2183  */
2184 static mblk_t *
2185 bfe_receive(bfe_t *bfe, int intr_mask)
2186 {
2187 	int rxstat, current;
2188 	mblk_t	*mp = NULL, *rx_head, *rx_tail;
2189 	uchar_t	*rx_header;
2190 	uint16_t len;
2191 	uchar_t	*bp;
2192 	bfe_ring_t *r = &bfe->bfe_rx_ring;
2193 	int i;
2194 
2195 	rxstat = INL(bfe, BFE_DMARX_STAT);
2196 	current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t);
2197 	i = r->r_curr_desc;
2198 
2199 	rx_head = rx_tail = NULL;
2200 
2201 	DTRACE_PROBE3(receive, int, bfe->bfe_unit,
2202 	    int, r->r_curr_desc,
2203 	    int, current);
2204 
2205 	for (i = r->r_curr_desc; i != current;
2206 	    BFE_INC_SLOT(i, RX_NUM_DESC)) {
2207 
2208 		/*
2209 		 * Sync the buffer associated with the descriptor table entry.
2210 		 */
2211 		(void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len,
2212 		    DDI_DMA_SYNC_FORKERNEL);
2213 
2214 		rx_header = (void *)r->r_buf_dma[i].addr;
2215 
2216 		/*
2217 		 * We do this to make sure we are endian neutral. Chip is
2218 		 * big endian.
2219 		 *
2220 		 * The header looks like :-
2221 		 *
2222 		 *  Offset 0  -> uint16_t len
2223 		 *  Offset 2  -> uint16_t flags
2224 		 *  Offset 4  -> uint16_t pad[12]
2225 		 */
2226 		len = (rx_header[1] << 8) | rx_header[0];
2227 		len -= 4;	/* CRC bytes need to be removed */
2228 
2229 		/*
2230 		 * Don't receive this packet if pkt length is greater than
2231 		 * MTU + VLAN_TAGSZ.
2232 		 */
2233 		if (len > r->r_buf_len) {
2234 			/* Recycle slot for later use */
2235 			bfe_rx_desc_buf_reinit(bfe, i);
2236 			continue;
2237 		}
2238 
2239 		if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) {
2240 			mp->b_rptr += VLAN_TAGSZ;
2241 			bp = mp->b_rptr;
2242 			mp->b_wptr = bp + len;
2243 
2244 			/* sizeof (bfe_rx_header_t) + 2 */
2245 			bcopy(r->r_buf_dma[i].addr +
2246 			    BFE_RX_OFFSET, bp, len);
2247 
2248 			mp->b_next = NULL;
2249 			if (rx_tail == NULL)
2250 				rx_head = rx_tail = mp;
2251 			else {
2252 				rx_tail->b_next = mp;
2253 				rx_tail = mp;
2254 			}
2255 
2256 			/* Number of packets received so far */
2257 			bfe->bfe_stats.ipackets++;
2258 
2259 			/* Total bytes of packets received so far */
2260 			bfe->bfe_stats.rbytes += len;
2261 
2262 			if (bcmp(mp->b_rptr, bfe_broadcast, ETHERADDRL) == 0)
2263 				bfe->bfe_stats.brdcstrcv++;
2264 			else
2265 				bfe->bfe_stats.multircv++;
2266 		} else {
2267 			bfe->bfe_stats.norcvbuf++;
2268 			/* Recycle the slot for later use */
2269 			bfe_rx_desc_buf_reinit(bfe, i);
2270 			break;
2271 		}
2272 
2273 		/*
2274 		 * Reinitialize the current descriptor slot's buffer so that
2275 		 * it can be reused.
2276 		 */
2277 		bfe_rx_desc_buf_reinit(bfe, i);
2278 	}
2279 
2280 	r->r_curr_desc = i;
2281 
2282 	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
2283 
2284 	return (rx_head);
2285 }
2286 
/*
 * Reclaims the TX descriptors the chip has consumed, reprogramming each
 * one with its quiescent buffer length/address so it can be reused, and
 * advances the consumer pointer.  Returns the chip's current descriptor
 * index.  Called with the TX ring lock held (see bfe_tx_done()).
 */
static int
bfe_tx_reclaim(bfe_ring_t *r)
{
	uint32_t cur, start;
	uint32_t v;

	/* Hardware's current (consumer) descriptor index. */
	cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	cur = cur / sizeof (bfe_desc_t);

	/*
	 * Start with the last descriptor consumed by the chip.
	 */
	start = r->r_cons_desc;

	DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit,
	    int, start,
	    int, cur);

	/*
	 * There will be at least one descriptor to process.
	 */
	while (start != cur) {
		r->r_avail_desc++;
		v = r->r_buf_dma[start].len  & BFE_DESC_LEN;
		if (start == (TX_NUM_DESC - 1))
			v |= BFE_DESC_EOT;	/* preserve ring-wrap flag */

		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
		    (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Move to next descriptor in TX ring */
		BFE_INC_SLOT(start, TX_NUM_DESC);
	}

	/* Push the rewritten descriptors back to the device. */
	(void) ddi_dma_sync(r->r_desc_dma_handle,
	    0, (r->r_ndesc * sizeof (bfe_desc_t)),
	    DDI_DMA_SYNC_FORDEV);

	r->r_cons_desc = start; 	/* consumed pointer */

	/* The chip made progress; clear the TX stall watchdog deadline. */
	r->r_bfe->bfe_tx_stall_time = 0;

	return (cur);
}
2331 
2332 static int
2333 bfe_tx_done(bfe_t *bfe, int intr_mask)
2334 {
2335 	bfe_ring_t *r = &bfe->bfe_tx_ring;
2336 	int resched = 0;
2337 
2338 	mutex_enter(&r->r_lock);
2339 	(void) bfe_tx_reclaim(r);
2340 
2341 	if (bfe->bfe_tx_resched) {
2342 		resched = 1;
2343 		bfe->bfe_tx_resched = 0;
2344 	}
2345 	mutex_exit(&r->r_lock);
2346 
2347 	return (resched);
2348 }
2349 
2350 /*
2351  * ISR for interrupt handling
2352  */
static uint_t
bfe_interrupt(caddr_t arg1, caddr_t arg2)
{
	bfe_t *bfe =  (void *)arg1;
	uint32_t	intr_stat;
	mblk_t *rx_head = NULL;
	int resched = 0;

	/*
	 * Grab the lock to avoid stopping the chip while this interrupt
	 * is handled.
	 */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	/*
	 * It's necessary to read intr stat again because masking interrupt
	 * register does not really mask interrupts coming from the chip.
	 */
	intr_stat = INL(bfe, BFE_INTR_STAT);
	intr_stat &= BFE_IMASK_DEF;
	/* Writing the status bits back acknowledges them to the chip. */
	OUTL(bfe, BFE_INTR_STAT, intr_stat);
	(void) INL(bfe, BFE_INTR_STAT);

	/* No bit of ours set: the interrupt was not from this device. */
	if (intr_stat == 0) {
		rw_exit(&bfe->bfe_rwlock);
		return (DDI_INTR_UNCLAIMED);
	}

	DTRACE_PROBE2(bfe__interrupt, int, bfe->bfe_unit,
	    int, intr_stat);

	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
		/*
		 * If chip is suspended then we just return.
		 */
		if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
			rw_exit(&bfe->bfe_rwlock);
			DTRACE_PROBE1(interrupt__chip__is__suspend, int,
			    bfe->bfe_unit);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Halt the chip again i.e basically disable interrupts.
		 */
		bfe_chip_halt(bfe);
		rw_exit(&bfe->bfe_rwlock);
		DTRACE_PROBE1(interrupt__chip__not__active, int,
		    bfe->bfe_unit);
		return (DDI_INTR_CLAIMED);
	}

	/* A packet was received */
	if (intr_stat & BFE_ISTAT_RX) {
		rx_head = bfe_receive(bfe, intr_stat);
	}

	/* A packet was sent down the wire */
	if (intr_stat & BFE_ISTAT_TX) {
		resched = bfe_tx_done(bfe, intr_stat);
	}

	/* There was an error */
	if (intr_stat & BFE_ISTAT_ERRORS) {
		bfe_error_handler(bfe, intr_stat);
	}

	rw_exit(&bfe->bfe_rwlock);

	/*
	 * Pass the list of packets received from chip to MAC layer.
	 * (Done after dropping the lock to avoid holding it across MAC.)
	 */
	if (rx_head) {
		mac_rx(bfe->bfe_machdl, 0, rx_head);
	}

	/*
	 * Let the MAC start sending pkts to a potential stopped stream.
	 */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);

	return (DDI_INTR_CLAIMED);
}
2437 
2438 /*
2439  * Removes registered interrupt handler.
2440  */
static void
bfe_remove_intr(bfe_t *bfe)
{
	/* Unhook the ISR first, then release the interrupt handle. */
	(void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
	(void) ddi_intr_free(bfe->bfe_intrhdl);
}
2447 
2448 /*
2449  * Add an interrupt for the driver.
2450  */
2451 static int
2452 bfe_add_intr(bfe_t *bfe)
2453 {
2454 	int	nintrs = 1;
2455 	int ret;
2456 
2457 	ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl,
2458 	    DDI_INTR_TYPE_FIXED,	/* type */
2459 	    0,	/* inumber */
2460 	    1,	/* count */
2461 	    &nintrs,	/* actual nintrs */
2462 	    DDI_INTR_ALLOC_STRICT);
2463 
2464 	if (ret != DDI_SUCCESS) {
2465 		bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed"
2466 		    " : ret : %d", ret);
2467 		return (DDI_FAILURE);
2468 	}
2469 
2470 	ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL);
2471 	if (ret != DDI_SUCCESS) {
2472 		bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed");
2473 		(void) ddi_intr_free(bfe->bfe_intrhdl);
2474 		return (DDI_FAILURE);
2475 	}
2476 
2477 	ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri);
2478 	if (ret != DDI_SUCCESS) {
2479 		bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed");
2480 		bfe_remove_intr(bfe);
2481 		return (DDI_FAILURE);
2482 	}
2483 
2484 	return (DDI_SUCCESS);
2485 }
2486 
2487 
2488 /*
2489  * Identify chipset family.
2490  */
2491 static int
2492 bfe_identify_hardware(bfe_t *bfe)
2493 {
2494 	uint16_t	vid, did;
2495 	int i;
2496 
2497 	vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID);
2498 	did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID);
2499 
2500 	for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
2501 		if (bfe_cards[i].vendor_id == vid &&
2502 		    bfe_cards[i].device_id == did) {
2503 			return (BFE_SUCCESS);
2504 		}
2505 	}
2506 
2507 	bfe_error(bfe->bfe_dip, "bfe driver is attaching to unknown pci%d,%d"
2508 	    " vendor/device-id card", vid, did);
2509 
2510 	return (BFE_SUCCESS);
2511 }
2512 
2513 /*
2514  * Maps device registers.
2515  */
2516 static int
2517 bfe_regs_map(bfe_t *bfe)
2518 {
2519 	dev_info_t *dip = bfe->bfe_dip;
2520 	int ret;
2521 
2522 	ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0,
2523 	    &bfe_dev_attr, &bfe->bfe_mem_regset.hdl);
2524 
2525 	if (ret != DDI_SUCCESS) {
2526 		bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed");
2527 		return (DDI_FAILURE);
2528 	}
2529 
2530 	return (DDI_SUCCESS);
2531 }
2532 
static void
bfe_unmap_regs(bfe_t *bfe)
{
	/* Undo the register mapping established by bfe_regs_map(). */
	ddi_regs_map_free(&bfe->bfe_mem_regset.hdl);
}
2538 
2539 static int
2540 bfe_get_chip_config(bfe_t *bfe)
2541 {
2542 	uint32_t	prom[BFE_EEPROM_SIZE];
2543 	int i;
2544 
2545 	/*
2546 	 * Read EEPROM in prom[]
2547 	 */
2548 	for (i = 0; i < BFE_EEPROM_SIZE; i++) {
2549 		prom[i] = INL(bfe, BFE_EEPROM_BASE + i * sizeof (uint32_t));
2550 	}
2551 
2552 	bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] =
2553 	    INB(bfe, BFE_EEPROM_BASE + 79);
2554 
2555 	bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] =
2556 	    INB(bfe, BFE_EEPROM_BASE + 78);
2557 
2558 	bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] =
2559 	    INB(bfe, BFE_EEPROM_BASE + 81);
2560 
2561 	bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] =
2562 	    INB(bfe, BFE_EEPROM_BASE + 80);
2563 
2564 	bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] =
2565 	    INB(bfe, BFE_EEPROM_BASE + 83);
2566 
2567 	bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] =
2568 	    INB(bfe, BFE_EEPROM_BASE + 82);
2569 
2570 	bfe->bfe_phy_addr = -1;
2571 
2572 	return (DDI_SUCCESS);
2573 }
2574 
2575 /*
2576  * Ring Management routines
2577  */
/*
 * Allocates, maps and binds one DMA buffer for ring slot 'slot'.
 * 'd' (intended DMA direction) is currently unused; the buffer is
 * bound for both directions (DDI_DMA_RDWR).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after undoing any partial
 * allocation via the goto-based unwind at the bottom.
 */
static int
bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d)
{
	int err;
	uint_t count = 0;

	/* Per-slot DMA handle. */
	err = ddi_dma_alloc_handle(bfe->bfe_dip,
	    &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].handle);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " alloc_handle failed");
		goto fail0;
	}

	/* Backing memory of r_buf_len bytes (streaming access). */
	err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle,
	    r->r_buf_len, &bfe_buf_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
	    &r->r_buf_dma[slot].len,
	    &r->r_buf_dma[slot].acchdl);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " mem_alloc failed :%d", err);
		goto fail1;
	}

	/* Bind the memory for DMA; must yield exactly one cookie. */
	err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
	    NULL, r->r_buf_dma[slot].addr,
	    r->r_buf_dma[slot].len,
	    (DDI_DMA_RDWR | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].cookie,
	    &count);

	if (err != DDI_DMA_MAPPED) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " bind_handle failed");
		goto fail2;
	}

	if (count > 1) {
		/* Fragmented binding is unusable here: unbind and fail. */
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " more than one DMA cookie");
		(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
		goto fail2;
	}

	return (DDI_SUCCESS);
fail2:
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
fail1:
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
fail0:
	return (DDI_FAILURE);
}
2635 
2636 static void
2637 bfe_ring_buf_free(bfe_ring_t *r, int slot)
2638 {
2639 	if (r->r_buf_dma == NULL)
2640 		return;
2641 
2642 	(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
2643 	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
2644 	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
2645 }
2646 
2647 static void
2648 bfe_buffer_free(bfe_ring_t *r)
2649 {
2650 	int i;
2651 
2652 	for (i = 0; i < r->r_ndesc; i++) {
2653 		bfe_ring_buf_free(r, i);
2654 	}
2655 }
2656 
/*
 * Releases the ring's descriptor area and per-slot bookkeeping array.
 * Callers must have freed the per-slot buffers (bfe_buffer_free())
 * beforehand, since the handles inside r_buf_dma are freed here.
 */
static void
bfe_ring_desc_free(bfe_ring_t *r)
{
	/* Teardown order: unbind, free memory, free handle. */
	(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
	ddi_dma_mem_free(&r->r_desc_acc_handle);
	ddi_dma_free_handle(&r->r_desc_dma_handle);
	kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));

	/* Mark the ring empty so stale pointers are not reused. */
	r->r_buf_dma = NULL;
	r->r_desc = NULL;
}
2668 
2669 
2670 static int
2671 bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
2672 {
2673 	int err, i, fail = 0;
2674 	caddr_t	ring;
2675 	size_t	size_krnl = 0, size_dma = 0, ring_len = 0;
2676 	ddi_dma_cookie_t cookie;
2677 	uint_t	count = 0;
2678 
2679 	ASSERT(bfe != NULL);
2680 
2681 	size_krnl = r->r_ndesc * sizeof (bfe_dma_t);
2682 	size_dma = r->r_ndesc * sizeof (bfe_desc_t);
2683 	r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP);
2684 
2685 
2686 	err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc,
2687 	    DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle);
2688 
2689 	if (err != DDI_SUCCESS) {
2690 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2691 		    " ddi_dma_alloc_handle()");
2692 		kmem_free(r->r_buf_dma, size_krnl);
2693 		return (DDI_FAILURE);
2694 	}
2695 
2696 
2697 	err = ddi_dma_mem_alloc(r->r_desc_dma_handle,
2698 	    size_dma, &bfe_buf_attr,
2699 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2700 	    &ring, &ring_len, &r->r_desc_acc_handle);
2701 
2702 	if (err != DDI_SUCCESS) {
2703 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2704 		    " ddi_dma_mem_alloc()");
2705 		ddi_dma_free_handle(&r->r_desc_dma_handle);
2706 		kmem_free(r->r_buf_dma, size_krnl);
2707 		return (DDI_FAILURE);
2708 	}
2709 
2710 	err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle,
2711 	    NULL, ring, ring_len,
2712 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2713 	    DDI_DMA_SLEEP, NULL,
2714 	    &cookie, &count);
2715 
2716 	if (err != DDI_SUCCESS) {
2717 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2718 		    " ddi_dma_addr_bind_handle()");
2719 		ddi_dma_mem_free(&r->r_desc_acc_handle);
2720 		ddi_dma_free_handle(&r->r_desc_dma_handle);
2721 		kmem_free(r->r_buf_dma, size_krnl);
2722 		return (DDI_FAILURE);
2723 	}
2724 
2725 	/*
2726 	 * We don't want to have multiple cookies. Descriptor should be
2727 	 * aligned to PAGESIZE boundary.
2728 	 */
2729 	ASSERT(count == 1);
2730 
2731 	/* The actual descriptor for the ring */
2732 	r->r_desc_len = ring_len;
2733 	r->r_desc_cookie = cookie;
2734 
2735 	r->r_desc = (void *)ring;
2736 
2737 	bzero(r->r_desc, size_dma);
2738 	bzero(r->r_desc, ring_len);
2739 
2740 	/* For each descriptor, allocate a DMA buffer */
2741 	fail = 0;
2742 	for (i = 0; i < r->r_ndesc; i++) {
2743 		if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) {
2744 			i--;
2745 			fail = 1;
2746 			break;
2747 		}
2748 	}
2749 
2750 	if (fail) {
2751 		while (i-- >= 0) {
2752 			bfe_ring_buf_free(r, i);
2753 		}
2754 
2755 		/* We don't need the descriptor anymore */
2756 		bfe_ring_desc_free(r);
2757 		return (DDI_FAILURE);
2758 	}
2759 
2760 	return (DDI_SUCCESS);
2761 }
2762 
2763 static int
2764 bfe_rings_alloc(bfe_t *bfe)
2765 {
2766 	/* TX */
2767 	mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2768 	bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock;
2769 	bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2770 	    VLAN_TAGSZ + ETHERFCSL;
2771 	bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC;
2772 	bfe->bfe_tx_ring.r_bfe = bfe;
2773 	bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC;
2774 
2775 	/* RX */
2776 	mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2777 	bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock;
2778 	bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2779 	    VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM;
2780 	bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC;
2781 	bfe->bfe_rx_ring.r_bfe = bfe;
2782 	bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC;
2783 
2784 	/* Allocate TX Ring */
2785 	if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring,
2786 	    DDI_DMA_WRITE) != DDI_SUCCESS)
2787 		return (DDI_FAILURE);
2788 
2789 	/* Allocate RX Ring */
2790 	if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring,
2791 	    DDI_DMA_READ) != DDI_SUCCESS) {
2792 		cmn_err(CE_NOTE, "RX ring allocation failed");
2793 		bfe_ring_desc_free(&bfe->bfe_tx_ring);
2794 		return (DDI_FAILURE);
2795 	}
2796 
2797 	bfe->bfe_tx_ring.r_flags = BFE_RING_ALLOCATED;
2798 	bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;
2799 
2800 	return (DDI_SUCCESS);
2801 }
2802 
/*
 * DDI_RESUME handler: reinitializes and restarts a chip that was
 * quiesced by the DDI_SUSPEND leg of bfe_detach().  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
bfe_resume(dev_info_t *dip)
{
	bfe_t *bfe;
	int err = DDI_SUCCESS;

	if ((bfe = ddi_get_driver_private(dip)) == NULL) {
		bfe_error(dip, "Unexpected error (no driver private data)"
		    " while resume");
		return (DDI_FAILURE);
	}

	/*
	 * Grab all the locks first.
	 */
	bfe_grab_locks(bfe);
	bfe->bfe_chip_state = BFE_CHIP_RESUME;

	bfe_init_vars(bfe);
	/* PHY will also start running */
	bfe_chip_reset(bfe);
	if (bfe_chip_start(bfe) == DDI_FAILURE) {
		bfe_error(dip, "Could not resume chip");
		err = DDI_FAILURE;
	}

	bfe_release_locks(bfe);

	/* Kick MAC so transmits queued during suspend can proceed. */
	if (err == DDI_SUCCESS)
		mac_tx_update(bfe->bfe_machdl);

	return (err);
}
2836 
2837 static int
2838 bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2839 {
2840 	int	unit;
2841 	bfe_t	*bfe;
2842 	mac_register_t	*macreg;
2843 	int	ret;
2844 
2845 	switch (cmd) {
2846 	case DDI_RESUME:
2847 		return (bfe_resume(dip));
2848 
2849 	case DDI_ATTACH:
2850 		break;
2851 
2852 	default:
2853 		return (DDI_FAILURE);
2854 	}
2855 
2856 
2857 	unit = ddi_get_instance(dip);
2858 
2859 	bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
2860 	bfe->bfe_dip = dip;
2861 	bfe->bfe_unit = unit;
2862 
2863 	if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
2864 		bfe_error(dip, "pci_config_setup failed");
2865 		goto fail0;
2866 	}
2867 
2868 	/*
2869 	 * Enable IO space, Bus Master and Memory Space accessess.
2870 	 */
2871 	ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM);
2872 	pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM,
2873 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret);
2874 
2875 	ddi_set_driver_private(dip, bfe);
2876 
2877 	/* Identify hardware */
2878 	if (bfe_identify_hardware(bfe) == BFE_FAILURE) {
2879 		bfe_error(dip, "Could not identify device");
2880 		goto fail1;
2881 	}
2882 
2883 	if (bfe_regs_map(bfe) != DDI_SUCCESS) {
2884 		bfe_error(dip, "Could not map device registers");
2885 		goto fail1;
2886 	}
2887 
2888 	(void) bfe_get_chip_config(bfe);
2889 
2890 	/*
2891 	 * Register with MAC layer
2892 	 */
2893 	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
2894 		bfe_error(dip, "mac_alloc() failed");
2895 		goto fail2;
2896 	}
2897 
2898 	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2899 	macreg->m_driver = bfe;
2900 	macreg->m_dip = dip;
2901 	macreg->m_instance = unit;
2902 	macreg->m_src_addr = bfe->bfe_ether_addr;
2903 	macreg->m_callbacks = &bfe_mac_callbacks;
2904 	macreg->m_min_sdu = 0;
2905 	macreg->m_max_sdu = ETHERMTU;
2906 	macreg->m_margin = VLAN_TAGSZ;
2907 
2908 	if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) {
2909 		bfe_error(dip, "mac_register() failed with %d error", ret);
2910 		mac_free(macreg);
2911 		goto fail2;
2912 	}
2913 
2914 	mac_free(macreg);
2915 
2916 	rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER,
2917 	    DDI_INTR_PRI(bfe->bfe_intrpri));
2918 
2919 	if (bfe_add_intr(bfe) != DDI_SUCCESS) {
2920 		bfe_error(dip, "Could not add interrupt");
2921 		goto fail3;
2922 	}
2923 
2924 	if (bfe_rings_alloc(bfe) != DDI_SUCCESS) {
2925 		bfe_error(dip, "Could not allocate TX/RX Ring");
2926 		goto fail4;
2927 	}
2928 
2929 	/* Init and then reset the chip */
2930 	bfe->bfe_chip_action = 0;
2931 	bfe_init_vars(bfe);
2932 
2933 	/* PHY will also start running */
2934 	bfe_chip_reset(bfe);
2935 
2936 	/*
2937 	 * Even though we enable the interrupts here but chip's interrupt
2938 	 * is not enabled yet. It will be enabled once we plumb the interface.
2939 	 */
2940 	if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
2941 		bfe_error(dip, "Could not enable interrupt");
2942 		goto fail4;
2943 	}
2944 
2945 	return (DDI_SUCCESS);
2946 
2947 fail4:
2948 	bfe_remove_intr(bfe);
2949 fail3:
2950 	mac_unregister(bfe->bfe_machdl);
2951 fail2:
2952 	bfe_unmap_regs(bfe);
2953 fail1:
2954 	pci_config_teardown(&bfe->bfe_conf_handle);
2955 fail0:
2956 	kmem_free(bfe, sizeof (bfe_t));
2957 	return (DDI_FAILURE);
2958 }
2959 
/*
 * detach(9E) entry point: handles DDI_DETACH (full teardown) and
 * DDI_SUSPEND (quiesce the chip, keep state for a later DDI_RESUME).
 */
static int
bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * First unregister with MAC layer before stopping DMA
		 * engine.  If MAC refuses (e.g. still in use), detach
		 * fails without having torn anything down.
		 */
		if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		bfe->bfe_machdl = NULL;

		/*
		 * Quiesce the chip first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe_release_locks(bfe);

		(void) ddi_intr_disable(bfe->bfe_intrhdl);

		/* Make sure timer is gone. */
		bfe_stop_timer(bfe);

		/*
		 * Free the DMA resources for buffer and then descriptors
		 * (rings are only torn down if attach got far enough to
		 * mark them BFE_RING_ALLOCATED).
		 */
		if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* TX */
			bfe_buffer_free(&bfe->bfe_tx_ring);
			bfe_ring_desc_free(&bfe->bfe_tx_ring);
		}

		if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* RX */
			bfe_buffer_free(&bfe->bfe_rx_ring);
			bfe_ring_desc_free(&bfe->bfe_rx_ring);
		}

		bfe_remove_intr(bfe);
		bfe_unmap_regs(bfe);
		pci_config_teardown(&bfe->bfe_conf_handle);

		mutex_destroy(&bfe->bfe_tx_ring.r_lock);
		mutex_destroy(&bfe->bfe_rx_ring.r_lock);
		rw_destroy(&bfe->bfe_rwlock);

		kmem_free(bfe, sizeof (bfe_t));

		ddi_set_driver_private(devinfo, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * Grab all the locks first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
		bfe_release_locks(bfe);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
3047 
3048 /*
3049  * Quiesce the card for fast reboot
3050  */
int
bfe_quiesce(dev_info_t *dev_info)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(dev_info);

	/*
	 * quiesce(9E) runs single-threaded late in shutdown and must
	 * not block, so no locks are taken here; just silence the chip
	 * and the PHY.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_QUIESCED;

	return (DDI_SUCCESS);
}
3064 
/*
 * Character/block entry points.  bfe is driven entirely through the
 * MAC framework (mac_register()), so every cb entry point is stubbed
 * out with nulldev/nodev.
 */
static struct cb_ops bfe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3085 
/*
 * Device operations: attach/detach drive the life cycle, bfe_quiesce
 * supports fast reboot, power management goes through generic
 * ddi_power.
 */
static struct dev_ops bfe_dev_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* devo_refcnt */
	NULL,		/* devo_getinfo */
	nulldev,	/* devo_identify */
	nulldev,	/* devo_probe */
	bfe_attach,	/* devo_attach */
	bfe_detach,	/* devo_detach */
	nodev,		/* devo_reset */
	&bfe_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	ddi_power,	/* devo_power */
	bfe_quiesce	/* devo_quiesce */
};
3100 
/* Module linkage element describing this module as a device driver. */
static struct modldrv bfe_modldrv = {
	&mod_driverops,
	bfe_ident,
	&bfe_dev_ops
};
3106 
/* Overall module linkage: a single driver element. */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bfe_modldrv, NULL
};
3110 
/* _info(9E): report module information via mod_info(). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3116 
3117 int
3118 _init(void)
3119 {
3120 	int	status;
3121 
3122 	mac_init_ops(&bfe_dev_ops, MODULE_NAME);
3123 	status = mod_install(&modlinkage);
3124 	if (status == DDI_FAILURE)
3125 		mac_fini_ops(&bfe_dev_ops);
3126 	return (status);
3127 }
3128 
3129 int
3130 _fini(void)
3131 {
3132 	int status;
3133 
3134 	status = mod_remove(&modlinkage);
3135 	if (status == 0) {
3136 		mac_fini_ops(&bfe_dev_ops);
3137 	}
3138 	return (status);
3139 }
3140