xref: /titanic_51/usr/src/uts/common/io/bfe/bfe.c (revision a799b1e741b6f59012a469e6b57c40cb8061127b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #include <sys/stream.h>
27 #include <sys/strsun.h>
28 #include <sys/stat.h>
29 #include <sys/pci.h>
30 #include <sys/modctl.h>
31 #include <sys/kstat.h>
32 #include <sys/ethernet.h>
33 #include <sys/devops.h>
34 #include <sys/debug.h>
35 #include <sys/conf.h>
36 #include <sys/sysmacros.h>
37 #include <sys/dditypes.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/miiregs.h>
41 #include <sys/byteorder.h>
42 #include <sys/cyclic.h>
43 #include <sys/note.h>
44 #include <sys/crc32.h>
45 #include <sys/mac_provider.h>
46 #include <sys/mac_ether.h>
47 #include <sys/vlan.h>
48 #include <sys/errno.h>
49 #include <sys/sdt.h>
50 #include <sys/strsubr.h>
51 
52 #include "bfe.h"
53 #include "bfe_hw.h"
54 
55 
56 /*
57  * Broadcom BCM4401 chipsets use two rings :
58  *
59  * - One TX : For sending packets down the wire.
60  * - One RX : For receiving packets.
61  *
62  * Each ring can have any number of descriptors (configured during attach).
63  * As of now we configure only 128 descriptor per ring (TX/RX). Each descriptor
64  * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for
65  * the packet and control information (like start/end of frame or end of table).
66  * The descriptor table is allocated first and then a DMA buffer (for a packet)
67  * is allocated and linked to each descriptor.
68  *
69  * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX
70  * interrupt, the stat register will point to current descriptor being
71  * processed.
72  *
73  * Here's an example of TX and RX ring :
74  *
75  * TX:
76  *
77  *   Base of the descriptor table is programmed using BFE_DMATX_CTRL control
78  *   register. Each 'addr' points to DMA buffer (or packet data buffer) to
79  *   be transmitted and 'ctl' has the length of the packet (usually MTU).
80  *
81  *  ----------------------|
82  *  | addr |Descriptor 0  |
83  *  | ctl  |              |
84  *  ----------------------|
85  *  | addr |Descriptor 1  |    SOF (start of the frame)
86  *  | ctl  |              |
87  *  ----------------------|
88  *  | ...  |Descriptor... |    EOF (end of the frame)
89  *  | ...  |              |
90  *  ----------------------|
91  *  | addr |Descriptor 127|
92  *  | ctl  | EOT          |    EOT (End of Table)
93  *  ----------------------|
94  *
95  * 'r_curr_desc'  : pointer to current descriptor which can be used to transmit
96  *                  a packet.
97  * 'r_avail_desc' : decremented whenever a packet is being sent.
98  * 'r_cons_desc'  : incremented whenever a packet is sent down the wire and
99  *                  notified by an interrupt to bfe driver.
100  *
101  * RX:
102  *
103  *   Base of the descriptor table is programmed using BFE_DMARX_CTRL control
104  *   register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
105  *   contains the size of the DMA buffer and all the DMA buffers are
106  *   pre-allocated during attach and hence the maximum size of the packet is
107  *   also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
108  *   the packet length is embedded in bfe_header_t which is added by the
109  *   chip in the beginning of the packet.
110  *
111  *  ----------------------|
112  *  | addr |Descriptor 0  |
113  *  | ctl  |              |
114  *  ----------------------|
115  *  | addr |Descriptor 1  |
116  *  | ctl  |              |
117  *  ----------------------|
118  *  | ...  |Descriptor... |
119  *  | ...  |              |
120  *  ----------------------|
121  *  | addr |Descriptor 127|
122  *  | ctl  | EOT          |    EOT (End of Table)
123  *  ----------------------|
124  *
125  * 'r_curr_desc'  : pointer to current descriptor while receiving a packet.
126  *
127  */
128 
129 #define	MODULE_NAME	"bfe"
130 
131 /*
132  * Used for checking PHY (link state, speed)
133  */
134 #define	BFE_TIMEOUT_INTERVAL	(1000 * 1000 * 1000)
135 
136 
137 /*
138  * Chip restart action and reason for restart
139  */
140 #define	BFE_ACTION_RESTART		0x1	/* For restarting the chip */
141 #define	BFE_ACTION_RESTART_SETPROP	0x2	/* restart due to setprop */
142 #define	BFE_ACTION_RESTART_FAULT	0x4	/* restart due to fault */
143 #define	BFE_ACTION_RESTART_PKT		0x8	/* restart due to pkt timeout */
144 
145 static	char	bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
146 
147 /*
148  * Function Prototypes for bfe driver.
149  */
150 static	int	bfe_check_link(bfe_t *);
151 static	void	bfe_report_link(bfe_t *);
152 static	void	bfe_chip_halt(bfe_t *);
153 static	void	bfe_chip_reset(bfe_t *);
154 static	void	bfe_tx_desc_init(bfe_ring_t *);
155 static	void	bfe_rx_desc_init(bfe_ring_t *);
156 static	void	bfe_set_rx_mode(bfe_t *);
157 static	void	bfe_enable_chip_intrs(bfe_t *);
158 static	void	bfe_chip_restart(bfe_t *);
159 static	void	bfe_init_vars(bfe_t *);
160 static	void	bfe_clear_stats(bfe_t *);
161 static	void	bfe_gather_stats(bfe_t *);
162 static	void	bfe_error(dev_info_t *, char *, ...);
163 static	int	bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
164     uint_t, void *, uint_t *);
165 static	int	bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
166     const void *);
167 static	int	bfe_tx_reclaim(bfe_ring_t *);
168 int	bfe_mac_set_ether_addr(void *, const uint8_t *);
169 
170 
171 /*
172  * Macros for ddi_dma_sync().
173  */
174 #define	SYNC_DESC(r, s, l, d)	\
175 	(void) ddi_dma_sync(r->r_desc_dma_handle, \
176 	    (off_t)(s * sizeof (bfe_desc_t)), \
177 	    (size_t)(l * sizeof (bfe_desc_t)), \
178 	    d)
179 
180 #define	SYNC_BUF(r, s, b, l, d) \
181 	(void) ddi_dma_sync(r->r_buf_dma[s].handle, \
182 	    (off_t)(b), (size_t)(l), d)
183 
184 /*
185  * Supported Broadcom BCM4401 Cards.
186  */
187 static bfe_cards_t bfe_cards[] = {
188 	{ 0x14e4, 0x170c, "BCM4401 100Base-TX"},
189 };
190 
191 
192 /*
193  * DMA attributes for device registers, packet data (buffer) and
194  * descriptor table.
195  */
196 static struct ddi_device_acc_attr bfe_dev_attr = {
197 	DDI_DEVICE_ATTR_V0,
198 	DDI_STRUCTURE_LE_ACC,
199 	DDI_STRICTORDER_ACC
200 };
201 
202 static struct ddi_device_acc_attr bfe_buf_attr = {
203 	DDI_DEVICE_ATTR_V0,
204 	DDI_NEVERSWAP_ACC,	/* native endianness */
205 	DDI_STRICTORDER_ACC
206 };
207 
208 static ddi_dma_attr_t bfe_dma_attr_buf = {
209 	DMA_ATTR_V0,		/* dma_attr_version */
210 	0,			/* dma_attr_addr_lo */
211 	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
212 	0x1fff,			/* dma_attr_count_max */
213 	8,			/* dma_attr_align */
214 	0,			/* dma_attr_burstsizes */
215 	1,			/* dma_attr_minxfer */
216 	0x1fff,			/* dma_attr_maxxfer */
217 	BFE_PCI_DMA - 1,	/* dma_attr_seg */
218 	1,			/* dma_attr_sgllen */
219 	1,			/* dma_attr_granular */
220 	0			/* dma_attr_flags */
221 };
222 
223 static ddi_dma_attr_t bfe_dma_attr_desc = {
224 	DMA_ATTR_V0,		/* dma_attr_version */
225 	0,			/* dma_attr_addr_lo */
226 	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
227 	BFE_PCI_DMA - 1,	/* dma_attr_count_max */
228 	BFE_DESC_ALIGN,		/* dma_attr_align */
229 	0,			/* dma_attr_burstsizes */
230 	1,			/* dma_attr_minxfer */
231 	BFE_PCI_DMA - 1,	/* dma_attr_maxxfer */
232 	BFE_PCI_DMA - 1,	/* dma_attr_seg */
233 	1,			/* dma_attr_sgllen */
234 	1,			/* dma_attr_granular */
235 	0			/* dma_attr_flags */
236 };
237 
238 /*
239  * Ethernet broadcast addresses.
240  */
241 static uchar_t bfe_broadcast[ETHERADDRL] = {
242 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
243 };
244 
245 #define	ASSERT_ALL_LOCKS(bfe) {	\
246 	ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock));	\
247 	ASSERT(rw_write_held(&bfe->bfe_rwlock));	\
248 }
249 
250 /*
251  * Debugging and error reproting code.
252  */
253 static void
254 bfe_error(dev_info_t *dip, char *fmt, ...)
255 {
256 	va_list ap;
257 	char	buf[256];
258 
259 	va_start(ap, fmt);
260 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
261 	va_end(ap);
262 
263 	if (dip) {
264 		cmn_err(CE_WARN, "%s%d: %s",
265 		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
266 	} else {
267 		cmn_err(CE_WARN, "bfe: %s", buf);
268 	}
269 }
270 
271 /*
272  * Grabs all necessary locks to block any other operation on the chip.
273  */
274 static void
275 bfe_grab_locks(bfe_t *bfe)
276 {
277 	bfe_ring_t *tx = &bfe->bfe_tx_ring;
278 
279 	/*
280 	 * Grab all the locks.
281 	 * - bfe_rwlock : locks down whole chip including RX.
282 	 * - tx's r_lock : locks down only TX side.
283 	 */
284 	rw_enter(&bfe->bfe_rwlock, RW_WRITER);
285 	mutex_enter(&tx->r_lock);
286 
287 	/*
288 	 * Note that we don't use RX's r_lock.
289 	 */
290 }
291 
292 /*
293  * Release lock on chip/drver.
294  */
295 static void
296 bfe_release_locks(bfe_t *bfe)
297 {
298 	bfe_ring_t *tx = &bfe->bfe_tx_ring;
299 
300 	/*
301 	 * Release all the locks in the order in which they were grabbed.
302 	 */
303 	mutex_exit(&tx->r_lock);
304 	rw_exit(&bfe->bfe_rwlock);
305 }
306 
307 
308 /*
309  * It's used to make sure that the write to device register was successful.
310  */
311 static int
312 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit,
313     ulong_t t, const int clear)
314 {
315 	ulong_t i;
316 	uint32_t v;
317 
318 	for (i = 0; i < t; i++) {
319 		v = INL(bfe, reg);
320 
321 		if (clear && !(v & bit))
322 			break;
323 
324 		if (!clear && (v & bit))
325 			break;
326 
327 		drv_usecwait(10);
328 	}
329 
330 	/* if device still didn't see the value */
331 	if (i == t)
332 		return (-1);
333 
334 	return (0);
335 }
336 
337 /*
338  * PHY functions (read, write, stop, reset and startup)
339  */
340 static int
341 bfe_read_phy(bfe_t *bfe, uint32_t reg)
342 {
343 	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
344 	OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
345 	    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
346 	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
347 	    (reg << BFE_MDIO_RA_SHIFT) |
348 	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
349 
350 	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
351 
352 	return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA));
353 }
354 
355 static void
356 bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val)
357 {
358 	OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
359 	OUTL(bfe,  BFE_MDIO_DATA, (BFE_MDIO_SB_START |
360 	    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
361 	    (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) |
362 	    (reg << BFE_MDIO_RA_SHIFT) |
363 	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
364 	    (val & BFE_MDIO_DATA_DATA)));
365 
366 	(void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0);
367 }
368 
369 /*
370  * It resets the PHY layer.
371  */
372 static int
373 bfe_reset_phy(bfe_t *bfe)
374 {
375 	uint32_t i;
376 
377 	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
378 	drv_usecwait(100);
379 	for (i = 0; i < 10; i++) {
380 		if (bfe_read_phy(bfe, MII_CONTROL) &
381 		    MII_CONTROL_RESET) {
382 			drv_usecwait(500);
383 			continue;
384 		}
385 
386 		break;
387 	}
388 
389 	if (i == 10) {
390 		bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
391 		bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
392 		return (BFE_FAILURE);
393 	}
394 
395 	bfe->bfe_phy_state = BFE_PHY_RESET_DONE;
396 
397 	return (BFE_SUCCESS);
398 }
399 
400 /*
401  * Make sure timer function is out of our way and especially during
402  * detach.
403  */
404 static void
405 bfe_stop_timer(bfe_t *bfe)
406 {
407 	if (bfe->bfe_periodic_id) {
408 		ddi_periodic_delete(bfe->bfe_periodic_id);
409 		bfe->bfe_periodic_id = NULL;
410 	}
411 }
412 
413 /*
414  * Stops the PHY
415  */
416 static void
417 bfe_stop_phy(bfe_t *bfe)
418 {
419 	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
420 	    MII_CONTROL_ISOLATE);
421 
422 	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
423 	bfe->bfe_chip.speed = 0;
424 	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
425 
426 	bfe->bfe_phy_state = BFE_PHY_STOPPED;
427 
428 	/*
429 	 * Report the link status to MAC layer.
430 	 */
431 	if (bfe->bfe_machdl != NULL)
432 		(void) bfe_report_link(bfe);
433 }
434 
435 static int
436 bfe_probe_phy(bfe_t *bfe)
437 {
438 	int phy;
439 	uint32_t status;
440 
441 	if (bfe->bfe_phy_addr) {
442 		status = bfe_read_phy(bfe, MII_STATUS);
443 		if (status != 0xffff && status != 0) {
444 			bfe_write_phy(bfe, MII_CONTROL, 0);
445 			return (BFE_SUCCESS);
446 		}
447 	}
448 
449 	for (phy = 0; phy < 32; phy++) {
450 		bfe->bfe_phy_addr = phy;
451 		status = bfe_read_phy(bfe, MII_STATUS);
452 		if (status != 0xffff && status != 0) {
453 			bfe_write_phy(bfe, MII_CONTROL, 0);
454 			return (BFE_SUCCESS);
455 		}
456 	}
457 
458 	return (BFE_FAILURE);
459 }
460 
461 /*
462  * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
463  * status.
464  */
465 static void
466 bfe_timeout(void *arg)
467 {
468 	bfe_t *bfe = (bfe_t *)arg;
469 
470 	/*
471 	 * We don't grab any lock because bfe can't go away.
472 	 * untimeout() will wait for this timeout instance to complete.
473 	 */
474 	if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
475 		/*
476 		 * Restart the chip.
477 		 */
478 		bfe_grab_locks(bfe);
479 		bfe_chip_restart(bfe);
480 		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
481 		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
482 		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
483 		bfe_release_locks(bfe);
484 		mac_tx_update(bfe->bfe_machdl);
485 		/* Restart will register a new timeout */
486 		return;
487 	}
488 
489 	rw_enter(&bfe->bfe_rwlock, RW_READER);
490 
491 	if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
492 		hrtime_t hr;
493 
494 		hr = gethrtime();
495 		if (bfe->bfe_tx_stall_time != 0 &&
496 		    hr > bfe->bfe_tx_stall_time) {
497 			DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
498 			    char *, "pkt timeout");
499 			bfe->bfe_chip_action |=
500 			    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
501 			bfe->bfe_tx_stall_time = 0;
502 		}
503 	}
504 
505 	if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
506 		/*
507 		 * Report the link status to MAC layer if link status changed.
508 		 */
509 		if (bfe_check_link(bfe)) {
510 			bfe_report_link(bfe);
511 			if (bfe->bfe_chip.link == LINK_STATE_UP) {
512 				uint32_t val, flow;
513 
514 				val = INL(bfe, BFE_TX_CTRL);
515 				val &= ~BFE_TX_DUPLEX;
516 				if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
517 					val |= BFE_TX_DUPLEX;
518 					flow = INL(bfe, BFE_RXCONF);
519 					flow &= ~BFE_RXCONF_FLOW;
520 					OUTL(bfe, BFE_RXCONF, flow);
521 
522 					flow = INL(bfe, BFE_MAC_FLOW);
523 					flow &= ~(BFE_FLOW_RX_HIWAT);
524 					OUTL(bfe, BFE_MAC_FLOW, flow);
525 				}
526 
527 				OUTL(bfe, BFE_TX_CTRL, val);
528 				DTRACE_PROBE1(link__up,
529 				    int, bfe->bfe_unit);
530 			}
531 		}
532 	}
533 
534 	rw_exit(&bfe->bfe_rwlock);
535 }
536 
537 /*
538  * Starts PHY layer.
539  */
540 static int
541 bfe_startup_phy(bfe_t *bfe)
542 {
543 	uint16_t bmsr, bmcr, anar;
544 	int	prog, s;
545 	int phyid1, phyid2;
546 
547 	if (bfe_probe_phy(bfe) == BFE_FAILURE) {
548 		bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
549 		return (BFE_FAILURE);
550 	}
551 
552 	(void) bfe_reset_phy(bfe);
553 
554 	phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
555 	phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
556 	bfe->bfe_phy_id = (phyid1 << 16) | phyid2;
557 
558 	bmsr = bfe_read_phy(bfe, MII_STATUS);
559 	anar = bfe_read_phy(bfe, MII_AN_ADVERT);
560 
561 again:
562 	anar &= ~(MII_ABILITY_100BASE_T4 |
563 	    MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
564 	    MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);
565 
566 	/*
567 	 * Supported hardware modes are in bmsr.
568 	 */
569 	bfe->bfe_chip.bmsr = bmsr;
570 
571 	/*
572 	 * Assume no capabilities are supported in the hardware.
573 	 */
574 	bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
575 	    bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
576 	    bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;
577 
578 	/*
579 	 * Assume property is set.
580 	 */
581 	s = 1;
582 	if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
583 		/*
584 		 * Property is not set which means bfe_mac_setprop()
585 		 * is not called on us.
586 		 */
587 		s = 0;
588 	}
589 
590 	bmcr = prog = 0;
591 
592 	if (bmsr & MII_STATUS_100_BASEX_FD) {
593 		bfe->bfe_cap_100fdx = 1;
594 		if (s == 0) {
595 			anar |= MII_ABILITY_100BASE_TX_FD;
596 			bfe->bfe_adv_100fdx = 1;
597 			prog++;
598 		} else if (bfe->bfe_adv_100fdx) {
599 			anar |= MII_ABILITY_100BASE_TX_FD;
600 			prog++;
601 		}
602 	}
603 
604 	if (bmsr & MII_STATUS_100_BASE_T4) {
605 		bfe->bfe_cap_100T4 = 1;
606 		if (s == 0) {
607 			anar |= MII_ABILITY_100BASE_T4;
608 			bfe->bfe_adv_100T4 = 1;
609 			prog++;
610 		} else if (bfe->bfe_adv_100T4) {
611 			anar |= MII_ABILITY_100BASE_T4;
612 			prog++;
613 		}
614 	}
615 
616 	if (bmsr & MII_STATUS_100_BASEX) {
617 		bfe->bfe_cap_100hdx = 1;
618 		if (s == 0) {
619 			anar |= MII_ABILITY_100BASE_TX;
620 			bfe->bfe_adv_100hdx = 1;
621 			prog++;
622 		} else if (bfe->bfe_adv_100hdx) {
623 			anar |= MII_ABILITY_100BASE_TX;
624 			prog++;
625 		}
626 	}
627 
628 	if (bmsr & MII_STATUS_10_FD) {
629 		bfe->bfe_cap_10fdx = 1;
630 		if (s == 0) {
631 			anar |= MII_ABILITY_10BASE_T_FD;
632 			bfe->bfe_adv_10fdx = 1;
633 			prog++;
634 		} else if (bfe->bfe_adv_10fdx) {
635 			anar |= MII_ABILITY_10BASE_T_FD;
636 			prog++;
637 		}
638 	}
639 
640 	if (bmsr & MII_STATUS_10) {
641 		bfe->bfe_cap_10hdx = 1;
642 		if (s == 0) {
643 			anar |= MII_ABILITY_10BASE_T;
644 			bfe->bfe_adv_10hdx = 1;
645 			prog++;
646 		} else if (bfe->bfe_adv_10hdx) {
647 			anar |= MII_ABILITY_10BASE_T;
648 			prog++;
649 		}
650 	}
651 
652 	if (bmsr & MII_STATUS_CANAUTONEG) {
653 		bfe->bfe_cap_aneg = 1;
654 		if (s == 0) {
655 			bfe->bfe_adv_aneg = 1;
656 		}
657 	}
658 
659 	if (prog == 0) {
660 		if (s == 0) {
661 			bfe_error(bfe->bfe_dip,
662 			    "No valid link mode selected. Powering down PHY");
663 			bfe_stop_phy(bfe);
664 			bfe_report_link(bfe);
665 			return (BFE_FAILURE);
666 		}
667 
668 		/*
669 		 * If property is set then user would have goofed up. So we
670 		 * go back to default properties.
671 		 */
672 		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
673 		goto again;
674 	}
675 
676 	if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
677 		bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
678 	} else {
679 		if (bfe->bfe_adv_100fdx)
680 			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
681 		else if (bfe->bfe_adv_100hdx)
682 			bmcr = MII_CONTROL_100MB;
683 		else if (bfe->bfe_adv_10fdx)
684 			bmcr = MII_CONTROL_FDUPLEX;
685 		else
686 			bmcr = 0;		/* 10HDX */
687 	}
688 
689 	if (prog)
690 		bfe_write_phy(bfe, MII_AN_ADVERT, anar);
691 
692 	if (bmcr)
693 		bfe_write_phy(bfe, MII_CONTROL, bmcr);
694 
695 	bfe->bfe_mii_anar = anar;
696 	bfe->bfe_mii_bmcr = bmcr;
697 	bfe->bfe_phy_state = BFE_PHY_STARTED;
698 
699 	if (bfe->bfe_periodic_id == NULL) {
700 		bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
701 		    (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);
702 
703 		DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
704 	}
705 
706 	DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
707 	    int, bmsr, int, bmcr, int, anar);
708 
709 	return (BFE_SUCCESS);
710 }
711 
712 /*
713  * Reports link status back to MAC Layer.
714  */
715 static void
716 bfe_report_link(bfe_t *bfe)
717 {
718 	mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
719 }
720 
721 /*
722  * Reads PHY/MII registers and get the link status for us.
723  */
724 static int
725 bfe_check_link(bfe_t *bfe)
726 {
727 	uint16_t bmsr, bmcr, anar, anlpar;
728 	int speed, duplex, link;
729 
730 	speed = bfe->bfe_chip.speed;
731 	duplex = bfe->bfe_chip.duplex;
732 	link = bfe->bfe_chip.link;
733 
734 	bmsr = bfe_read_phy(bfe, MII_STATUS);
735 	bfe->bfe_mii_bmsr = bmsr;
736 
737 	bmcr = bfe_read_phy(bfe, MII_CONTROL);
738 
739 	anar = bfe_read_phy(bfe, MII_AN_ADVERT);
740 	bfe->bfe_mii_anar = anar;
741 
742 	anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
743 	bfe->bfe_mii_anlpar = anlpar;
744 
745 	bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);
746 
747 	/*
748 	 * If exp register is not present in PHY.
749 	 */
750 	if (bfe->bfe_mii_exp == 0xffff) {
751 		bfe->bfe_mii_exp = 0;
752 	}
753 
754 	if ((bmsr & MII_STATUS_LINKUP) == 0) {
755 		bfe->bfe_chip.link = LINK_STATE_DOWN;
756 		bfe->bfe_chip.speed = 0;
757 		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
758 		goto done;
759 	}
760 
761 	bfe->bfe_chip.link = LINK_STATE_UP;
762 
763 	if (!(bmcr & MII_CONTROL_ANE)) {
764 		/* Forced mode */
765 		if (bmcr & MII_CONTROL_100MB)
766 			bfe->bfe_chip.speed = 100000000;
767 		else
768 			bfe->bfe_chip.speed = 10000000;
769 
770 		if (bmcr & MII_CONTROL_FDUPLEX)
771 			bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
772 		else
773 			bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
774 
775 	} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
776 	    (!(bmsr & MII_STATUS_ANDONE))) {
777 		bfe->bfe_chip.speed = 0;
778 		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
779 	} else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
780 		bfe->bfe_chip.speed = 100000000;
781 		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
782 	} else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
783 		bfe->bfe_chip.speed = 100000000;
784 		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
785 	} else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
786 		bfe->bfe_chip.speed = 100000000;
787 		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
788 	} else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
789 		bfe->bfe_chip.speed = 10000000;
790 		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
791 	} else if (anar & anlpar & MII_ABILITY_10BASE_T) {
792 		bfe->bfe_chip.speed = 10000000;
793 		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
794 	} else {
795 		bfe->bfe_chip.speed = 0;
796 		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
797 	}
798 
799 done:
800 	/*
801 	 * If speed or link status or duplex mode changed then report to
802 	 * MAC layer which is done by the caller.
803 	 */
804 	if (speed != bfe->bfe_chip.speed ||
805 	    duplex != bfe->bfe_chip.duplex ||
806 	    link != bfe->bfe_chip.link) {
807 		return (1);
808 	}
809 
810 	return (0);
811 }
812 
813 static void
814 bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
815 {
816 	uint32_t v;
817 
818 	v = ((uint32_t)d[2] << 24);
819 	v |= ((uint32_t)d[3] << 16);
820 	v |= ((uint32_t)d[4] << 8);
821 	v |= (uint32_t)d[5];
822 
823 	OUTL(bfe, BFE_CAM_DATA_LO, v);
824 	v = (BFE_CAM_HI_VALID |
825 	    (((uint32_t)d[0]) << 8) |
826 	    (((uint32_t)d[1])));
827 
828 	OUTL(bfe, BFE_CAM_DATA_HI, v);
829 	OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
830 	    ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
831 	(void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
832 }
833 
834 /*
835  * Chip related functions (halt, reset, start).
836  */
837 static void
838 bfe_chip_halt(bfe_t *bfe)
839 {
840 	/*
841 	 * Disables interrupts.
842 	 */
843 	OUTL(bfe, BFE_INTR_MASK, 0);
844 	FLUSH(bfe, BFE_INTR_MASK);
845 
846 	OUTL(bfe,  BFE_ENET_CTRL, BFE_ENET_DISABLE);
847 
848 	/*
849 	 * Wait until TX and RX finish their job.
850 	 */
851 	(void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);
852 
853 	/*
854 	 * Disables DMA engine.
855 	 */
856 	OUTL(bfe, BFE_DMARX_CTRL, 0);
857 	OUTL(bfe, BFE_DMATX_CTRL, 0);
858 
859 	drv_usecwait(10);
860 
861 	bfe->bfe_chip_state = BFE_CHIP_HALT;
862 }
863 
864 static void
865 bfe_chip_restart(bfe_t *bfe)
866 {
867 	DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
868 	    int, bfe->bfe_chip_action);
869 
870 	/*
871 	 * Halt chip and PHY.
872 	 */
873 	bfe_chip_halt(bfe);
874 	bfe_stop_phy(bfe);
875 	bfe->bfe_chip_state = BFE_CHIP_STOPPED;
876 
877 	/*
878 	 * Init variables.
879 	 */
880 	bfe_init_vars(bfe);
881 
882 	/*
883 	 * Reset chip and start PHY.
884 	 */
885 	bfe_chip_reset(bfe);
886 
887 	/*
888 	 * DMA descriptor rings.
889 	 */
890 	bfe_tx_desc_init(&bfe->bfe_tx_ring);
891 	bfe_rx_desc_init(&bfe->bfe_rx_ring);
892 
893 	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
894 	bfe_set_rx_mode(bfe);
895 	bfe_enable_chip_intrs(bfe);
896 }
897 
898 /*
899  * Disables core by stopping the clock.
900  */
901 static void
902 bfe_core_disable(bfe_t *bfe)
903 {
904 	if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
905 		return;
906 
907 	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
908 	(void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
909 	(void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
910 	OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
911 	FLUSH(bfe, BFE_SBTMSLOW);
912 	drv_usecwait(10);
913 	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
914 	drv_usecwait(10);
915 }
916 
917 /*
918  * Resets core.
919  */
920 static void
921 bfe_core_reset(bfe_t *bfe)
922 {
923 	uint32_t val;
924 
925 	/*
926 	 * First disable the core.
927 	 */
928 	bfe_core_disable(bfe);
929 
930 	OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
931 	FLUSH(bfe, BFE_SBTMSLOW);
932 	drv_usecwait(1);
933 
934 	if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
935 		OUTL(bfe, BFE_SBTMSHIGH, 0);
936 
937 	val = INL(bfe, BFE_SBIMSTATE);
938 	if (val & (BFE_IBE | BFE_TO))
939 		OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));
940 
941 	OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
942 	FLUSH(bfe, BFE_SBTMSLOW);
943 	drv_usecwait(1);
944 
945 	OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
946 	FLUSH(bfe, BFE_SBTMSLOW);
947 	drv_usecwait(1);
948 }
949 
950 static void
951 bfe_setup_config(bfe_t *bfe, uint32_t cores)
952 {
953 	uint32_t bar_orig, val;
954 
955 	/*
956 	 * Change bar0 window to map sbtopci registers.
957 	 */
958 	bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN);
959 	pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI);
960 
961 	/* Just read it and don't do anything */
962 	val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE;
963 
964 	val = INL(bfe, BFE_SBINTVEC);
965 	val |= cores;
966 	OUTL(bfe, BFE_SBINTVEC, val);
967 
968 	val = INL(bfe, BFE_SSB_PCI_TRANS_2);
969 	val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
970 	OUTL(bfe, BFE_SSB_PCI_TRANS_2, val);
971 
972 	/*
973 	 * Restore bar0 window mapping.
974 	 */
975 	pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig);
976 }
977 
978 /*
979  * Resets chip and starts PHY.
980  */
981 static void
982 bfe_chip_reset(bfe_t *bfe)
983 {
984 	uint32_t val;
985 
986 	/* Set the interrupt vector for the enet core */
987 	bfe_setup_config(bfe, BFE_INTVEC_ENET0);
988 
989 	/* check if core is up */
990 	val = INL(bfe, BFE_SBTMSLOW) &
991 	    (BFE_RESET | BFE_REJECT | BFE_CLOCK);
992 
993 	if (val == BFE_CLOCK) {
994 		OUTL(bfe, BFE_RCV_LAZY, 0);
995 		OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);
996 		(void) bfe_wait_bit(bfe, BFE_ENET_CTRL,
997 		    BFE_ENET_DISABLE, 10, 1);
998 		OUTL(bfe, BFE_DMATX_CTRL, 0);
999 		FLUSH(bfe, BFE_DMARX_STAT);
1000 		drv_usecwait(20000);	/* 20 milli seconds */
1001 		if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) {
1002 			(void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE,
1003 			    10, 0);
1004 		}
1005 		OUTL(bfe, BFE_DMARX_CTRL, 0);
1006 	}
1007 
1008 	bfe_core_reset(bfe);
1009 	bfe_clear_stats(bfe);
1010 
1011 	OUTL(bfe, BFE_MDIO_CTRL, 0x8d);
1012 	val = INL(bfe, BFE_DEVCTRL);
1013 	if (!(val & BFE_IPP))
1014 		OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL);
1015 	else if (INL(bfe, BFE_DEVCTRL & BFE_EPR)) {
1016 		OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR);
1017 		drv_usecwait(20000);    /* 20 milli seconds */
1018 	}
1019 
1020 	OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
1021 
1022 	OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
1023 
1024 	OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
1025 	    BFE_LAZY_FC_MASK));
1026 
1027 	OUTL_OR(bfe, BFE_RCV_LAZY, 0);
1028 
1029 	OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
1030 	OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);
1031 
1032 	OUTL(bfe, BFE_TX_WMARK, 56);
1033 
1034 	/* Program DMA channels */
1035 	OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
1036 
1037 	/*
1038 	 * DMA addresses need to be added to BFE_PCI_DMA
1039 	 */
1040 	OUTL(bfe, BFE_DMATX_ADDR,
1041 	    bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1042 
1043 	OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
1044 	    | BFE_RX_CTRL_ENABLE);
1045 
1046 	OUTL(bfe, BFE_DMARX_ADDR,
1047 	    bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);
1048 
1049 	(void) bfe_startup_phy(bfe);
1050 
1051 	bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
1052 }
1053 
1054 /*
1055  * It enables interrupts. Should be the last step while starting chip.
1056  */
1057 static void
1058 bfe_enable_chip_intrs(bfe_t *bfe)
1059 {
1060 	/* Enable the chip and core */
1061 	OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);
1062 
1063 	/* Enable interrupts */
1064 	OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
1065 }
1066 
1067 /*
1068  * Common code to take care of setting RX side mode (filter).
1069  */
1070 static void
1071 bfe_set_rx_mode(bfe_t *bfe)
1072 {
1073 	uint32_t val;
1074 	int i;
1075 	ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
1076 
1077 	/*
1078 	 * We don't touch RX filter if we were asked to suspend. It's fine
1079 	 * if chip is not active (no interface is plumbed on us).
1080 	 */
1081 	if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
1082 		return;
1083 
1084 	val = INL(bfe, BFE_RXCONF);
1085 
1086 	val &= ~BFE_RXCONF_PROMISC;
1087 	val &= ~BFE_RXCONF_DBCAST;
1088 
1089 	if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
1090 		OUTL(bfe, BFE_CAM_CTRL, 0);
1091 		FLUSH(bfe, BFE_CAM_CTRL);
1092 	} else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
1093 		val |= BFE_RXCONF_PROMISC;
1094 		val &= ~BFE_RXCONF_DBCAST;
1095 	} else {
1096 		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1097 			/* Flush everything */
1098 			OUTL(bfe, BFE_RXCONF, val |
1099 			    BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
1100 			FLUSH(bfe, BFE_RXCONF);
1101 		}
1102 
1103 		/* Disable CAM */
1104 		OUTL(bfe, BFE_CAM_CTRL, 0);
1105 		FLUSH(bfe, BFE_CAM_CTRL);
1106 
1107 		/*
1108 		 * We receive all multicast packets.
1109 		 */
1110 		val |= BFE_RXCONF_ALLMULTI;
1111 
1112 		for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
1113 			bfe_cam_write(bfe, (uchar_t *)mac, i);
1114 		}
1115 
1116 		bfe_cam_write(bfe, bfe->bfe_ether_addr, i);
1117 
1118 		/* Enable CAM */
1119 		OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
1120 		FLUSH(bfe, BFE_CAM_CTRL);
1121 	}
1122 
1123 	DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
1124 	    int, val);
1125 
1126 	OUTL(bfe, BFE_RXCONF, val);
1127 	FLUSH(bfe, BFE_RXCONF);
1128 }
1129 
1130 /*
1131  * Reset various variable values to initial state.
1132  */
1133 static void
1134 bfe_init_vars(bfe_t *bfe)
1135 {
1136 	bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;
1137 
1138 	/* Initial assumption */
1139 	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
1140 	bfe->bfe_chip.speed = 0;
1141 	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
1142 
1143 	bfe->bfe_periodic_id = NULL;
1144 	bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;
1145 
1146 	bfe->bfe_tx_stall_time = 0;
1147 }
1148 
1149 /*
1150  * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
1151  * has control (desc_ctl) and address (desc_addr) member.
1152  */
1153 static void
1154 bfe_tx_desc_init(bfe_ring_t *r)
1155 {
1156 	int i;
1157 	uint32_t v;
1158 
1159 	for (i = 0; i < r->r_ndesc; i++) {
1160 		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
1161 		    (r->r_buf_dma[i].len & BFE_DESC_LEN));
1162 
1163 		/*
1164 		 * DMA addresses need to be added to BFE_PCI_DMA
1165 		 */
1166 		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1167 		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1168 	}
1169 
1170 	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1171 	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1172 	    v | BFE_DESC_EOT);
1173 
1174 	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1175 
1176 	r->r_curr_desc = 0;
1177 	r->r_avail_desc = TX_NUM_DESC;
1178 	r->r_cons_desc = 0;
1179 }
1180 
1181 /*
1182  * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
1183  * has control (desc_ctl) and address (desc_addr) member.
1184  */
1185 static void
1186 bfe_rx_desc_init(bfe_ring_t *r)
1187 {
1188 	int i;
1189 	uint32_t v;
1190 
1191 	for (i = 0; i < r->r_ndesc; i++) {
1192 		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
1193 		    (r->r_buf_dma[i].len& BFE_DESC_LEN));
1194 
1195 		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
1196 		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
1197 
1198 		/* Initialize rx header (len, flags) */
1199 		bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));
1200 
1201 		(void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
1202 		    DDI_DMA_SYNC_FORDEV);
1203 	}
1204 
1205 	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
1206 	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
1207 	    v | BFE_DESC_EOT);
1208 
1209 	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1210 
1211 	/* TAIL of RX Descriptor */
1212 	OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));
1213 
1214 	r->r_curr_desc = 0;
1215 	r->r_avail_desc = RX_NUM_DESC;
1216 }
1217 
/*
 * Brings the chip to an operational state: halt, reset, reprogram both
 * descriptor rings, restore the RX filter and re-enable interrupts.
 * Always returns DDI_SUCCESS. The ordering below is deliberate; do not
 * rearrange the calls.
 */
static int
bfe_chip_start(bfe_t *bfe)
{
	bfe_grab_locks(bfe);

	/*
	 * Stop the chip first & then Reset the chip. At last enable interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Initailize Descriptor Rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	/* Rings must be valid before RX/interrupts are turned on. */
	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);

	/* Check link, speed and duplex mode */
	(void) bfe_check_link(bfe);

	bfe_release_locks(bfe);

	/* Wake up MAC in case transmits were blocked while we were down. */
	mac_tx_update(bfe->bfe_machdl);

	return (DDI_SUCCESS);
}
1254 
1255 
1256 /*
1257  * Clear chip statistics.
1258  */
1259 static void
1260 bfe_clear_stats(bfe_t *bfe)
1261 {
1262 	ulong_t r;
1263 
1264 	OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
1265 
1266 	/*
1267 	 * Stat registers are cleared by reading.
1268 	 */
1269 	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
1270 		(void) INL(bfe, r);
1271 
1272 	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
1273 		(void) INL(bfe, r);
1274 }
1275 
1276 /*
1277  * Collect chip statistics.
1278  */
1279 static void
1280 bfe_gather_stats(bfe_t *bfe)
1281 {
1282 	ulong_t r;
1283 	uint32_t *v;
1284 	uint32_t txerr = 0, rxerr = 0, coll = 0;
1285 
1286 	v = &bfe->bfe_hw_stats.tx_good_octets;
1287 	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
1288 		*v += INL(bfe, r);
1289 		v++;
1290 	}
1291 
1292 	v = &bfe->bfe_hw_stats.rx_good_octets;
1293 	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
1294 		*v += INL(bfe, r);
1295 		v++;
1296 	}
1297 
1298 	/*
1299 	 * TX :
1300 	 * -------
1301 	 * tx_good_octets, tx_good_pkts, tx_octets
1302 	 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
1303 	 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
1304 	 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
1305 	 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
1306 	 * tx_underruns, tx_total_cols, tx_single_cols
1307 	 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
1308 	 * tx_defered, tx_carrier_lost, tx_pause_pkts
1309 	 *
1310 	 * RX :
1311 	 * -------
1312 	 * rx_good_octets, rx_good_pkts, rx_octets
1313 	 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
1314 	 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
1315 	 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
1316 	 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
1317 	 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
1318 	 * rx_crc_errs, rx_align_errs, rx_symbol_errs
1319 	 * rx_pause_pkts, rx_nonpause_pkts
1320 	 */
1321 
1322 	bfe->bfe_stats.ether_stat_carrier_errors =
1323 	    bfe->bfe_hw_stats.tx_carrier_lost;
1324 
1325 	/* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */
1326 
1327 	bfe->bfe_stats.ether_stat_ex_collisions =
1328 	    bfe->bfe_hw_stats.tx_excessive_cols;
1329 	txerr += bfe->bfe_hw_stats.tx_excessive_cols;
1330 	coll += bfe->bfe_hw_stats.tx_excessive_cols;
1331 
1332 	bfe->bfe_stats.ether_stat_fcs_errors =
1333 	    bfe->bfe_hw_stats.rx_crc_errs;
1334 	rxerr += bfe->bfe_hw_stats.rx_crc_errs;
1335 
1336 	bfe->bfe_stats.ether_stat_first_collisions =
1337 	    bfe->bfe_hw_stats.tx_single_cols;
1338 	coll += bfe->bfe_hw_stats.tx_single_cols;
1339 	bfe->bfe_stats.ether_stat_multi_collisions =
1340 	    bfe->bfe_hw_stats.tx_multiple_cols;
1341 	coll += bfe->bfe_hw_stats.tx_multiple_cols;
1342 
1343 	bfe->bfe_stats.ether_stat_toolong_errors =
1344 	    bfe->bfe_hw_stats.rx_oversize_pkts;
1345 	rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;
1346 
1347 	bfe->bfe_stats.ether_stat_tooshort_errors =
1348 	    bfe->bfe_hw_stats.rx_undersize;
1349 	rxerr += bfe->bfe_hw_stats.rx_undersize;
1350 
1351 	bfe->bfe_stats.ether_stat_tx_late_collisions +=
1352 	    bfe->bfe_hw_stats.tx_late_cols;
1353 
1354 	bfe->bfe_stats.ether_stat_defer_xmts +=
1355 	    bfe->bfe_hw_stats.tx_defered;
1356 
1357 	bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
1358 	bfe->bfe_stats.ether_stat_macxmt_errors += txerr;
1359 
1360 	bfe->bfe_stats.collisions += coll;
1361 }
1362 
1363 /*
1364  * Gets the state for dladm command and all.
1365  */
1366 int
1367 bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
1368 {
1369 	bfe_t *bfe = (bfe_t *)arg;
1370 	uint64_t	v;
1371 	int err = 0;
1372 
1373 	rw_enter(&bfe->bfe_rwlock, RW_READER);
1374 
1375 
1376 	switch (stat) {
1377 	default:
1378 		err = ENOTSUP;
1379 		break;
1380 
1381 	case MAC_STAT_IFSPEED:
1382 		/*
1383 		 * MAC layer will ask for IFSPEED first and hence we
1384 		 * collect it only once.
1385 		 */
1386 		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
1387 			/*
1388 			 * Update stats from the hardware.
1389 			 */
1390 			bfe_gather_stats(bfe);
1391 		}
1392 		v = bfe->bfe_chip.speed;
1393 		break;
1394 
1395 	case ETHER_STAT_ADV_CAP_100T4:
1396 		v = bfe->bfe_adv_100T4;
1397 		break;
1398 
1399 	case ETHER_STAT_ADV_CAP_100FDX:
1400 		v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0;
1401 		break;
1402 
1403 	case ETHER_STAT_ADV_CAP_100HDX:
1404 		v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0;
1405 		break;
1406 
1407 	case ETHER_STAT_ADV_CAP_10FDX:
1408 		v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0;
1409 		break;
1410 
1411 	case ETHER_STAT_ADV_CAP_10HDX:
1412 		v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0;
1413 		break;
1414 
1415 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1416 		v = 0;
1417 		break;
1418 
1419 	case ETHER_STAT_ADV_CAP_AUTONEG:
1420 		v = bfe->bfe_adv_aneg;
1421 		break;
1422 
1423 	case ETHER_STAT_ADV_CAP_PAUSE:
1424 		v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0;
1425 		break;
1426 
1427 	case ETHER_STAT_ADV_REMFAULT:
1428 		v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0;
1429 		break;
1430 
1431 	case ETHER_STAT_ALIGN_ERRORS:
1432 		/* MIB */
1433 		v = bfe->bfe_stats.ether_stat_align_errors;
1434 		break;
1435 
1436 	case ETHER_STAT_CAP_100T4:
1437 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0;
1438 		break;
1439 
1440 	case ETHER_STAT_CAP_100FDX:
1441 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0;
1442 		break;
1443 
1444 	case ETHER_STAT_CAP_100HDX:
1445 		v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0;
1446 		break;
1447 
1448 	case ETHER_STAT_CAP_10FDX:
1449 		v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0;
1450 		break;
1451 
1452 	case ETHER_STAT_CAP_10HDX:
1453 		v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0;
1454 		break;
1455 
1456 	case ETHER_STAT_CAP_ASMPAUSE:
1457 		v = 0;
1458 		break;
1459 
1460 	case ETHER_STAT_CAP_AUTONEG:
1461 		v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0);
1462 		break;
1463 
1464 	case ETHER_STAT_CAP_PAUSE:
1465 		v = 1;
1466 		break;
1467 
1468 	case ETHER_STAT_CAP_REMFAULT:
1469 		v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0;
1470 		break;
1471 
1472 	case ETHER_STAT_CARRIER_ERRORS:
1473 		v = bfe->bfe_stats.ether_stat_carrier_errors;
1474 		break;
1475 
1476 	case ETHER_STAT_JABBER_ERRORS:
1477 		err = ENOTSUP;
1478 		break;
1479 
1480 	case ETHER_STAT_DEFER_XMTS:
1481 		v = bfe->bfe_stats.ether_stat_defer_xmts;
1482 		break;
1483 
1484 	case ETHER_STAT_EX_COLLISIONS:
1485 		/* MIB */
1486 		v = bfe->bfe_stats.ether_stat_ex_collisions;
1487 		break;
1488 
1489 	case ETHER_STAT_FCS_ERRORS:
1490 		/* MIB */
1491 		v = bfe->bfe_stats.ether_stat_fcs_errors;
1492 		break;
1493 
1494 	case ETHER_STAT_FIRST_COLLISIONS:
1495 		/* MIB */
1496 		v = bfe->bfe_stats.ether_stat_first_collisions;
1497 		break;
1498 
1499 	case ETHER_STAT_LINK_ASMPAUSE:
1500 		v = 0;
1501 		break;
1502 
1503 	case ETHER_STAT_LINK_AUTONEG:
1504 		v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 &&
1505 		    (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0;
1506 		break;
1507 
1508 	case ETHER_STAT_LINK_DUPLEX:
1509 		v = bfe->bfe_chip.duplex;
1510 		break;
1511 
1512 	case ETHER_STAT_LP_CAP_100T4:
1513 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0;
1514 		break;
1515 
1516 	case ETHER_STAT_LP_CAP_100FDX:
1517 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0;
1518 		break;
1519 
1520 	case ETHER_STAT_LP_CAP_100HDX:
1521 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0;
1522 		break;
1523 
1524 	case ETHER_STAT_LP_CAP_10FDX:
1525 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0;
1526 		break;
1527 
1528 	case ETHER_STAT_LP_CAP_10HDX:
1529 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0;
1530 		break;
1531 
1532 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1533 		v = 0;
1534 		break;
1535 
1536 	case ETHER_STAT_LP_CAP_AUTONEG:
1537 		v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0;
1538 		break;
1539 
1540 	case ETHER_STAT_LP_CAP_PAUSE:
1541 		v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0;
1542 		break;
1543 
1544 	case ETHER_STAT_LP_REMFAULT:
1545 		v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0;
1546 		break;
1547 
1548 	case ETHER_STAT_MACRCV_ERRORS:
1549 		v = bfe->bfe_stats.ether_stat_macrcv_errors;
1550 		break;
1551 
1552 	case ETHER_STAT_MACXMT_ERRORS:
1553 		v = bfe->bfe_stats.ether_stat_macxmt_errors;
1554 		break;
1555 
1556 	case ETHER_STAT_MULTI_COLLISIONS:
1557 		v = bfe->bfe_stats.ether_stat_multi_collisions;
1558 		break;
1559 
1560 	case ETHER_STAT_SQE_ERRORS:
1561 		err = ENOTSUP;
1562 		break;
1563 
1564 	case ETHER_STAT_TOOLONG_ERRORS:
1565 		v = bfe->bfe_stats.ether_stat_toolong_errors;
1566 		break;
1567 
1568 	case ETHER_STAT_TOOSHORT_ERRORS:
1569 		v = bfe->bfe_stats.ether_stat_tooshort_errors;
1570 		break;
1571 
1572 	case ETHER_STAT_TX_LATE_COLLISIONS:
1573 		v = bfe->bfe_stats.ether_stat_tx_late_collisions;
1574 		break;
1575 
1576 	case ETHER_STAT_XCVR_ADDR:
1577 		v = bfe->bfe_phy_addr;
1578 		break;
1579 
1580 	case ETHER_STAT_XCVR_ID:
1581 		v = bfe->bfe_phy_id;
1582 		break;
1583 
1584 	case MAC_STAT_BRDCSTRCV:
1585 		v = bfe->bfe_stats.brdcstrcv;
1586 		break;
1587 
1588 	case MAC_STAT_BRDCSTXMT:
1589 		v = bfe->bfe_stats.brdcstxmt;
1590 		break;
1591 
1592 	case MAC_STAT_MULTIXMT:
1593 		v = bfe->bfe_stats.multixmt;
1594 		break;
1595 
1596 	case MAC_STAT_COLLISIONS:
1597 		v = bfe->bfe_stats.collisions;
1598 		break;
1599 
1600 	case MAC_STAT_IERRORS:
1601 		v = bfe->bfe_stats.ierrors;
1602 		break;
1603 
1604 	case MAC_STAT_IPACKETS:
1605 		v = bfe->bfe_stats.ipackets;
1606 		break;
1607 
1608 	case MAC_STAT_MULTIRCV:
1609 		v = bfe->bfe_stats.multircv;
1610 		break;
1611 
1612 	case MAC_STAT_NORCVBUF:
1613 		v = bfe->bfe_stats.norcvbuf;
1614 		break;
1615 
1616 	case MAC_STAT_NOXMTBUF:
1617 		v = bfe->bfe_stats.noxmtbuf;
1618 		break;
1619 
1620 	case MAC_STAT_OBYTES:
1621 		v = bfe->bfe_stats.obytes;
1622 		break;
1623 
1624 	case MAC_STAT_OERRORS:
1625 		/* MIB */
1626 		v = bfe->bfe_stats.ether_stat_macxmt_errors;
1627 		break;
1628 
1629 	case MAC_STAT_OPACKETS:
1630 		v = bfe->bfe_stats.opackets;
1631 		break;
1632 
1633 	case MAC_STAT_RBYTES:
1634 		v = bfe->bfe_stats.rbytes;
1635 		break;
1636 
1637 	case MAC_STAT_UNDERFLOWS:
1638 		v = bfe->bfe_stats.underflows;
1639 		break;
1640 
1641 	case MAC_STAT_OVERFLOWS:
1642 		v = bfe->bfe_stats.overflows;
1643 		break;
1644 	}
1645 
1646 	rw_exit(&bfe->bfe_rwlock);
1647 
1648 	*val = v;
1649 	return (err);
1650 }
1651 
/*
 * GLDv3 mc_getprop(9E) entry point: copies the current (or, when
 * MAC_PROP_DEFAULT is set, the factory-default) value of a link property
 * into val, and reports its permission in *perm. Returns EINVAL for an
 * undersized buffer and ENOTSUP for unknown properties.
 */
/*ARGSUSED*/
int
bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
    uint_t sz, void *val, uint_t *perm)
{
	bfe_t		*bfe = (bfe_t *)arg;
	int		err = 0;
	boolean_t	dfl = flags & MAC_PROP_DEFAULT;

	/*
	 * Reject zero-sized buffers up front; this also guarantees the
	 * single-byte writes in the cases below stay in bounds.
	 */
	if (sz == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;
	switch (num) {
	case MAC_PROP_DUPLEX:
		/* Duplex is reported by the PHY; never settable. */
		*perm = MAC_PROP_PERM_READ;
		if (sz >= sizeof (link_duplex_t)) {
			bcopy(&bfe->bfe_chip.duplex, val,
			    sizeof (link_duplex_t));
		} else {
			err = EINVAL;
		}
		break;

	case MAC_PROP_SPEED:
		/* Speed is reported by the PHY; never settable. */
		*perm = MAC_PROP_PERM_READ;
		if (sz >= sizeof (uint64_t)) {
			bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
		} else {
			err = EINVAL;
		}
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_aneg : bfe->bfe_adv_aneg;
		break;

	/*
	 * For each speed/duplex capability the ADV_ form is read-only
	 * (what is currently advertised) and the EN_ form is the
	 * user-settable knob; both report cap (default) vs adv (current).
	 */
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100fdx : bfe->bfe_adv_100fdx;
		break;
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100fdx : bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100hdx : bfe->bfe_adv_100hdx;
		break;
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100hdx : bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_10fdx : bfe->bfe_adv_10fdx;
		break;
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_10fdx : bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_10hdx : bfe->bfe_adv_10hdx;
		break;
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_10hdx : bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_ADV_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100T4 : bfe->bfe_adv_100T4;
		break;
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)val =
		    dfl ? bfe->bfe_cap_100T4 : bfe->bfe_adv_100T4;
		break;

	default:
		err = ENOTSUP;
	}

	return (err);
}
1746 
/*
 * GLDv3 mc_setprop(9E) entry point: updates one of the advertised
 * autonegotiation capabilities and, if the chip is running, restarts it
 * so the new advertisement takes effect on the wire.
 */
/*ARGSUSED*/
int
bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	bfe_t		*bfe = (bfe_t *)arg;
	uint8_t		*advp;		/* currently advertised value */
	uint8_t		*capp;		/* hardware capability (gate) */
	int 		r = 0;

	/* Map the property onto its advertised/capability pair. */
	switch (num) {
	case MAC_PROP_EN_100FDX_CAP:
		advp = &bfe->bfe_adv_100fdx;
		capp = &bfe->bfe_cap_100fdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		advp = &bfe->bfe_adv_100hdx;
		capp = &bfe->bfe_cap_100hdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		advp = &bfe->bfe_adv_10fdx;
		capp = &bfe->bfe_cap_10fdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		advp = &bfe->bfe_adv_10hdx;
		capp = &bfe->bfe_cap_10hdx;
		break;

	case MAC_PROP_EN_100T4_CAP:
		advp = &bfe->bfe_adv_100T4;
		capp = &bfe->bfe_cap_100T4;
		break;

	case MAC_PROP_AUTONEG:
		advp = &bfe->bfe_adv_aneg;
		capp = &bfe->bfe_cap_aneg;
		break;

	default:
		return (ENOTSUP);
	}

	/* Can't enable a mode the hardware doesn't support. */
	if (*capp == 0)
		return (ENOTSUP);

	bfe_grab_locks(bfe);

	/* Only act when the value actually changes. */
	if (*advp != *(const uint8_t *)val) {
		*advp = *(const uint8_t *)val;

		bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP;
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/*
			 * We need to stop the timer before grabbing locks
			 * otherwise we can land-up in deadlock with untimeout.
			 */
			bfe_stop_timer(bfe);

			bfe->bfe_chip_action |= BFE_ACTION_RESTART;

			bfe_chip_restart(bfe);

			/*
			 * We leave SETPROP because properties can be
			 * temporary.
			 */
			bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART);
			r = 1;
		}
	}

	bfe_release_locks(bfe);

	/* kick-off a potential stopped downstream */
	if (r)
		mac_tx_update(bfe->bfe_machdl);

	return (0);
}
1829 
1830 
1831 int
1832 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea)
1833 {
1834 	bfe_t *bfe = (bfe_t *)arg;
1835 
1836 	bfe_grab_locks(bfe);
1837 	bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL);
1838 	bfe_set_rx_mode(bfe);
1839 	bfe_release_locks(bfe);
1840 	return (0);
1841 }
1842 
1843 int
1844 bfe_mac_start(void *arg)
1845 {
1846 	bfe_t *bfe = (bfe_t *)arg;
1847 
1848 	if (bfe_chip_start(bfe) == DDI_FAILURE)
1849 		return (EINVAL);
1850 
1851 	return (0);
1852 }
1853 
/*
 * GLDv3 mc_stop(9E) entry point: quiesces the chip. The timer is
 * stopped before the locks are taken (deadlock with untimeout), then
 * the chip is halted, reset (leaving the PHY running for link status)
 * and its RX path disabled.
 */
void
bfe_mac_stop(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;

	/*
	 * We need to stop the timer before grabbing locks otherwise
	 * we can land-up in deadlock with untimeout.
	 */
	bfe_stop_timer(bfe);

	bfe_grab_locks(bfe);

	/*
	 * First halt the chip by disabling interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * This will leave the PHY running.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Disable RX register.
	 */
	bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);

	bfe_release_locks(bfe);
}
1888 
1889 /*
1890  * Send a packet down the wire.
1891  */
1892 static int
1893 bfe_send_a_packet(bfe_t *bfe, mblk_t *mp)
1894 {
1895 	bfe_ring_t *r = &bfe->bfe_tx_ring;
1896 	uint32_t cur = r->r_curr_desc;
1897 	uint32_t next;
1898 	size_t	pktlen = msgsize(mp);
1899 	uchar_t *buf;
1900 	uint32_t v;
1901 
1902 	ASSERT(MUTEX_HELD(&r->r_lock));
1903 	ASSERT(mp != NULL);
1904 
1905 	if (pktlen > r->r_buf_len) {
1906 		freemsg(mp);
1907 		return (BFE_SUCCESS);
1908 	}
1909 
1910 	/*
1911 	 * There is a big reason why we don't check for '0'. It becomes easy
1912 	 * for us to not roll over the ring since we are based on producer (tx)
1913 	 * and consumer (reclaim by an interrupt) model. Especially when we
1914 	 * run out of TX descriptor, chip will send a single interrupt and
1915 	 * both producer and consumer counter will be same. So we keep a
1916 	 * difference of 1 always.
1917 	 */
1918 	if (r->r_avail_desc <= 1) {
1919 		bfe->bfe_stats.noxmtbuf++;
1920 		bfe->bfe_tx_resched = 1;
1921 		return (BFE_FAILURE);
1922 	}
1923 
1924 	/*
1925 	 * Get the DMA buffer to hold packet.
1926 	 */
1927 	buf = (uchar_t *)r->r_buf_dma[cur].addr;
1928 
1929 	mcopymsg(mp, buf);	/* it also frees mp */
1930 
1931 	/*
1932 	 * Gather statistics.
1933 	 */
1934 	if (buf[0] & 0x1) {
1935 		if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0)
1936 			bfe->bfe_stats.multixmt++;
1937 		else
1938 			bfe->bfe_stats.brdcstxmt++;
1939 	}
1940 	bfe->bfe_stats.opackets++;
1941 	bfe->bfe_stats.obytes += pktlen;
1942 
1943 
1944 	/*
1945 	 * Program the DMA descriptor (start and end of frame are same).
1946 	 */
1947 	next = cur;
1948 	v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF |
1949 	    BFE_DESC_EOF;
1950 
1951 	if (cur == (TX_NUM_DESC - 1))
1952 		v |= BFE_DESC_EOT;
1953 
1954 	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v);
1955 
1956 	/*
1957 	 * DMA addresses need to be added to BFE_PCI_DMA
1958 	 */
1959 	PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr),
1960 	    (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA));
1961 
1962 	/*
1963 	 * Sync the packet data for the device.
1964 	 */
1965 	(void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV);
1966 
1967 	/* Move to next descriptor slot */
1968 	BFE_INC_SLOT(next, TX_NUM_DESC);
1969 
1970 	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
1971 
1972 	r->r_curr_desc = next;
1973 
1974 	/*
1975 	 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
1976 	 * descriptor slot are being programmed.
1977 	 */
1978 	OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t));
1979 	FLUSH(bfe, BFE_DMATX_PTR);
1980 
1981 	r->r_avail_desc--;
1982 
1983 	/*
1984 	 * Let timeout know that it must reset the chip if a
1985 	 * packet is not sent down the wire for more than 5 seconds.
1986 	 */
1987 	bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL);
1988 
1989 	return (BFE_SUCCESS);
1990 }
1991 
1992 mblk_t *
1993 bfe_mac_transmit_packet(void *arg, mblk_t *mp)
1994 {
1995 	bfe_t *bfe = (bfe_t *)arg;
1996 	bfe_ring_t *r = &bfe->bfe_tx_ring;
1997 	mblk_t	*nmp;
1998 
1999 	mutex_enter(&r->r_lock);
2000 
2001 	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2002 		bfe_error(bfe->bfe_dip, "bfe_mac_transmit_packet(): sending pkt"
2003 		    " while chip/link is not up");
2004 
2005 		freemsgchain(mp);
2006 		mutex_exit(&r->r_lock);
2007 		return (NULL);
2008 	}
2009 
2010 
2011 	while (mp != NULL) {
2012 		nmp = mp->b_next;
2013 		mp->b_next = NULL;
2014 
2015 		if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) {
2016 			mp->b_next = nmp;
2017 			break;
2018 		}
2019 		mp = nmp;
2020 	}
2021 
2022 	mutex_exit(&r->r_lock);
2023 
2024 	return (mp);
2025 }
2026 
2027 int
2028 bfe_mac_set_promisc(void *arg, boolean_t promiscflag)
2029 {
2030 	bfe_t *bfe = (bfe_t *)arg;
2031 
2032 	bfe_grab_locks(bfe);
2033 	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2034 		bfe_release_locks(bfe);
2035 		return (EIO);
2036 	}
2037 
2038 	if (promiscflag) {
2039 		/* Set Promiscous on */
2040 		bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC;
2041 	} else {
2042 		bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC;
2043 	}
2044 
2045 	bfe_set_rx_mode(bfe);
2046 	bfe_release_locks(bfe);
2047 
2048 	return (0);
2049 }
2050 
2051 int
2052 bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
2053 {
2054 	/*
2055 	 * It was too much of pain to implement multicast in CAM. Instead
2056 	 * we never disable multicast filter.
2057 	 */
2058 	return (0);
2059 }
2060 
/*
 * MAC layer callback vector registered with the GLDv3 framework. The
 * first member advertises which of the optional entry points below
 * (setprop/getprop) are implemented.
 */
static mac_callbacks_t bfe_mac_callbacks = {
	MC_SETPROP | MC_GETPROP,	/* optional callbacks present */
	bfe_mac_getstat,	/* gets stats */
	bfe_mac_start,		/* starts mac */
	bfe_mac_stop,		/* stops mac */
	bfe_mac_set_promisc,	/* sets promisc mode for snoop */
	bfe_mac_set_multicast,	/* multicast implementation */
	bfe_mac_set_ether_addr,	/* sets ethernet address (unicast) */
	bfe_mac_transmit_packet, /* transmits packet */
	NULL,			/* ioctl */
	NULL,			/* getcap */
	NULL,			/* open */
	NULL,			/* close */
	bfe_mac_setprop,	/* sets link properties */
	bfe_mac_getprop,	/* gets link properties */
};
2077 
/*
 * Handles error interrupts (called from bfe_interrupt()). FIFO
 * overflow/underflow conditions only bump counters; every other error
 * halts the chip and flags it for restart by the periodic timer.
 */
static void
bfe_error_handler(bfe_t *bfe, int intr_mask)
{
	uint32_t v;

	/* RX FIFO overflow: count it, chip keeps running. */
	if (intr_mask & BFE_ISTAT_RFO) {
		bfe->bfe_stats.overflows++;
		return;
	}

	/* TX FIFO underflow: count it, chip keeps running. */
	if (intr_mask & BFE_ISTAT_TFU) {
		bfe->bfe_stats.underflows++;
		return;
	}

	/* Descriptor Protocol Error */
	if (intr_mask & BFE_ISTAT_DPE) {
		bfe_error(bfe->bfe_dip,
		    "Descriptor Protocol Error. Halting Chip");
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/*
	 * Descriptor Error.
	 *
	 * NOTE(review): `halt' is not declared in this function nor
	 * visible in this part of the file; presumably a file-scope
	 * variable -- confirm it exists. Also note this path jumps to
	 * `action' WITHOUT setting the BFE_ACTION_RESTART flags, so the
	 * chip is halted but may not be restarted -- verify intent.
	 */
	if (intr_mask & BFE_ISTAT_DSCE && halt == 0) {
		bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
		goto action;
	}

	/* Receive Descr. Underflow */
	if (intr_mask & BFE_ISTAT_RDU) {
		bfe_error(bfe->bfe_dip,
		    "Receive Descriptor Underflow. Restarting Chip");
		bfe->bfe_stats.ether_stat_macrcv_errors++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	v = INL(bfe, BFE_DMATX_STAT);

	/* Error while sending a packet */
	if (v & BFE_STAT_EMASK) {
		bfe->bfe_stats.ether_stat_macxmt_errors++;
		bfe_error(bfe->bfe_dip,
		    "Error while sending a packet. Restarting Chip");
	}

	/* Error while receiving a packet */
	v = INL(bfe, BFE_DMARX_STAT);
	if (v & BFE_RX_FLAG_ERRORS) {
		bfe->bfe_stats.ierrors++;
		bfe_error(bfe->bfe_dip,
		    "Error while receiving a packet. Restarting Chip");
	}


	/* Any remaining error condition is treated as fatal: restart. */
	bfe->bfe_chip_action |=
	    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);

action:
	bfe_chip_halt(bfe);
}
2142 
2143 /*
2144  * It will recycle a RX descriptor slot.
2145  */
2146 static void
2147 bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
2148 {
2149 	bfe_ring_t *r = &bfe->bfe_rx_ring;
2150 	uint32_t v;
2151 
2152 	slot %= RX_NUM_DESC;
2153 
2154 	bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));
2155 
2156 	(void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);
2157 
2158 	v = r->r_buf_dma[slot].len  & BFE_DESC_LEN;
2159 	if (slot == (RX_NUM_DESC - 1))
2160 		v |= BFE_DESC_EOT;
2161 
2162 	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);
2163 
2164 	/*
2165 	 * DMA addresses need to be added to BFE_PCI_DMA
2166 	 */
2167 	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
2168 	    (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
2169 }
2170 
2171 /*
2172  * Gets called from interrupt context to handle RX interrupt.
2173  */
2174 static mblk_t *
2175 bfe_receive(bfe_t *bfe, int intr_mask)
2176 {
2177 	int rxstat, current;
2178 	mblk_t	*mp = NULL, *rx_head, *rx_tail;
2179 	uchar_t	*rx_header;
2180 	uint16_t len;
2181 	uchar_t	*bp;
2182 	bfe_ring_t *r = &bfe->bfe_rx_ring;
2183 	int i;
2184 
2185 	rxstat = INL(bfe, BFE_DMARX_STAT);
2186 	current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t);
2187 	i = r->r_curr_desc;
2188 
2189 	rx_head = rx_tail = NULL;
2190 
2191 	DTRACE_PROBE3(receive, int, bfe->bfe_unit,
2192 	    int, r->r_curr_desc,
2193 	    int, current);
2194 
2195 	for (i = r->r_curr_desc; i != current;
2196 	    BFE_INC_SLOT(i, RX_NUM_DESC)) {
2197 
2198 		/*
2199 		 * Sync the buffer associated with the descriptor table entry.
2200 		 */
2201 		(void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len,
2202 		    DDI_DMA_SYNC_FORKERNEL);
2203 
2204 		rx_header = (void *)r->r_buf_dma[i].addr;
2205 
2206 		/*
2207 		 * We do this to make sure we are endian neutral. Chip is
2208 		 * big endian.
2209 		 *
2210 		 * The header looks like :-
2211 		 *
2212 		 *  Offset 0  -> uint16_t len
2213 		 *  Offset 2  -> uint16_t flags
2214 		 *  Offset 4  -> uint16_t pad[12]
2215 		 */
2216 		len = (rx_header[1] << 8) | rx_header[0];
2217 		len -= 4;	/* CRC bytes need to be removed */
2218 
2219 		/*
2220 		 * Don't receive this packet if pkt length is greater than
2221 		 * MTU + VLAN_TAGSZ.
2222 		 */
2223 		if (len > r->r_buf_len) {
2224 			/* Recycle slot for later use */
2225 			bfe_rx_desc_buf_reinit(bfe, i);
2226 			continue;
2227 		}
2228 
2229 		if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) {
2230 			mp->b_rptr += VLAN_TAGSZ;
2231 			bp = mp->b_rptr;
2232 			mp->b_wptr = bp + len;
2233 
2234 			/* sizeof (bfe_rx_header_t) + 2 */
2235 			bcopy(r->r_buf_dma[i].addr +
2236 			    BFE_RX_OFFSET, bp, len);
2237 
2238 			mp->b_next = NULL;
2239 			if (rx_tail == NULL)
2240 				rx_head = rx_tail = mp;
2241 			else {
2242 				rx_tail->b_next = mp;
2243 				rx_tail = mp;
2244 			}
2245 
2246 			/* Number of packets received so far */
2247 			bfe->bfe_stats.ipackets++;
2248 
2249 			/* Total bytes of packets received so far */
2250 			bfe->bfe_stats.rbytes += len;
2251 
2252 			if (bcmp(mp->b_rptr, bfe_broadcast, ETHERADDRL) == 0)
2253 				bfe->bfe_stats.brdcstrcv++;
2254 			else
2255 				bfe->bfe_stats.multircv++;
2256 		} else {
2257 			bfe->bfe_stats.norcvbuf++;
2258 			/* Recycle the slot for later use */
2259 			bfe_rx_desc_buf_reinit(bfe, i);
2260 			break;
2261 		}
2262 
2263 		/*
2264 		 * Reinitialize the current descriptor slot's buffer so that
2265 		 * it can be reused.
2266 		 */
2267 		bfe_rx_desc_buf_reinit(bfe, i);
2268 	}
2269 
2270 	r->r_curr_desc = i;
2271 
2272 	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);
2273 
2274 	return (rx_head);
2275 }
2276 
/*
 * Reclaims TX descriptors consumed by the chip: advances the consumer
 * pointer from r_cons_desc to the chip's current descriptor, returning
 * each slot to the free pool and re-arming it with its buffer's length
 * and DMA address. Caller holds the TX ring lock. Returns the chip's
 * current descriptor index.
 */
static int
bfe_tx_reclaim(bfe_ring_t *r)
{
	uint32_t cur, start;
	uint32_t v;

	/* Chip's current descriptor, converted from byte offset to index. */
	cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	cur = cur / sizeof (bfe_desc_t);

	/*
	 * Start with the last descriptor consumed by the chip.
	 */
	start = r->r_cons_desc;

	DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit,
	    int, start,
	    int, cur);

	/*
	 * There will be at least one descriptor to process.
	 */
	while (start != cur) {
		r->r_avail_desc++;
		v = r->r_buf_dma[start].len  & BFE_DESC_LEN;
		/* Last ring slot keeps its End-Of-Table marker. */
		if (start == (TX_NUM_DESC - 1))
			v |= BFE_DESC_EOT;

		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
		    (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Move to next descriptor in TX ring */
		BFE_INC_SLOT(start, TX_NUM_DESC);
	}

	/* Flush the re-armed descriptors before the chip can see them. */
	(void) ddi_dma_sync(r->r_desc_dma_handle,
	    0, (r->r_ndesc * sizeof (bfe_desc_t)),
	    DDI_DMA_SYNC_FORDEV);

	r->r_cons_desc = start; 	/* consumed pointer */
	/* Progress was made; clear the TX-stall watchdog deadline. */
	r->r_bfe->bfe_tx_stall_time = 0;

	return (cur);
}
2321 
2322 static int
2323 bfe_tx_done(bfe_t *bfe, int intr_mask)
2324 {
2325 	bfe_ring_t *r = &bfe->bfe_tx_ring;
2326 	int resched = 0;
2327 
2328 	mutex_enter(&r->r_lock);
2329 	(void) bfe_tx_reclaim(r);
2330 
2331 	if (bfe->bfe_tx_resched) {
2332 		resched = 1;
2333 		bfe->bfe_tx_resched = 0;
2334 	}
2335 	mutex_exit(&r->r_lock);
2336 
2337 	return (resched);
2338 }
2339 
2340 /*
2341  * ISR for interrupt handling
2342  */
2343 static uint_t
2344 bfe_interrupt(caddr_t arg1, caddr_t arg2)
2345 {
2346 	bfe_t *bfe =  (void *)arg1;
2347 	uint32_t	intr_stat;
2348 	mblk_t *rx_head = NULL;
2349 	int resched = 0;
2350 
2351 	/*
2352 	 * Grab the lock to avoid stopping the chip while this interrupt
2353 	 * is handled.
2354 	 */
2355 	rw_enter(&bfe->bfe_rwlock, RW_READER);
2356 
2357 	/*
2358 	 * It's necessary to read intr stat again because masking interrupt
2359 	 * register does not really mask interrupts coming from the chip.
2360 	 */
2361 	intr_stat = INL(bfe, BFE_INTR_STAT);
2362 	intr_stat &= BFE_IMASK_DEF;
2363 	OUTL(bfe, BFE_INTR_STAT, intr_stat);
2364 	(void) INL(bfe, BFE_INTR_STAT);
2365 
2366 	if (intr_stat == 0) {
2367 		rw_exit(&bfe->bfe_rwlock);
2368 		return (DDI_INTR_UNCLAIMED);
2369 	}
2370 
2371 	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
2372 		/*
2373 		 * If chip is suspended then we just return.
2374 		 */
2375 		if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
2376 			rw_exit(&bfe->bfe_rwlock);
2377 			DTRACE_PROBE1(interrupt__chip__is__suspend, int,
2378 			    bfe->bfe_unit);
2379 			return (DDI_INTR_CLAIMED);
2380 		}
2381 
2382 		/*
2383 		 * Halt the chip again i.e basically disable interrupts.
2384 		 */
2385 		bfe_chip_halt(bfe);
2386 		rw_exit(&bfe->bfe_rwlock);
2387 		DTRACE_PROBE1(interrupt__chip__not__active, int,
2388 		    bfe->bfe_unit);
2389 		return (DDI_INTR_CLAIMED);
2390 	}
2391 
2392 	/* A packet was received */
2393 	if (intr_stat & BFE_ISTAT_RX) {
2394 		rx_head = bfe_receive(bfe, intr_stat);
2395 	}
2396 
2397 	/* A packet was sent down the wire */
2398 	if (intr_stat & BFE_ISTAT_TX) {
2399 		resched = bfe_tx_done(bfe, intr_stat);
2400 	}
2401 
2402 	/* There was an error */
2403 	if (intr_stat & BFE_ISTAT_ERRORS) {
2404 		bfe_error_handler(bfe, intr_stat);
2405 	}
2406 
2407 	rw_exit(&bfe->bfe_rwlock);
2408 
2409 	/*
2410 	 * Pass the list of packets received from chip to MAC layer.
2411 	 */
2412 	if (rx_head) {
2413 		mac_rx(bfe->bfe_machdl, 0, rx_head);
2414 	}
2415 
2416 	/*
2417 	 * Let the MAC start sending pkts to a potential stopped stream.
2418 	 */
2419 	if (resched)
2420 		mac_tx_update(bfe->bfe_machdl);
2421 
2422 	return (DDI_INTR_CLAIMED);
2423 }
2424 
2425 /*
2426  * Removes registered interrupt handler.
2427  */
static void
bfe_remove_intr(bfe_t *bfe)
{
	/* Undo bfe_add_intr(): detach the handler, then free the handle. */
	(void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
	(void) ddi_intr_free(bfe->bfe_intrhdl);
}
2434 
2435 /*
2436  * Add an interrupt for the driver.
2437  */
2438 static int
2439 bfe_add_intr(bfe_t *bfe)
2440 {
2441 	int	nintrs = 1;
2442 	int ret;
2443 
2444 	ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl,
2445 	    DDI_INTR_TYPE_FIXED,	/* type */
2446 	    0,	/* inumber */
2447 	    1,	/* count */
2448 	    &nintrs,	/* actual nintrs */
2449 	    DDI_INTR_ALLOC_STRICT);
2450 
2451 	if (ret != DDI_SUCCESS) {
2452 		bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed"
2453 		    " : ret : %d", ret);
2454 		return (DDI_FAILURE);
2455 	}
2456 
2457 	ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL);
2458 	if (ret != DDI_SUCCESS) {
2459 		bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed");
2460 		(void) ddi_intr_free(bfe->bfe_intrhdl);
2461 		return (DDI_FAILURE);
2462 	}
2463 
2464 	ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri);
2465 	if (ret != DDI_SUCCESS) {
2466 		bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed");
2467 		bfe_remove_intr(bfe);
2468 		return (DDI_FAILURE);
2469 	}
2470 
2471 	return (DDI_SUCCESS);
2472 }
2473 
2474 
2475 /*
2476  * Identify chipset family.
2477  */
2478 static int
2479 bfe_identify_hardware(bfe_t *bfe)
2480 {
2481 	uint16_t	vid, did;
2482 	int i;
2483 
2484 	vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID);
2485 	did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID);
2486 
2487 	for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
2488 		if (bfe_cards[i].vendor_id == vid &&
2489 		    bfe_cards[i].device_id == did) {
2490 			return (BFE_SUCCESS);
2491 		}
2492 	}
2493 
2494 	bfe_error(bfe->bfe_dip, "bfe driver is attaching to unknown pci%d,%d"
2495 	    " vendor/device-id card", vid, did);
2496 
2497 	return (BFE_SUCCESS);
2498 }
2499 
2500 /*
2501  * Maps device registers.
2502  */
2503 static int
2504 bfe_regs_map(bfe_t *bfe)
2505 {
2506 	dev_info_t *dip = bfe->bfe_dip;
2507 	int ret;
2508 
2509 	ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0,
2510 	    &bfe_dev_attr, &bfe->bfe_mem_regset.hdl);
2511 
2512 	if (ret != DDI_SUCCESS) {
2513 		bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed");
2514 		return (DDI_FAILURE);
2515 	}
2516 
2517 	return (DDI_SUCCESS);
2518 }
2519 
static void
bfe_unmap_regs(bfe_t *bfe)
{
	/* Release the register mapping established by bfe_regs_map(). */
	ddi_regs_map_free(&bfe->bfe_mem_regset.hdl);
}
2525 
static int
bfe_get_chip_config(bfe_t *bfe)
{
	uint32_t	prom[BFE_EEPROM_SIZE];
	int i;

	/*
	 * Read EEPROM in prom[]
	 */
	for (i = 0; i < BFE_EEPROM_SIZE; i++) {
		prom[i] = INL(bfe, BFE_EEPROM_BASE + i * sizeof (uint32_t));
	}

	/*
	 * NOTE(review): prom[] is filled above but never consulted; the
	 * MAC address is re-read byte-wise below.  The bulk read looks
	 * redundant -- confirm it has no hardware side effect before
	 * removing it.
	 *
	 * The six MAC address bytes are read at pairwise-swapped
	 * offsets (79/78, 81/80, 83/82); the EEPROM apparently stores
	 * them 16 bits at a time in the opposite byte order.
	 */
	bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] =
	    INB(bfe, BFE_EEPROM_BASE + 79);

	bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] =
	    INB(bfe, BFE_EEPROM_BASE + 78);

	bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] =
	    INB(bfe, BFE_EEPROM_BASE + 81);

	bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] =
	    INB(bfe, BFE_EEPROM_BASE + 80);

	bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] =
	    INB(bfe, BFE_EEPROM_BASE + 83);

	bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] =
	    INB(bfe, BFE_EEPROM_BASE + 82);

	/* -1 marks the PHY address as not yet known. */
	bfe->bfe_phy_addr = -1;

	return (DDI_SUCCESS);
}
2561 
2562 /*
2563  * Ring Management routines
2564  */
static int
bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d)
{
	/*
	 * Allocates the DMA buffer (handle, memory and binding) for one
	 * ring slot.  Note that 'd' is not used by this function; the
	 * buffer is always bound DDI_DMA_RDWR.  On failure, everything
	 * acquired so far is released through the fail labels and
	 * DDI_FAILURE is returned.
	 */
	int err;
	uint_t count = 0;

	/* Step 1: DMA handle for this slot. */
	err = ddi_dma_alloc_handle(bfe->bfe_dip,
	    &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].handle);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " alloc_handle failed");
		goto fail0;
	}

	/* Step 2: r_buf_len bytes of DMA-able memory. */
	err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle,
	    r->r_buf_len, &bfe_buf_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
	    &r->r_buf_dma[slot].len,
	    &r->r_buf_dma[slot].acchdl);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " mem_alloc failed :%d", err);
		goto fail1;
	}

	/* Step 3: bind the memory; the slot must map to a single cookie. */
	err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
	    NULL, r->r_buf_dma[slot].addr,
	    r->r_buf_dma[slot].len,
	    (DDI_DMA_RDWR | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].cookie,
	    &count);

	if (err != DDI_DMA_MAPPED) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " bind_handle failed");
		goto fail2;
	}

	if (count > 1) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " more than one DMA cookie");
		(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
		goto fail2;
	}

	return (DDI_SUCCESS);
fail2:
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
fail1:
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
fail0:
	return (DDI_FAILURE);
}
2622 
2623 static void
2624 bfe_ring_buf_free(bfe_ring_t *r, int slot)
2625 {
2626 	if (r->r_buf_dma == NULL)
2627 		return;
2628 
2629 	(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
2630 	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
2631 	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
2632 }
2633 
2634 static void
2635 bfe_buffer_free(bfe_ring_t *r)
2636 {
2637 	int i;
2638 
2639 	for (i = 0; i < r->r_ndesc; i++) {
2640 		bfe_ring_buf_free(r, i);
2641 	}
2642 }
2643 
static void
bfe_ring_desc_free(bfe_ring_t *r)
{
	/*
	 * Frees the descriptor DMA area and the per-slot bookkeeping
	 * array.  The per-slot buffers themselves are expected to have
	 * been released already (see bfe_buffer_free()).
	 */
	(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
	ddi_dma_mem_free(&r->r_desc_acc_handle);
	ddi_dma_free_handle(&r->r_desc_dma_handle);
	kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));

	/* bfe_ring_buf_free() checks r_buf_dma for NULL after this point. */
	r->r_buf_dma = NULL;
	r->r_desc = NULL;
}
2655 
2656 
2657 static int
2658 bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
2659 {
2660 	int err, i, fail = 0;
2661 	caddr_t	ring;
2662 	size_t	size_krnl = 0, size_dma = 0, ring_len = 0;
2663 	ddi_dma_cookie_t cookie;
2664 	uint_t	count = 0;
2665 
2666 	ASSERT(bfe != NULL);
2667 
2668 	size_krnl = r->r_ndesc * sizeof (bfe_dma_t);
2669 	size_dma = r->r_ndesc * sizeof (bfe_desc_t);
2670 	r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP);
2671 
2672 
2673 	err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc,
2674 	    DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle);
2675 
2676 	if (err != DDI_SUCCESS) {
2677 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2678 		    " ddi_dma_alloc_handle()");
2679 		kmem_free(r->r_buf_dma, size_krnl);
2680 		return (DDI_FAILURE);
2681 	}
2682 
2683 
2684 	err = ddi_dma_mem_alloc(r->r_desc_dma_handle,
2685 	    size_dma, &bfe_buf_attr,
2686 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2687 	    &ring, &ring_len, &r->r_desc_acc_handle);
2688 
2689 	if (err != DDI_SUCCESS) {
2690 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2691 		    " ddi_dma_mem_alloc()");
2692 		ddi_dma_free_handle(&r->r_desc_dma_handle);
2693 		kmem_free(r->r_buf_dma, size_krnl);
2694 		return (DDI_FAILURE);
2695 	}
2696 
2697 	err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle,
2698 	    NULL, ring, ring_len,
2699 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2700 	    DDI_DMA_SLEEP, NULL,
2701 	    &cookie, &count);
2702 
2703 	if (err != DDI_SUCCESS) {
2704 		bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on"
2705 		    " ddi_dma_addr_bind_handle()");
2706 		ddi_dma_mem_free(&r->r_desc_acc_handle);
2707 		ddi_dma_free_handle(&r->r_desc_dma_handle);
2708 		kmem_free(r->r_buf_dma, size_krnl);
2709 		return (DDI_FAILURE);
2710 	}
2711 
2712 	/*
2713 	 * We don't want to have multiple cookies. Descriptor should be
2714 	 * aligned to PAGESIZE boundary.
2715 	 */
2716 	ASSERT(count == 1);
2717 
2718 	/* The actual descriptor for the ring */
2719 	r->r_desc_len = ring_len;
2720 	r->r_desc_cookie = cookie;
2721 
2722 	r->r_desc = (void *)ring;
2723 
2724 	bzero(r->r_desc, size_dma);
2725 	bzero(r->r_desc, ring_len);
2726 
2727 	/* For each descriptor, allocate a DMA buffer */
2728 	fail = 0;
2729 	for (i = 0; i < r->r_ndesc; i++) {
2730 		if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) {
2731 			i--;
2732 			fail = 1;
2733 			break;
2734 		}
2735 	}
2736 
2737 	if (fail) {
2738 		while (i-- >= 0) {
2739 			bfe_ring_buf_free(r, i);
2740 		}
2741 
2742 		/* We don't need the descriptor anymore */
2743 		bfe_ring_desc_free(r);
2744 		return (DDI_FAILURE);
2745 	}
2746 
2747 	return (DDI_SUCCESS);
2748 }
2749 
2750 static int
2751 bfe_rings_alloc(bfe_t *bfe)
2752 {
2753 	/* TX */
2754 	mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2755 	bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock;
2756 	bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2757 	    VLAN_TAGSZ + ETHERFCSL;
2758 	bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC;
2759 	bfe->bfe_tx_ring.r_bfe = bfe;
2760 	bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC;
2761 
2762 	/* RX */
2763 	mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL);
2764 	bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock;
2765 	bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) +
2766 	    VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM;
2767 	bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC;
2768 	bfe->bfe_rx_ring.r_bfe = bfe;
2769 	bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC;
2770 
2771 	/* Allocate TX Ring */
2772 	if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring,
2773 	    DDI_DMA_WRITE) != DDI_SUCCESS)
2774 		return (DDI_FAILURE);
2775 
2776 	/* Allocate RX Ring */
2777 	if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring,
2778 	    DDI_DMA_READ) != DDI_SUCCESS) {
2779 		cmn_err(CE_NOTE, "RX ring allocation failed");
2780 		bfe_ring_desc_free(&bfe->bfe_tx_ring);
2781 		return (DDI_FAILURE);
2782 	}
2783 
2784 	bfe->bfe_tx_ring.r_flags = BFE_RING_ALLOCATED;
2785 	bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;
2786 
2787 	return (DDI_SUCCESS);
2788 }
2789 
2790 static int
2791 bfe_resume(dev_info_t *dip)
2792 {
2793 	bfe_t *bfe;
2794 	int err = DDI_SUCCESS;
2795 
2796 	if ((bfe = ddi_get_driver_private(dip)) == NULL) {
2797 		bfe_error(dip, "Unexpected error (no driver private data)"
2798 		    " while resume");
2799 		return (DDI_FAILURE);
2800 	}
2801 
2802 	/*
2803 	 * Grab all the locks first.
2804 	 */
2805 	bfe_grab_locks(bfe);
2806 	bfe->bfe_chip_state = BFE_CHIP_RESUME;
2807 
2808 	bfe_init_vars(bfe);
2809 	/* PHY will also start running */
2810 	bfe_chip_reset(bfe);
2811 	if (bfe_chip_start(bfe) == DDI_FAILURE) {
2812 		bfe_error(dip, "Could not resume chip");
2813 		err = DDI_FAILURE;
2814 		goto done;
2815 	}
2816 done:
2817 	bfe_release_locks(bfe);
2818 	return (err);
2819 }
2820 
2821 static int
2822 bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2823 {
2824 	int	unit;
2825 	bfe_t	*bfe;
2826 	mac_register_t	*macreg;
2827 	int	ret;
2828 
2829 	switch (cmd) {
2830 	case DDI_RESUME:
2831 		return (bfe_resume(dip));
2832 
2833 	case DDI_ATTACH:
2834 		break;
2835 
2836 	default:
2837 		return (DDI_FAILURE);
2838 	}
2839 
2840 
2841 	unit = ddi_get_instance(dip);
2842 
2843 	bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
2844 	bfe->bfe_dip = dip;
2845 	bfe->bfe_unit = unit;
2846 
2847 	if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
2848 		bfe_error(dip, "pci_config_setup failed");
2849 		goto fail0;
2850 	}
2851 
2852 	/*
2853 	 * Enable IO space, Bus Master and Memory Space accessess.
2854 	 */
2855 	ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM);
2856 	pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM,
2857 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret);
2858 
2859 	ddi_set_driver_private(dip, bfe);
2860 
2861 	/* Identify hardware */
2862 	if (bfe_identify_hardware(bfe) == BFE_FAILURE) {
2863 		bfe_error(dip, "Could not identify device");
2864 		goto fail1;
2865 	}
2866 
2867 	if (bfe_regs_map(bfe) != DDI_SUCCESS) {
2868 		bfe_error(dip, "Could not map device registers");
2869 		goto fail1;
2870 	}
2871 
2872 	(void) bfe_get_chip_config(bfe);
2873 
2874 	/*
2875 	 * Register with MAC layer
2876 	 */
2877 	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
2878 		bfe_error(dip, "mac_alloc() failed");
2879 		goto fail2;
2880 	}
2881 
2882 	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2883 	macreg->m_driver = bfe;
2884 	macreg->m_dip = dip;
2885 	macreg->m_instance = unit;
2886 	macreg->m_src_addr = bfe->bfe_ether_addr;
2887 	macreg->m_callbacks = &bfe_mac_callbacks;
2888 	macreg->m_min_sdu = 0;
2889 	macreg->m_max_sdu = ETHERMTU;
2890 	macreg->m_margin = VLAN_TAGSZ;
2891 
2892 	if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) {
2893 		bfe_error(dip, "mac_register() failed with %d error", ret);
2894 		mac_free(macreg);
2895 		goto fail2;
2896 	}
2897 
2898 	mac_free(macreg);
2899 
2900 	rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER,
2901 	    DDI_INTR_PRI(bfe->bfe_intrpri));
2902 
2903 	if (bfe_add_intr(bfe) != DDI_SUCCESS) {
2904 		bfe_error(dip, "Could not add interrupt");
2905 		goto fail3;
2906 	}
2907 
2908 	if (bfe_rings_alloc(bfe) != DDI_SUCCESS) {
2909 		bfe_error(dip, "Could not allocate TX/RX Ring");
2910 		goto fail4;
2911 	}
2912 
2913 	/* Init and then reset the chip */
2914 	bfe->bfe_chip_action = 0;
2915 	bfe_init_vars(bfe);
2916 
2917 	/* PHY will also start running */
2918 	bfe_chip_reset(bfe);
2919 
2920 	/*
2921 	 * Even though we enable the interrupts here but chip's interrupt
2922 	 * is not enabled yet. It will be enabled once we plumb the interface.
2923 	 */
2924 	if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
2925 		bfe_error(dip, "Could not enable interrupt");
2926 		goto fail4;
2927 	}
2928 
2929 	return (DDI_SUCCESS);
2930 
2931 fail4:
2932 	bfe_remove_intr(bfe);
2933 fail3:
2934 	mac_unregister(bfe->bfe_machdl);
2935 fail2:
2936 	bfe_unmap_regs(bfe);
2937 fail1:
2938 	pci_config_teardown(&bfe->bfe_conf_handle);
2939 fail0:
2940 	kmem_free(bfe, sizeof (bfe_t));
2941 	return (DDI_FAILURE);
2942 }
2943 
static int
bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	/*
	 * detach(9E) entry point.  Handles full teardown (DDI_DETACH)
	 * and suspend (DDI_SUSPEND); anything else fails.
	 */
	bfe_t *bfe;

	bfe = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * First unregister with MAC layer before stopping DMA
		 * engine.
		 */
		if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		bfe->bfe_machdl = NULL;

		/*
		 * Quiesce the chip first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe_release_locks(bfe);

		/* No further interrupts once the chip is halted. */
		(void) ddi_intr_disable(bfe->bfe_intrhdl);

		/* Make sure timer is gone. */
		bfe_stop_timer(bfe);

		/*
		 * Free the DMA resources for buffer and then descriptors
		 */
		if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* TX */
			bfe_buffer_free(&bfe->bfe_tx_ring);
			bfe_ring_desc_free(&bfe->bfe_tx_ring);
		}

		if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* RX */
			bfe_buffer_free(&bfe->bfe_rx_ring);
			bfe_ring_desc_free(&bfe->bfe_rx_ring);
		}

		/* Release interrupt, register mapping and config access. */
		bfe_remove_intr(bfe);
		bfe_unmap_regs(bfe);
		pci_config_teardown(&bfe->bfe_conf_handle);

		mutex_destroy(&bfe->bfe_tx_ring.r_lock);
		mutex_destroy(&bfe->bfe_rx_ring.r_lock);
		rw_destroy(&bfe->bfe_rwlock);

		kmem_free(bfe, sizeof (bfe_t));

		ddi_set_driver_private(devinfo, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * Grab all the locks first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
		bfe_release_locks(bfe);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
3031 
3032 /*
3033  * Quiesce the card for fast reboot
3034  */
int
bfe_quiesce(dev_info_t *dev_info)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(dev_info);

	/*
	 * quiesce(9E) must not block; no locks are taken here -- the
	 * chip and PHY are simply silenced for fast reboot.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_QUIESCED;

	return (DDI_SUCCESS);
}
3048 
/*
 * Character/block entry points are all stubs: data flow goes through
 * the MAC framework, not a device node.
 */
static struct cb_ops bfe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3069 
/*
 * Device operations: attach, detach and quiesce are the only real
 * entry points this driver provides.
 */
static struct dev_ops bfe_dev_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* devo_refcnt */
	NULL,		/* devo_getinfo */
	nulldev,	/* devo_identify */
	nulldev,	/* devo_probe */
	bfe_attach,	/* devo_attach */
	bfe_detach,	/* devo_detach */
	nodev,		/* devo_reset */
	&bfe_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	ddi_power,	/* devo_power */
	bfe_quiesce	/* devo_quiesce */
};
3084 
/* Module linkage: a single driver module. */
static struct modldrv bfe_modldrv = {
	&mod_driverops,
	bfe_ident,		/* module description string */
	&bfe_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bfe_modldrv, NULL
};
3094 
/*
 * Loadable-module _info(9E): report module information via the
 * linkage structure.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3100 
3101 int
3102 _init(void)
3103 {
3104 	int	status;
3105 
3106 	mac_init_ops(&bfe_dev_ops, MODULE_NAME);
3107 	status = mod_install(&modlinkage);
3108 	if (status == DDI_FAILURE)
3109 		mac_fini_ops(&bfe_dev_ops);
3110 	return (status);
3111 }
3112 
3113 int
3114 _fini(void)
3115 {
3116 	int status;
3117 
3118 	status = mod_remove(&modlinkage);
3119 	if (status == 0) {
3120 		mac_fini_ops(&bfe_dev_ops);
3121 	}
3122 	return (status);
3123 }
3124