1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010-2013, by Broadcom, Inc.
24  * All Rights Reserved.
25  */
26 
27 /*
28  * Copyright (c) 2002, 2010, Oracle and/or its affiliates.
29  * All rights reserved.
30  */
31 
32 #include "bge_impl.h"
33 #include <sys/sdt.h>
34 #include <sys/mac_provider.h>
35 #include <sys/mac.h>
36 #include <sys/mac_flow.h>
37 
38 
39 #ifndef STRINGIFY
40 #define XSTRINGIFY(x) #x
41 #define STRINGIFY(x) XSTRINGIFY(x)
42 #endif
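/*
 * The two-level expansion lets a macro argument itself expand before
 * being stringified: given a hypothetical "#define REV 7" (not a macro
 * defined by this driver), STRINGIFY(REV) yields "7", whereas using
 * XSTRINGIFY(REV) directly would yield "REV".
 */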
43 
44 /*
45  * This is the string displayed by modinfo, etc.
46  */
47 static char bge_ident[] = "Broadcom Gb Ethernet";
48 
49 /*
50  * Property names
51  */
52 static char debug_propname[] = "bge-debug-flags";
53 static char clsize_propname[] = "cache-line-size";
54 static char latency_propname[] = "latency-timer";
55 static char localmac_boolname[] = "local-mac-address?";
56 static char localmac_propname[] = "local-mac-address";
57 static char macaddr_propname[] = "mac-address";
58 static char subdev_propname[] = "subsystem-id";
59 static char subven_propname[] = "subsystem-vendor-id";
60 static char rxrings_propname[] = "bge-rx-rings";
61 static char txrings_propname[] = "bge-tx-rings";
62 static char eee_propname[] = "bge-eee";
63 static char fm_cap[] = "fm-capable";
64 static char default_mtu[] = "default_mtu";
65 
66 static int bge_add_intrs(bge_t *, int);
67 static void bge_rem_intrs(bge_t *);
68 static int bge_unicst_set(void *, const uint8_t *, int);
69 static int bge_addmac(void *, const uint8_t *);
70 static int bge_remmac(void *, const uint8_t *);
71 
72 /*
73  * Describes the chip's DMA engine
74  */
75 static ddi_dma_attr_t dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version	*/
77 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
78 	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
79 	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
80 	0x0000000000000001ull,		/* dma_attr_align	*/
81 	0x00000FFF,			/* dma_attr_burstsizes	*/
82 	0x00000001,			/* dma_attr_minxfer	*/
83 	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
84 	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
85 	1,				/* dma_attr_sgllen 	*/
86 	0x00000001,			/* dma_attr_granular 	*/
87 	DDI_DMA_FLAGERR			/* dma_attr_flags */
88 };
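/*
 * A minimal sketch (not this driver's actual allocation path, which
 * lives in its DMA setup code) of how a template like the above is
 * typically handed to the DDI:
 *
 *	ddi_dma_handle_t hdl;
 *
 *	if (ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &hdl) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */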
89 
90 /*
91  * PIO access attributes for registers
92  */
93 static ddi_device_acc_attr_t bge_reg_accattr = {
94 	DDI_DEVICE_ATTR_V1,
95 	DDI_NEVERSWAP_ACC,
96 	DDI_STRICTORDER_ACC,
97 	DDI_FLAGERR_ACC
98 };
99 
100 /*
101  * DMA access attributes for descriptors: NOT to be byte swapped.
102  */
103 static ddi_device_acc_attr_t bge_desc_accattr = {
104 	DDI_DEVICE_ATTR_V0,
105 	DDI_NEVERSWAP_ACC,
106 	DDI_STRICTORDER_ACC
107 };
108 
109 /*
110  * DMA access attributes for data: NOT to be byte swapped.
111  */
112 static ddi_device_acc_attr_t bge_data_accattr = {
113 	DDI_DEVICE_ATTR_V0,
114 	DDI_NEVERSWAP_ACC,
115 	DDI_STRICTORDER_ACC
116 };
117 
118 static int		bge_m_start(void *);
119 static void		bge_m_stop(void *);
120 static int		bge_m_promisc(void *, boolean_t);
121 static int		bge_m_unicst(void *, const uint8_t *);
122 static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
123 static void		bge_m_resources(void *);
124 static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
125 static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
126 static int		bge_unicst_set(void *, const uint8_t *,
127     int);
128 static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
129     uint_t, const void *);
130 static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
131     uint_t, void *);
132 static void		bge_m_propinfo(void *, const char *, mac_prop_id_t,
133     mac_prop_info_handle_t);
134 static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
135     const void *);
136 static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
137     void *);
138 static void		bge_priv_propinfo(const char *,
139     mac_prop_info_handle_t);
140 
141 static mac_callbacks_t bge_m_callbacks = {
142     MC_IOCTL
143 #ifdef MC_RESOURCES
144   | MC_RESOURCES
145 #endif
146 #ifdef MC_SETPROP
147   | MC_SETPROP
148 #endif
149 #ifdef MC_GETPROP
150   | MC_GETPROP
151 #endif
152 #ifdef MC_PROPINFO
153   | MC_PROPINFO
154 #endif
155   | MC_GETCAPAB,
156 	bge_m_stat,
157 	bge_m_start,
158 	bge_m_stop,
159 	bge_m_promisc,
160 	bge_m_multicst,
161 	bge_m_unicst,
162 	bge_m_tx,
163 #ifdef MC_RESOURCES
164 	bge_m_resources,
165 #else
166 	NULL,
167 #endif
168 	bge_m_ioctl,
169 	bge_m_getcapab,
170 #ifdef MC_OPEN
171 	NULL,
172 	NULL,
173 #endif
174 #ifdef MC_SETPROP
175 	bge_m_setprop,
176 #endif
177 #ifdef MC_GETPROP
178 	bge_m_getprop,
179 #endif
180 #ifdef MC_PROPINFO
181 	bge_m_propinfo
182 #endif
183 };
184 
185 char *bge_priv_prop[] = {
186 	"_adv_asym_pause_cap",
187 	"_adv_pause_cap",
188 	"_drain_max",
189 	"_msi_cnt",
190 	"_rx_intr_coalesce_blank_time",
191 	"_tx_intr_coalesce_blank_time",
192 	"_rx_intr_coalesce_pkt_cnt",
193 	"_tx_intr_coalesce_pkt_cnt",
194 	NULL
195 };
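/*
 * The leading underscore marks these as driver-private MAC properties;
 * they are normally inspected and changed with the dladm(1M)
 * show-linkprop and set-linkprop subcommands rather than through a
 * public property id.
 */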
196 
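/* All-zeroes station address, used to vacate a unicast slot (see bge_remmac) */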
197 uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
198 /*
199  * ========== Transmit and receive ring reinitialisation ==========
200  */
201 
202 /*
203  * These <reinit> routines each reset the specified ring to an initial
204  * state, assuming that the corresponding <init> routine has already
205  * been called exactly once.
206  */
207 
208 static void
209 bge_reinit_send_ring(send_ring_t *srp)
210 {
211 	bge_queue_t *txbuf_queue;
212 	bge_queue_item_t *txbuf_head;
213 	sw_txbuf_t *txbuf;
214 	sw_sbd_t *ssbdp;
215 	uint32_t slot;
216 
217 	/*
218 	 * Reinitialise control variables ...
219 	 */
220 	srp->tx_flow = 0;
221 	srp->tx_next = 0;
222 	srp->txfill_next = 0;
223 	srp->tx_free = srp->desc.nslots;
224 	ASSERT(mutex_owned(srp->tc_lock));
225 	srp->tc_next = 0;
226 	srp->txpkt_next = 0;
227 	srp->tx_block = 0;
228 	srp->tx_nobd = 0;
229 	srp->tx_nobuf = 0;
230 
231 	/*
232 	 * Initialize the tx buffer push queue
233 	 */
234 	mutex_enter(srp->freetxbuf_lock);
235 	mutex_enter(srp->txbuf_lock);
236 	txbuf_queue = &srp->freetxbuf_queue;
237 	txbuf_queue->head = NULL;
238 	txbuf_queue->count = 0;
239 	txbuf_queue->lock = srp->freetxbuf_lock;
240 	srp->txbuf_push_queue = txbuf_queue;
241 
242 	/*
243 	 * Initialize the tx buffer pop queue
244 	 */
245 	txbuf_queue = &srp->txbuf_queue;
246 	txbuf_queue->head = NULL;
247 	txbuf_queue->count = 0;
248 	txbuf_queue->lock = srp->txbuf_lock;
249 	srp->txbuf_pop_queue = txbuf_queue;
250 	txbuf_head = srp->txbuf_head;
251 	txbuf = srp->txbuf;
252 	for (slot = 0; slot < srp->tx_buffers; ++slot) {
253 		txbuf_head->item = txbuf;
254 		txbuf_head->next = txbuf_queue->head;
255 		txbuf_queue->head = txbuf_head;
256 		txbuf_queue->count++;
257 		txbuf++;
258 		txbuf_head++;
259 	}
260 	mutex_exit(srp->txbuf_lock);
261 	mutex_exit(srp->freetxbuf_lock);
262 
263 	/*
264 	 * Zero and sync all the h/w Send Buffer Descriptors
265 	 */
266 	DMA_ZERO(srp->desc);
267 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
268 	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
269 	ssbdp = srp->sw_sbds;
270 	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
271 		ssbdp->pbuf = NULL;
272 }
273 
274 static void
275 bge_reinit_recv_ring(recv_ring_t *rrp)
276 {
277 	/*
278 	 * Reinitialise control variables ...
279 	 */
280 	rrp->rx_next = 0;
281 }
282 
283 static void
284 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
285 {
286 	bge_rbd_t *hw_rbd_p;
287 	sw_rbd_t *srbdp;
288 	uint32_t bufsize;
289 	uint32_t nslots;
290 	uint32_t slot;
291 
292 	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
293 		RBD_FLAG_STD_RING,
294 		RBD_FLAG_JUMBO_RING,
295 		RBD_FLAG_MINI_RING
296 	};
297 
298 	/*
299 	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
300 	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
301 	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
302 	 * should be zeroed, and so don't need to be set up specifically
303 	 * once the whole area has been cleared.
304 	 */
305 	DMA_ZERO(brp->desc);
306 
307 	hw_rbd_p = DMA_VPTR(brp->desc);
308 	nslots = brp->desc.nslots;
309 	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
310 	bufsize = brp->buf[0].size;
311 	srbdp = brp->sw_rbds;
312 	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
313 		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
314 		hw_rbd_p->index = (uint16_t)slot;
315 		hw_rbd_p->len = (uint16_t)bufsize;
316 		hw_rbd_p->opaque = srbdp->pbuf.token;
317 		hw_rbd_p->flags |= ring_type_flag[ring];
318 	}
319 
320 	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);
321 
322 	/*
323 	 * Finally, reinitialise the ring control variables ...
324 	 */
325 	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
326 }
327 
328 /*
329  * Reinitialize all rings
330  */
331 static void
332 bge_reinit_rings(bge_t *bgep)
333 {
334 	uint32_t ring;
335 
336 	ASSERT(mutex_owned(bgep->genlock));
337 
338 	/*
339 	 * Send Rings ...
340 	 */
341 	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
342 		bge_reinit_send_ring(&bgep->send[ring]);
343 
344 	/*
345 	 * Receive Return Rings ...
346 	 */
347 	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
348 		bge_reinit_recv_ring(&bgep->recv[ring]);
349 
350 	/*
351 	 * Receive Producer Rings ...
352 	 */
353 	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
354 		bge_reinit_buff_ring(&bgep->buff[ring], ring);
355 }
356 
357 /*
358  * ========== Internal state management entry points ==========
359  */
360 
361 #undef	BGE_DBG
362 #define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/
363 
364 /*
365  * These routines provide all the functionality required by the
366  * corresponding GLD entry points, but don't update the GLD state
367  * so they can be called internally without disturbing our record
368  * of what GLD thinks we should be doing ...
369  */
370 
371 /*
372  *	bge_reset() -- reset h/w & rings to initial state
373  */
374 static int
375 #ifdef BGE_IPMI_ASF
376 bge_reset(bge_t *bgep, uint_t asf_mode)
377 #else
378 bge_reset(bge_t *bgep)
379 #endif
380 {
381 	uint32_t	ring;
382 	int retval;
383 
384 	BGE_TRACE(("bge_reset($%p)", (void *)bgep));
385 
386 	ASSERT(mutex_owned(bgep->genlock));
387 
388 	/*
389 	 * Grab all the other mutexes in the world (this should
390 	 * ensure no other threads are manipulating driver state)
391 	 */
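	/*
	 * The acquisition order is rx, buff, err, tx, tc; the releases
	 * below drop the same lock classes in the reverse order, so any
	 * thread that follows this ordering cannot deadlock against us.
	 */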
392 	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
393 		mutex_enter(bgep->recv[ring].rx_lock);
394 	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
395 		mutex_enter(bgep->buff[ring].rf_lock);
396 	rw_enter(bgep->errlock, RW_WRITER);
397 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
398 		mutex_enter(bgep->send[ring].tx_lock);
399 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
400 		mutex_enter(bgep->send[ring].tc_lock);
401 
402 #ifdef BGE_IPMI_ASF
403 	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
404 #else
405 	retval = bge_chip_reset(bgep, B_TRUE);
406 #endif
407 	bge_reinit_rings(bgep);
408 
409 	/*
410 	 * Free the world ...
411 	 */
412 	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
413 		mutex_exit(bgep->send[ring].tc_lock);
414 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
415 		mutex_exit(bgep->send[ring].tx_lock);
416 	rw_exit(bgep->errlock);
417 	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
418 		mutex_exit(bgep->buff[ring].rf_lock);
419 	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
420 		mutex_exit(bgep->recv[ring].rx_lock);
421 
422 	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
423 	return (retval);
424 }
425 
426 /*
427  *	bge_stop() -- stop processing, don't reset h/w or rings
428  */
429 static void
430 bge_stop(bge_t *bgep)
431 {
432 	BGE_TRACE(("bge_stop($%p)", (void *)bgep));
433 
434 	ASSERT(mutex_owned(bgep->genlock));
435 
436 #ifdef BGE_IPMI_ASF
437 	if (bgep->asf_enabled) {
438 		bgep->asf_pseudostop = B_TRUE;
439 	} else {
440 #endif
441 		bge_chip_stop(bgep, B_FALSE);
442 #ifdef BGE_IPMI_ASF
443 	}
444 #endif
445 
446 	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
447 }
448 
449 /*
450  *	bge_start() -- start transmitting/receiving
451  */
452 static int
453 bge_start(bge_t *bgep, boolean_t reset_phys)
454 {
455 	int retval;
456 
457 	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));
458 
459 	ASSERT(mutex_owned(bgep->genlock));
460 
461 	/*
462 	 * Start chip processing, including enabling interrupts
463 	 */
464 	retval = bge_chip_start(bgep, reset_phys);
465 
466 	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
467 	return (retval);
468 }
469 
470 /*
471  * bge_restart - restart transmitting/receiving after error or suspend
472  */
473 int
474 bge_restart(bge_t *bgep, boolean_t reset_phys)
475 {
476 	int retval = DDI_SUCCESS;
477 	ASSERT(mutex_owned(bgep->genlock));
478 
479 #ifdef BGE_IPMI_ASF
480 	if (bgep->asf_enabled) {
481 		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
482 			retval = DDI_FAILURE;
483 	} else
484 		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
485 			retval = DDI_FAILURE;
486 #else
487 	if (bge_reset(bgep) != DDI_SUCCESS)
488 		retval = DDI_FAILURE;
489 #endif
490 	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
491 		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
492 			retval = DDI_FAILURE;
493 		bgep->watchdog = 0;
494 		ddi_trigger_softintr(bgep->drain_id);
495 	}
496 
497 	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
498 	return (retval);
499 }
500 
501 
502 /*
503  * ========== Nemo-required management entry points ==========
504  */
505 
506 #undef	BGE_DBG
507 #define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/
508 
509 /*
510  *	bge_m_stop() -- stop transmitting/receiving
511  */
512 static void
513 bge_m_stop(void *arg)
514 {
515 	bge_t *bgep = arg;		/* private device info	*/
516 	send_ring_t *srp;
517 	uint32_t ring;
518 
519 	BGE_TRACE(("bge_m_stop($%p)", arg));
520 
521 	/*
522 	 * Just stop processing, then record new GLD state
523 	 */
524 	mutex_enter(bgep->genlock);
525 	if (!(bgep->progress & PROGRESS_INTR)) {
526 		/* can happen during autorecovery */
527 		bgep->bge_chip_state = BGE_CHIP_STOPPED;
528 	} else
529 		bge_stop(bgep);
530 
531 	bgep->link_state = LINK_STATE_UNKNOWN;
532 	mac_link_update(bgep->mh, bgep->link_state);
533 
534 	/*
535 	 * Free the possible tx buffers allocated in tx process.
536 	 */
537 #ifdef BGE_IPMI_ASF
538 	if (!bgep->asf_pseudostop)
539 #endif
540 	{
541 		rw_enter(bgep->errlock, RW_WRITER);
542 		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
543 			srp = &bgep->send[ring];
544 			mutex_enter(srp->tx_lock);
545 			if (srp->tx_array > 1)
546 				bge_free_txbuf_arrays(srp);
547 			mutex_exit(srp->tx_lock);
548 		}
549 		rw_exit(bgep->errlock);
550 	}
551 	bgep->bge_mac_state = BGE_MAC_STOPPED;
552 	BGE_DEBUG(("bge_m_stop($%p) done", arg));
553 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
554 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
555 	mutex_exit(bgep->genlock);
556 }
557 
558 /*
559  *	bge_m_start() -- start transmitting/receiving
560  */
561 static int
562 bge_m_start(void *arg)
563 {
564 	bge_t *bgep = arg;		/* private device info	*/
565 
566 	BGE_TRACE(("bge_m_start($%p)", arg));
567 
568 	/*
569 	 * Start processing and record new GLD state
570 	 */
571 	mutex_enter(bgep->genlock);
572 	if (!(bgep->progress & PROGRESS_INTR)) {
573 		/* can happen during autorecovery */
574 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
575 		mutex_exit(bgep->genlock);
576 		return (EIO);
577 	}
578 #ifdef BGE_IPMI_ASF
579 	if (bgep->asf_enabled) {
580 		if ((bgep->asf_status == ASF_STAT_RUN) &&
581 		    (bgep->asf_pseudostop)) {
582 			bgep->bge_mac_state = BGE_MAC_STARTED;
583 			/* forcing a mac link update here */
584 			bge_phys_check(bgep);
585 			bgep->link_state = (bgep->param_link_up) ?
586 			    LINK_STATE_UP : LINK_STATE_DOWN;
587 			mac_link_update(bgep->mh, bgep->link_state);
588 			mutex_exit(bgep->genlock);
589 			return (0);
590 		}
591 	}
592 	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
593 #else
594 	if (bge_reset(bgep) != DDI_SUCCESS) {
595 #endif
596 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
597 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
598 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
599 		mutex_exit(bgep->genlock);
600 		return (EIO);
601 	}
602 	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
603 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
604 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
605 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
606 		mutex_exit(bgep->genlock);
607 		return (EIO);
608 	}
609 	bgep->watchdog = 0;
610 	bgep->bge_mac_state = BGE_MAC_STARTED;
611 	BGE_DEBUG(("bge_m_start($%p) done", arg));
612 
613 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
614 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
615 		mutex_exit(bgep->genlock);
616 		return (EIO);
617 	}
618 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
619 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
620 		mutex_exit(bgep->genlock);
621 		return (EIO);
622 	}
623 #ifdef BGE_IPMI_ASF
624 	if (bgep->asf_enabled) {
625 		if (bgep->asf_status != ASF_STAT_RUN) {
626 			/* start ASF heart beat */
627 			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
628 			    (void *)bgep,
629 			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
630 			bgep->asf_status = ASF_STAT_RUN;
631 		}
632 	}
633 #endif
634 	mutex_exit(bgep->genlock);
635 
636 	return (0);
637 }
638 
639 /*
640  *	bge_unicst_set() -- set the physical network address
641  */
642 static int
643 bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
644 {
645 	bge_t *bgep = arg;		/* private device info	*/
646 
647 	BGE_TRACE(("bge_unicst_set($%p, %s)", arg,
648 	    ether_sprintf((void *)macaddr)));
649 	/*
650 	 * Remember the new current address in the driver state
651 	 * Sync the chip's idea of the address too ...
652 	 */
653 	mutex_enter(bgep->genlock);
654 	if (!(bgep->progress & PROGRESS_INTR)) {
655 		/* can happen during autorecovery */
656 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
657 		mutex_exit(bgep->genlock);
658 		return (EIO);
659 	}
660 	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
661 #ifdef BGE_IPMI_ASF
662 	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
663 #else
664 	if (bge_chip_sync(bgep) == DDI_FAILURE) {
665 #endif
666 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
667 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
668 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
669 		mutex_exit(bgep->genlock);
670 		return (EIO);
671 	}
672 #ifdef BGE_IPMI_ASF
673 	if (bgep->asf_enabled) {
674 		/*
675 		 * The above bge_chip_sync() function wrote the ethernet MAC
676 		 * address registers, which destroyed the IPMI/ASF sideband.
677 		 * Here, we have to reset the chip to make the sideband work.
678 		 */
679 		if (bgep->asf_status == ASF_STAT_RUN) {
680 			/*
681 			 * We must stop the ASF heartbeat before bge_chip_stop(),
682 			 * otherwise some computers (e.g. the IBM HS20 blade
683 			 * server) may crash.
684 			 */
685 			bge_asf_update_status(bgep);
686 			bge_asf_stop_timer(bgep);
687 			bgep->asf_status = ASF_STAT_STOP;
688 
689 			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
690 		}
691 		bge_chip_stop(bgep, B_FALSE);
692 
693 		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
694 			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
695 			(void) bge_check_acc_handle(bgep, bgep->io_handle);
696 			ddi_fm_service_impact(bgep->devinfo,
697 			    DDI_SERVICE_DEGRADED);
698 			mutex_exit(bgep->genlock);
699 			return (EIO);
700 		}
701 
702 		/*
703 		 * Start our ASF heartbeat counter as soon as possible.
704 		 */
705 		if (bgep->asf_status != ASF_STAT_RUN) {
706 			/* start the ASF heartbeat */
707 			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
708 			    (void *)bgep,
709 			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
710 			bgep->asf_status = ASF_STAT_RUN;
711 		}
712 	}
713 #endif
714 	BGE_DEBUG(("bge_unicst_set($%p) done", arg));
715 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
716 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
717 		mutex_exit(bgep->genlock);
718 		return (EIO);
719 	}
720 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
721 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
722 		mutex_exit(bgep->genlock);
723 		return (EIO);
724 	}
725 	mutex_exit(bgep->genlock);
726 
727 	return (0);
728 }
729 
730 extern void bge_wake_factotum(bge_t *);
731 
732 static boolean_t
733 bge_param_locked(mac_prop_id_t pr_num)
734 {
735 	/*
736 	 * All adv_* parameters are locked (read-only) while
737 	 * the device is in any sort of loopback mode ...
738 	 */
739 	switch (pr_num) {
740 		case MAC_PROP_ADV_1000FDX_CAP:
741 		case MAC_PROP_EN_1000FDX_CAP:
742 		case MAC_PROP_ADV_1000HDX_CAP:
743 		case MAC_PROP_EN_1000HDX_CAP:
744 		case MAC_PROP_ADV_100FDX_CAP:
745 		case MAC_PROP_EN_100FDX_CAP:
746 		case MAC_PROP_ADV_100HDX_CAP:
747 		case MAC_PROP_EN_100HDX_CAP:
748 		case MAC_PROP_ADV_10FDX_CAP:
749 		case MAC_PROP_EN_10FDX_CAP:
750 		case MAC_PROP_ADV_10HDX_CAP:
751 		case MAC_PROP_EN_10HDX_CAP:
752 		case MAC_PROP_AUTONEG:
753 		case MAC_PROP_FLOWCTRL:
754 			return (B_TRUE);
755 	}
756 	return (B_FALSE);
757 }
758 /*
759  * callback functions for set/get of properties
760  */
761 static int
762 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
763     uint_t pr_valsize, const void *pr_val)
764 {
765 	bge_t *bgep = barg;
766 	int err = 0;
767 	uint32_t cur_mtu, new_mtu;
768 	link_flowctrl_t fl;
769 
770 	mutex_enter(bgep->genlock);
771 	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
772 	    bge_param_locked(pr_num)) {
773 		/*
774 		 * All adv_* parameters are locked (read-only)
775 		 * while the device is in any sort of loopback mode.
776 		 */
777 		mutex_exit(bgep->genlock);
778 		return (EBUSY);
779 	}
780 	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
781 	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
782 	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
783 	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
784 	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
785 		/*
786 		 * these properties are read/write on copper,
787 		 * read-only and 0 on serdes
788 		 */
789 		mutex_exit(bgep->genlock);
790 		return (ENOTSUP);
791 	}
792 	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
793 	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
794 	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
795 		mutex_exit(bgep->genlock);
796 		return (ENOTSUP);
797 	}
798 
799 	switch (pr_num) {
800 		case MAC_PROP_EN_1000FDX_CAP:
801 			bgep->param_en_1000fdx = *(uint8_t *)pr_val;
802 			bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
803 			goto reprogram;
804 		case MAC_PROP_EN_1000HDX_CAP:
805 			bgep->param_en_1000hdx = *(uint8_t *)pr_val;
806 			bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
807 			goto reprogram;
808 		case MAC_PROP_EN_100FDX_CAP:
809 			bgep->param_en_100fdx = *(uint8_t *)pr_val;
810 			bgep->param_adv_100fdx = *(uint8_t *)pr_val;
811 			goto reprogram;
812 		case MAC_PROP_EN_100HDX_CAP:
813 			bgep->param_en_100hdx = *(uint8_t *)pr_val;
814 			bgep->param_adv_100hdx = *(uint8_t *)pr_val;
815 			goto reprogram;
816 		case MAC_PROP_EN_10FDX_CAP:
817 			bgep->param_en_10fdx = *(uint8_t *)pr_val;
818 			bgep->param_adv_10fdx = *(uint8_t *)pr_val;
819 			goto reprogram;
820 		case MAC_PROP_EN_10HDX_CAP:
821 			bgep->param_en_10hdx = *(uint8_t *)pr_val;
822 			bgep->param_adv_10hdx = *(uint8_t *)pr_val;
823 reprogram:
824 			if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
825 				err = EINVAL;
826 			break;
827 		case MAC_PROP_ADV_1000FDX_CAP:
828 		case MAC_PROP_ADV_1000HDX_CAP:
829 		case MAC_PROP_ADV_100FDX_CAP:
830 		case MAC_PROP_ADV_100HDX_CAP:
831 		case MAC_PROP_ADV_10FDX_CAP:
832 		case MAC_PROP_ADV_10HDX_CAP:
833 		case MAC_PROP_STATUS:
834 		case MAC_PROP_SPEED:
835 		case MAC_PROP_DUPLEX:
836 			err = ENOTSUP; /* read-only prop. Can't set this */
837 			break;
838 		case MAC_PROP_AUTONEG:
839 			bgep->param_adv_autoneg = *(uint8_t *)pr_val;
840 			if (bge_reprogram(bgep) == IOC_INVAL)
841 				err = EINVAL;
842 			break;
843 		case MAC_PROP_MTU:
844 			cur_mtu = bgep->chipid.default_mtu;
845 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
846 
847 			if (new_mtu == cur_mtu) {
848 				err = 0;
849 				break;
850 			}
851 			if (new_mtu < BGE_DEFAULT_MTU ||
852 			    new_mtu > BGE_MAXIMUM_MTU) {
853 				err = EINVAL;
854 				break;
855 			}
856 			if ((new_mtu > BGE_DEFAULT_MTU) &&
857 			    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
858 				err = EINVAL;
859 				break;
860 			}
861 			if (bgep->bge_mac_state == BGE_MAC_STARTED) {
862 				err = EBUSY;
863 				break;
864 			}
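			/*
			 * Commit the new MTU, then hand the disruptive
			 * rebuild to the factotum: flagging bge_dma_error
			 * and manual_reset before waking it is intended to
			 * make the factotum re-allocate the DMA buffers at
			 * the new size.
			 */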
865 			bgep->chipid.default_mtu = new_mtu;
866 			if (bge_chip_id_init(bgep)) {
867 				err = EINVAL;
868 				break;
869 			}
870 			bgep->bge_dma_error = B_TRUE;
871 			bgep->manual_reset = B_TRUE;
872 			bge_chip_stop(bgep, B_TRUE);
873 			bge_wake_factotum(bgep);
874 			err = 0;
875 			break;
876 		case MAC_PROP_FLOWCTRL:
877 			bcopy(pr_val, &fl, sizeof (fl));
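			/*
			 * Map the requested mode onto the 802.3 PAUSE and
			 * ASM_DIR advertisement bits (cf. Annex 28B):
			 * <pause, asym_pause> is <0,0> for none, <1,1> for
			 * rx, <0,1> for tx and <1,0> for bidirectional.
			 */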
878 			switch (fl) {
879 			default:
880 				err = ENOTSUP;
881 				break;
882 			case LINK_FLOWCTRL_NONE:
883 				bgep->param_adv_pause = 0;
884 				bgep->param_adv_asym_pause = 0;
885 
886 				bgep->param_link_rx_pause = B_FALSE;
887 				bgep->param_link_tx_pause = B_FALSE;
888 				break;
889 			case LINK_FLOWCTRL_RX:
890 				bgep->param_adv_pause = 1;
891 				bgep->param_adv_asym_pause = 1;
892 
893 				bgep->param_link_rx_pause = B_TRUE;
894 				bgep->param_link_tx_pause = B_FALSE;
895 				break;
896 			case LINK_FLOWCTRL_TX:
897 				bgep->param_adv_pause = 0;
898 				bgep->param_adv_asym_pause = 1;
899 
900 				bgep->param_link_rx_pause = B_FALSE;
901 				bgep->param_link_tx_pause = B_TRUE;
902 				break;
903 			case LINK_FLOWCTRL_BI:
904 				bgep->param_adv_pause = 1;
905 				bgep->param_adv_asym_pause = 0;
906 
907 				bgep->param_link_rx_pause = B_TRUE;
908 				bgep->param_link_tx_pause = B_TRUE;
909 				break;
910 			}
911 
912 			if (err == 0) {
913 				if (bge_reprogram(bgep) == IOC_INVAL)
914 					err = EINVAL;
915 			}
916 
917 			break;
918 		case MAC_PROP_PRIVATE:
919 			err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
920 			    pr_val);
921 			break;
922 		default:
923 			err = ENOTSUP;
924 			break;
925 	}
926 	mutex_exit(bgep->genlock);
927 	return (err);
928 }
929 
930 /* ARGSUSED */
931 static int
932 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
933     uint_t pr_valsize, void *pr_val)
934 {
935 	bge_t *bgep = barg;
936 	int err = 0;
937 
938 	switch (pr_num) {
939 		case MAC_PROP_DUPLEX:
940 			ASSERT(pr_valsize >= sizeof (link_duplex_t));
941 			bcopy(&bgep->param_link_duplex, pr_val,
942 			    sizeof (link_duplex_t));
943 			break;
944 		case MAC_PROP_SPEED: {
945 			uint64_t speed = bgep->param_link_speed * 1000000ull;
946 
947 			ASSERT(pr_valsize >= sizeof (speed));
948 			bcopy(&speed, pr_val, sizeof (speed));
949 			break;
950 		}
951 		case MAC_PROP_STATUS:
952 			ASSERT(pr_valsize >= sizeof (link_state_t));
953 			bcopy(&bgep->link_state, pr_val,
954 			    sizeof (link_state_t));
955 			break;
956 		case MAC_PROP_AUTONEG:
957 			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
958 			break;
959 		case MAC_PROP_FLOWCTRL: {
960 			link_flowctrl_t fl;
961 
962 			ASSERT(pr_valsize >= sizeof (fl));
963 
964 			if (bgep->param_link_rx_pause &&
965 			    !bgep->param_link_tx_pause)
966 				fl = LINK_FLOWCTRL_RX;
967 
968 			if (!bgep->param_link_rx_pause &&
969 			    !bgep->param_link_tx_pause)
970 				fl = LINK_FLOWCTRL_NONE;
971 
972 			if (!bgep->param_link_rx_pause &&
973 			    bgep->param_link_tx_pause)
974 				fl = LINK_FLOWCTRL_TX;
975 
976 			if (bgep->param_link_rx_pause &&
977 			    bgep->param_link_tx_pause)
978 				fl = LINK_FLOWCTRL_BI;
979 			bcopy(&fl, pr_val, sizeof (fl));
980 			break;
981 		}
982 		case MAC_PROP_ADV_1000FDX_CAP:
983 			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
984 			break;
985 		case MAC_PROP_EN_1000FDX_CAP:
986 			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
987 			break;
988 		case MAC_PROP_ADV_1000HDX_CAP:
989 			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
990 			break;
991 		case MAC_PROP_EN_1000HDX_CAP:
992 			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
993 			break;
994 		case MAC_PROP_ADV_100FDX_CAP:
995 			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
996 			break;
997 		case MAC_PROP_EN_100FDX_CAP:
998 			*(uint8_t *)pr_val = bgep->param_en_100fdx;
999 			break;
1000 		case MAC_PROP_ADV_100HDX_CAP:
1001 			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
1002 			break;
1003 		case MAC_PROP_EN_100HDX_CAP:
1004 			*(uint8_t *)pr_val = bgep->param_en_100hdx;
1005 			break;
1006 		case MAC_PROP_ADV_10FDX_CAP:
1007 			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
1008 			break;
1009 		case MAC_PROP_EN_10FDX_CAP:
1010 			*(uint8_t *)pr_val = bgep->param_en_10fdx;
1011 			break;
1012 		case MAC_PROP_ADV_10HDX_CAP:
1013 			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
1014 			break;
1015 		case MAC_PROP_EN_10HDX_CAP:
1016 			*(uint8_t *)pr_val = bgep->param_en_10hdx;
1017 			break;
1018 		case MAC_PROP_ADV_100T4_CAP:
1019 		case MAC_PROP_EN_100T4_CAP:
1020 			*(uint8_t *)pr_val = 0;
1021 			break;
1022 		case MAC_PROP_PRIVATE:
1023 			err = bge_get_priv_prop(bgep, pr_name,
1024 			    pr_valsize, pr_val);
1025 			return (err);
1026 		default:
1027 			return (ENOTSUP);
1028 	}
1029 	return (0);
1030 }
1031 
1032 static void
1033 bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1034     mac_prop_info_handle_t prh)
1035 {
1036 	bge_t *bgep = barg;
1037 	int flags = bgep->chipid.flags;
1038 
1039 	/*
1040 	 * By default permissions are read/write unless specified
1041 	 * otherwise by the driver.
1042 	 */
1043 
1044 	switch (pr_num) {
1045 	case MAC_PROP_DUPLEX:
1046 	case MAC_PROP_SPEED:
1047 	case MAC_PROP_STATUS:
1048 	case MAC_PROP_ADV_1000FDX_CAP:
1049 	case MAC_PROP_ADV_1000HDX_CAP:
1050 	case MAC_PROP_ADV_100FDX_CAP:
1051 	case MAC_PROP_ADV_100HDX_CAP:
1052 	case MAC_PROP_ADV_10FDX_CAP:
1053 	case MAC_PROP_ADV_10HDX_CAP:
1054 	case MAC_PROP_ADV_100T4_CAP:
1055 	case MAC_PROP_EN_100T4_CAP:
1056 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1057 		break;
1058 
1059 	case MAC_PROP_EN_1000FDX_CAP:
1060 	case MAC_PROP_EN_1000HDX_CAP:
1061 		if (DEVICE_5906_SERIES_CHIPSETS(bgep))
1062 			mac_prop_info_set_default_uint8(prh, 0);
1063 		else
1064 			mac_prop_info_set_default_uint8(prh, 1);
1065 		break;
1066 
1067 	case MAC_PROP_EN_100FDX_CAP:
1068 	case MAC_PROP_EN_100HDX_CAP:
1069 	case MAC_PROP_EN_10FDX_CAP:
1070 	case MAC_PROP_EN_10HDX_CAP:
1071 		mac_prop_info_set_default_uint8(prh,
1072 		    (flags & CHIP_FLAG_SERDES) ? 0 : 1);
1073 		break;
1074 
1075 	case MAC_PROP_AUTONEG:
1076 		mac_prop_info_set_default_uint8(prh, 1);
1077 		break;
1078 
1079 	case MAC_PROP_FLOWCTRL:
1080 		mac_prop_info_set_default_link_flowctrl(prh,
1081 		    LINK_FLOWCTRL_BI);
1082 		break;
1083 
1084 	case MAC_PROP_MTU:
1085 		mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
1086 		    (flags & CHIP_FLAG_NO_JUMBO) ?
1087 		    BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
1088 		break;
1089 
1090 	case MAC_PROP_PRIVATE:
1091 		bge_priv_propinfo(pr_name, prh);
1092 		break;
1093 	}
1094 
1095 	mutex_enter(bgep->genlock);
1096 	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
1097 	    bge_param_locked(pr_num)) ||
1098 	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
1099 	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
1100 	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
1101 	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
1102 	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
1103 	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
1104 	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
1105 	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
1106 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1107 	mutex_exit(bgep->genlock);
1108 }
1109 
1110 /* ARGSUSED */
1111 static int
1112 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
1113     const void *pr_val)
1114 {
1115 	int err = 0;
1116 	long result;
1117 
1118 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1119 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1120 		if (result > 1 || result < 0) {
1121 			err = EINVAL;
1122 		} else {
1123 			bgep->param_adv_pause = (uint32_t)result;
1124 			if (bge_reprogram(bgep) == IOC_INVAL)
1125 				err = EINVAL;
1126 		}
1127 		return (err);
1128 	}
1129 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1130 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1131 		if (result > 1 || result < 0) {
1132 			err = EINVAL;
1133 		} else {
1134 			bgep->param_adv_asym_pause = (uint32_t)result;
1135 			if (bge_reprogram(bgep) == IOC_INVAL)
1136 				err = EINVAL;
1137 		}
1138 		return (err);
1139 	}
1140 	if (strcmp(pr_name, "_drain_max") == 0) {
1141 
1142 		/*
1143 		 * On the Tx side, the h/w register must be updated to
1144 		 * trigger transmission of each packet. The drain_max
1145 		 * parameter reduces those register accesses: it controls
1146 		 * the maximum number of packets we will hold before
1147 		 * updating the bge h/w to trigger the transmit. The bge
1148 		 * chipset usually has at most 512 Tx descriptors, so the
1149 		 * upper bound on drain_max is 512.
1150 		 */
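		/*
		 * A usage sketch, assuming the usual dladm(1M) mechanism
		 * for driver-private link properties and a hypothetical
		 * instance name bge0:
		 *
		 *	dladm set-linkprop -p _drain_max=64 bge0
		 */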
1151 		if (pr_val == NULL) {
1152 			err = EINVAL;
1153 			return (err);
1154 		}
1155 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1156 		if (result > 512 || result < 1)
1157 			err = EINVAL;
1158 		else {
1159 			bgep->param_drain_max = (uint32_t)result;
1160 			if (bge_reprogram(bgep) == IOC_INVAL)
1161 				err = EINVAL;
1162 		}
1163 		return (err);
1164 	}
1165 	if (strcmp(pr_name, "_msi_cnt") == 0) {
1166 
1167 		if (pr_val == NULL) {
1168 			err = EINVAL;
1169 			return (err);
1170 		}
1171 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1172 		if (result > 7 || result < 0)
1173 			err = EINVAL;
1174 		else {
1175 			bgep->param_msi_cnt = (uint32_t)result;
1176 			if (bge_reprogram(bgep) == IOC_INVAL)
1177 				err = EINVAL;
1178 		}
1179 		return (err);
1180 	}
1181 	if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
1182 		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1183 			return (EINVAL);
1184 		if (result < 0)
1185 			err = EINVAL;
1186 		else {
1187 			bgep->chipid.rx_ticks_norm = (uint32_t)result;
1188 			bge_chip_coalesce_update(bgep);
1189 		}
1190 		return (err);
1191 	}
1192 
1193 	if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
1194 		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1195 			return (EINVAL);
1196 
1197 		if (result < 0)
1198 			err = EINVAL;
1199 		else {
1200 			bgep->chipid.rx_count_norm = (uint32_t)result;
1201 			bge_chip_coalesce_update(bgep);
1202 		}
1203 		return (err);
1204 	}
1205 	if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
1206 		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1207 			return (EINVAL);
1208 		if (result < 0)
1209 			err = EINVAL;
1210 		else {
1211 			bgep->chipid.tx_ticks_norm = (uint32_t)result;
1212 			bge_chip_coalesce_update(bgep);
1213 		}
1214 		return (err);
1215 	}
1216 
1217 	if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
1218 		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
1219 			return (EINVAL);
1220 
1221 		if (result < 0)
1222 			err = EINVAL;
1223 		else {
1224 			bgep->chipid.tx_count_norm = (uint32_t)result;
1225 			bge_chip_coalesce_update(bgep);
1226 		}
1227 		return (err);
1228 	}
1229 	return (ENOTSUP);
1230 }
1231 
1232 static int
1233 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
1234     void *pr_val)
1235 {
1236 	int value;
1237 
1238 	if (strcmp(pr_name, "_adv_pause_cap") == 0)
1239 		value = bge->param_adv_pause;
1240 	else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
1241 		value = bge->param_adv_asym_pause;
1242 	else if (strcmp(pr_name, "_drain_max") == 0)
1243 		value = bge->param_drain_max;
1244 	else if (strcmp(pr_name, "_msi_cnt") == 0)
1245 		value = bge->param_msi_cnt;
1246 	else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
1247 		value = bge->chipid.rx_ticks_norm;
1248 	else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
1249 		value = bge->chipid.tx_ticks_norm;
1250 	else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
1251 		value = bge->chipid.rx_count_norm;
1252 	else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
1253 		value = bge->chipid.tx_count_norm;
1254 	else
1255 		return (ENOTSUP);
1256 
1257 	(void) snprintf(pr_val, pr_valsize, "%d", value);
1258 	return (0);
1259 }
1260 
1261 static void
1262 bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph)
1263 {
1264 	char valstr[64];
1265 	int value;
1266 
1267 	if (strcmp(pr_name, "_adv_pause_cap") == 0)
1268 		value = 1;
1269 	else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
1270 		value = 1;
1271 	else if (strcmp(pr_name, "_drain_max") == 0)
1272 		value = 64;
1273 	else if (strcmp(pr_name, "_msi_cnt") == 0)
1274 		value = 0;
1275 	else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
1276 		value = bge_rx_ticks_norm;
1277 	else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
1278 		value = bge_tx_ticks_norm;
1279 	else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
1280 		value = bge_rx_count_norm;
1281 	else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
1282 		value = bge_tx_count_norm;
1283 	else
1284 		return;
1285 
1286 	(void) snprintf(valstr, sizeof (valstr), "%d", value);
1287 	mac_prop_info_set_default_str(mph, valstr);
1288 }
1289 
1290 
1291 static int
1292 bge_m_unicst(void *arg, const uint8_t *mac_addr)
1293 {
1294 	bge_t *bgep = arg;
1295 	int i;
1296 
1297 	/* XXX sets the mac address for all ring slots... OK? */
1298 	for (i = 0; i < MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); i++)
1299 		bge_addmac(&bgep->recv[i], mac_addr);
1300 
1301 	return (0);
1302 }
1303 
1304 
1305 /*
1306  * Compute the index of the required bit in the multicast hash map.
1307  * This must mirror the way the hardware actually does it!
1308  * See Broadcom document 570X-PG102-R page 125.
1309  */
1310 static uint32_t
1311 bge_hash_index(const uint8_t *mca)
1312 {
1313 	uint32_t hash;
1314 
1315 	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);
1316 
1317 	return (hash);
1318 }
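/*
 * The caller reduces this CRC modulo BGE_HASH_TABLE_SIZE; that index
 * then selects word index/32 and bit index%32 of mcast_hash[],
 * mirroring the layout of the chip's 32-bit hash registers (an
 * assumption based on the 570X document cited above).
 */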
1319 
1320 /*
1321  *	bge_m_multicst() -- enable/disable a multicast address
1322  */
1323 static int
1324 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1325 {
1326 	bge_t *bgep = arg;		/* private device info	*/
1327 	uint32_t hash;
1328 	uint32_t index;
1329 	uint32_t word;
1330 	uint32_t bit;
1331 	uint8_t *refp;
1332 
1333 	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
1334 	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1335 
1336 	/*
1337 	 * Precalculate all required masks, pointers etc ...
1338 	 */
1339 	hash = bge_hash_index(mca);
1340 	index = hash % BGE_HASH_TABLE_SIZE;
1341 	word = index/32u;
1342 	bit = 1 << (index % 32u);
1343 	refp = &bgep->mcast_refs[index];
1344 
1345 	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
1346 	    hash, index, word, bit, *refp));
1347 
1348 	/*
1349 	 * We must set the appropriate bit in the hash map (and the
1350 	 * corresponding h/w register) when the refcount goes from 0
1351 	 * to >0, and clear it when the last ref goes away (refcount
1352 	 * goes from >0 back to 0).  If we change the hash map, we
1353 	 * must also update the chip's hardware map registers.
1354 	 */
1355 	mutex_enter(bgep->genlock);
1356 	if (!(bgep->progress & PROGRESS_INTR)) {
1357 		/* can happen during autorecovery */
1358 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1359 		mutex_exit(bgep->genlock);
1360 		return (EIO);
1361 	}
1362 	if (add) {
1363 		if ((*refp)++ == 0) {
1364 			bgep->mcast_hash[word] |= bit;
1365 #ifdef BGE_IPMI_ASF
1366 			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
1367 #else
1368 			if (bge_chip_sync(bgep) == DDI_FAILURE) {
1369 #endif
1370 				(void) bge_check_acc_handle(bgep,
1371 				    bgep->cfg_handle);
1372 				(void) bge_check_acc_handle(bgep,
1373 				    bgep->io_handle);
1374 				ddi_fm_service_impact(bgep->devinfo,
1375 				    DDI_SERVICE_DEGRADED);
1376 				mutex_exit(bgep->genlock);
1377 				return (EIO);
1378 			}
1379 		}
1380 	} else {
1381 		if (--(*refp) == 0) {
1382 			bgep->mcast_hash[word] &= ~bit;
1383 #ifdef BGE_IPMI_ASF
1384 			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
1385 #else
1386 			if (bge_chip_sync(bgep) == DDI_FAILURE) {
1387 #endif
1388 				(void) bge_check_acc_handle(bgep,
1389 				    bgep->cfg_handle);
1390 				(void) bge_check_acc_handle(bgep,
1391 				    bgep->io_handle);
1392 				ddi_fm_service_impact(bgep->devinfo,
1393 				    DDI_SERVICE_DEGRADED);
1394 				mutex_exit(bgep->genlock);
1395 				return (EIO);
1396 			}
1397 		}
1398 	}
1399 	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
1400 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
1401 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1402 		mutex_exit(bgep->genlock);
1403 		return (EIO);
1404 	}
1405 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
1406 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1407 		mutex_exit(bgep->genlock);
1408 		return (EIO);
1409 	}
1410 	mutex_exit(bgep->genlock);
1411 
1412 	return (0);
1413 }
1414 
1415 /*
1416  * bge_m_promisc() -- set or reset promiscuous mode on the board
1417  *
1418  *	Program the hardware to enable/disable promiscuous and/or
1419  *	receive-all-multicast modes.
1420  */
1421 static int
1422 bge_m_promisc(void *arg, boolean_t on)
1423 {
1424 	bge_t *bgep = arg;
1425 
1426 	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));
1427 
1428 	/*
1429 	 * Store MAC layer specified mode and pass to chip layer to update h/w
1430 	 */
1431 	mutex_enter(bgep->genlock);
1432 	if (!(bgep->progress & PROGRESS_INTR)) {
1433 		/* can happen during autorecovery */
1434 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1435 		mutex_exit(bgep->genlock);
1436 		return (EIO);
1437 	}
1438 	bgep->promisc = on;
1439 #ifdef BGE_IPMI_ASF
1440 	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
1441 #else
1442 	if (bge_chip_sync(bgep) == DDI_FAILURE) {
1443 #endif
1444 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
1445 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
1446 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1447 		mutex_exit(bgep->genlock);
1448 		return (EIO);
1449 	}
1450 	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
1451 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
1452 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1453 		mutex_exit(bgep->genlock);
1454 		return (EIO);
1455 	}
1456 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
1457 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1458 		mutex_exit(bgep->genlock);
1459 		return (EIO);
1460 	}
1461 	mutex_exit(bgep->genlock);
1462 	return (0);
1463 }
1464 
1465 #ifdef MC_RESOURCES
1466 
1467 static void
1468 bge_blank(void *arg, time_t tick_cnt, uint_t pkt_cnt)
1469 {
1470 	(void)arg;
1471 	(void)tick_cnt;
1472 	(void)pkt_cnt;
1473 }
1474 
1475 static void
1476 bge_m_resources(void *arg)
1477 {
1478 	bge_t *bgep = arg;
1479 	mac_rx_fifo_t mrf;
1480 	int i;
1481 
1482 	mrf.mrf_type              = MAC_RX_FIFO;
1483 	mrf.mrf_blank             = bge_blank;
1484 	mrf.mrf_arg               = (void *)bgep;
1485 	mrf.mrf_normal_blank_time = 25;
1486 	mrf.mrf_normal_pkt_count  = 8;
1487 
1488 	for (i = 0; i < BGE_RECV_RINGS_MAX; i++) {
1489 		bgep->macRxResourceHandles[i] =
1490 		    mac_resource_add(bgep->mh, (mac_resource_t *)&mrf);
1491 	}
1492 }
1493 
1494 #endif /* MC_RESOURCES */
1495 
1496 /*
1497  * Find the slot for the specified unicast address
1498  */
1499 int
1500 bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
1501 {
1502 	int slot;
1503 
1504 	ASSERT(mutex_owned(bgep->genlock));
1505 
1506 	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
1507 		if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
1508 			return (slot);
1509 	}
1510 
1511 	return (-1);
1512 }
1513 
1514 /*
1515  * Programs the classifier to start steering packets matching 'mac_addr' to the
1516  * specified ring 'arg'.
1517  */
1518 static int
1519 bge_addmac(void *arg, const uint8_t *mac_addr)
1520 {
1521 	recv_ring_t *rrp = (recv_ring_t *)arg;
1522 	bge_t		*bgep = rrp->bgep;
1523 	bge_recv_rule_t	*rulep = bgep->recv_rules;
1524 	bge_rule_info_t	*rinfop = NULL;
1525 	uint8_t		ring = (uint8_t)(rrp - bgep->recv) + 1;
1526 	int		i;
1527 	uint16_t	tmp16;
1528 	uint32_t	tmp32;
1529 	int		slot;
1530 	int		err;
1531 
1532 	mutex_enter(bgep->genlock);
1533 	if (bgep->unicst_addr_avail == 0) {
1534 		mutex_exit(bgep->genlock);
1535 		return (ENOSPC);
1536 	}
1537 
1538 	/*
1539 	 * First, add the unicast address to an available slot.
1540 	 */
1541 	slot = bge_unicst_find(bgep, mac_addr);
1542 	ASSERT(slot == -1);
1543 
1544 	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
1545 		if (!bgep->curr_addr[slot].set) {
1546 			bgep->curr_addr[slot].set = B_TRUE;
1547 			break;
1548 		}
1549 	}
1550 
1551 	ASSERT(slot < bgep->unicst_addr_total);
1552 	bgep->unicst_addr_avail--;
1553 	mutex_exit(bgep->genlock);
1554 
1555 	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
1556 		goto fail;
1557 
1558 	/* A rule is already here. Deny this. */
1559 	if (rrp->mac_addr_rule != NULL) {
1560 		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
1561 		goto fail;
1562 	}
1563 
1564 	/*
1565 	 * Allocate a bge_rule_info_t to keep track of which rule slots
1566 	 * are being used.
1567 	 */
1568 	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
1569 	if (rinfop == NULL) {
1570 		err = ENOMEM;
1571 		goto fail;
1572 	}
1573 
1574 	/*
1575 	 * Look for the starting slot to place the rules.
1576 	 * The two slots we reserve must be contiguous.
1577 	 */
1578 	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
1579 		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
1580 		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
1581 			break;
1582 
1583 	ASSERT(i + 1 < RECV_RULES_NUM_MAX);
1584 
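	/*
	 * Rule i matches the first four bytes of the address and rule
	 * i+1 the remaining two (the upper half of its mask_value word
	 * carries the rule mask); RECV_RULE_CTL_AND chains the pair, so
	 * both halves must match before the frame is steered to 'ring'.
	 */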
1585 	bcopy(mac_addr, &tmp32, sizeof (tmp32));
1586 	rulep[i].mask_value = ntohl(tmp32);
1587 	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
1588 	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
1589 	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);
1590 
1591 	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
1592 	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
1593 	rulep[i+1].control = RULE_DEST_MAC_2(ring);
1594 	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
1595 	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
1596 	rinfop->start = i;
1597 	rinfop->count = 2;
1598 
1599 	rrp->mac_addr_rule = rinfop;
1600 	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);
1601 
1602 	return (0);
1603 
1604 fail:
1605 	/* Clear the address just set */
1606 	(void) bge_unicst_set(bgep, zero_addr, slot);
1607 	mutex_enter(bgep->genlock);
1608 	bgep->curr_addr[slot].set = B_FALSE;
1609 	bgep->unicst_addr_avail++;
1610 	mutex_exit(bgep->genlock);
1611 
1612 	return (err);
1613 }
1614 
1615 /*
1616  * Stop classifying packets matching the MAC address to the specified ring.
1617  */
1618 static int
1619 bge_remmac(void *arg, const uint8_t *mac_addr)
1620 {
1621 	recv_ring_t	*rrp = (recv_ring_t *)arg;
1622 	bge_t		*bgep = rrp->bgep;
1623 	bge_recv_rule_t *rulep = bgep->recv_rules;
1624 	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
1625 	int		start;
1626 	int		slot;
1627 	int		err;
1628 
1629 	/*
1630 	 * Remove the MAC address from its slot.
1631 	 */
1632 	mutex_enter(bgep->genlock);
1633 	slot = bge_unicst_find(bgep, mac_addr);
1634 	if (slot == -1) {
1635 		mutex_exit(bgep->genlock);
1636 		return (EINVAL);
1637 	}
1638 
1639 	ASSERT(bgep->curr_addr[slot].set);
1640 	mutex_exit(bgep->genlock);
1641 
1642 	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
1643 		return (err);
1644 
1645 	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
1646 		return (EINVAL);
1647 
1648 	start = rinfop->start;
1649 	rulep[start].mask_value = 0;
1650 	rulep[start].control = 0;
1651 	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
1652 	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
1653 	start++;
1654 	rulep[start].mask_value = 0;
1655 	rulep[start].control = 0;
1656 	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
1657 	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
1658 
1659 	kmem_free(rinfop, sizeof (bge_rule_info_t));
1660 	rrp->mac_addr_rule = NULL;
1661 	bzero(rrp->mac_addr_val, ETHERADDRL);
1662 
1663 	mutex_enter(bgep->genlock);
1664 	bgep->curr_addr[slot].set = B_FALSE;
1665 	bgep->unicst_addr_avail++;
1666 	mutex_exit(bgep->genlock);
1667 
1668 	return (0);
1669 }
1670 
1671 
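/*
 * The MAC layer toggles each receive ring between interrupt and polling
 * mode through the two entry points below; the receive path consults
 * poll_flag to decide whether packets are delivered from the interrupt
 * handler or left for the MAC layer to pick up via bge_poll_ring().
 */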
1672 static int
1673 bge_flag_intr_enable(mac_ring_driver_t ih)
1674 {
1675 	recv_ring_t *rrp = (recv_ring_t *)ih;
1676 	bge_t *bgep = rrp->bgep;
1677 
1678 	mutex_enter(bgep->genlock);
1679 	rrp->poll_flag = 0;
1680 	mutex_exit(bgep->genlock);
1681 
1682 	return (0);
1683 }
1684 
1685 static int
1686 bge_flag_intr_disable(mac_ring_driver_t ih)
1687 {
1688 	recv_ring_t *rrp = (recv_ring_t *)ih;
1689 	bge_t *bgep = rrp->bgep;
1690 
1691 	mutex_enter(bgep->genlock);
1692 	rrp->poll_flag = 1;
1693 	mutex_exit(bgep->genlock);
1694 
1695 	return (0);
1696 }
1697 
1698 static int
1699 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1700 {
1701 	recv_ring_t *rx_ring;
1702 
1703 	rx_ring = (recv_ring_t *)rh;
1704 	mutex_enter(rx_ring->rx_lock);
1705 	rx_ring->ring_gen_num = mr_gen_num;
1706 	mutex_exit(rx_ring->rx_lock);
1707 	return (0);
1708 }
1709 
1710 
1711 /*
1712  * Callback function for the MAC layer to register all rings
1713  * for a given ring group, identified by rg_index.
1714  */
1715 void
1716 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1717     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1718 {
1719 	bge_t *bgep = arg;
1720 	mac_intr_t *mintr;
1721 
1722 	switch (rtype) {
1723 	case MAC_RING_TYPE_RX: {
1724 		recv_ring_t *rx_ring;
1725 		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1726 		    MAC_ADDRESS_REGS_MAX) && index == 0);
1727 
1728 		rx_ring = &bgep->recv[rg_index];
1729 		rx_ring->ring_handle = rh;
1730 
1731 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
1732 		infop->mri_start = bge_ring_start;
1733 		infop->mri_stop = NULL;
1734 		infop->mri_poll = bge_poll_ring;
1735 		infop->mri_stat = bge_rx_ring_stat;
1736 
1737 		mintr = &infop->mri_intr;
1738 		mintr->mi_enable = (mac_intr_enable_t)bge_flag_intr_enable;
1739 		mintr->mi_disable = (mac_intr_disable_t)bge_flag_intr_disable;
1740 
1741 		break;
1742 	}
1743 	case MAC_RING_TYPE_TX:
1744 	default:
1745 		ASSERT(0);
1746 		break;
1747 	}
1748 }
1749 
1750 /*
1751  * Fill in the infop passed as an argument with the respective
1752  * ring group info.
1753  * Each group has a single ring in it. We keep it simple
1754  * and use the same internal handle for rings and groups.
1755  */
1756 void
1757 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
1758     mac_group_info_t *infop, mac_group_handle_t gh)
1759 {
1760 	bge_t *bgep = arg;
1761 
1762 	switch (rtype) {
1763 	case MAC_RING_TYPE_RX: {
1764 		recv_ring_t *rx_ring;
1765 
1766 		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1767 		    MAC_ADDRESS_REGS_MAX));
1768 		rx_ring = &bgep->recv[rg_index];
1769 		rx_ring->ring_group_handle = gh;
1770 
1771 		infop->mgi_driver = (mac_group_driver_t)rx_ring;
1772 		infop->mgi_start = NULL;
1773 		infop->mgi_stop = NULL;
1774 		infop->mgi_addmac = bge_addmac;
1775 		infop->mgi_remmac = bge_remmac;
1776 		infop->mgi_count = 1;
1777 		break;
1778 	}
1779 	case MAC_RING_TYPE_TX:
1780 	default:
1781 		ASSERT(0);
1782 		break;
1783 	}
1784 }
1785 
1786 
1787 /*ARGSUSED*/
1788 static boolean_t
1789 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1790 {
1791 	bge_t *bgep = arg;
1792 	mac_capab_rings_t *cap_rings;
1793 
1794 	switch (cap) {
1795 	case MAC_CAPAB_HCKSUM: {
1796 		uint32_t *txflags = cap_data;
1797 
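		/*
		 * Advertise full TCP/UDP checksum offload plus IPv4
		 * header checksum offload for transmit.
		 */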
1798 		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
1799 		break;
1800 	}
1801 
1802 	case MAC_CAPAB_RINGS:
1803 		cap_rings = (mac_capab_rings_t *)cap_data;
1804 
1805 		/* Temporarily disable multiple tx rings. */
1806 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
1807 			return (B_FALSE);
1808 
1809 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
1810 		cap_rings->mr_rnum =
1811 		cap_rings->mr_gnum =
1812 		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
1813 		cap_rings->mr_rget = bge_fill_ring;
1814 		cap_rings->mr_gget = bge_fill_group;
1815 		break;
1816 
1817 	default:
1818 		return (B_FALSE);
1819 	}
1820 	return (B_TRUE);
1821 }
1822 
1823 #ifdef NOT_SUPPORTED_XXX
1824 
1825 /*
1826  * Loopback ioctl code
1827  */
1828 
1829 static lb_property_t loopmodes[] = {
1830 	{ normal,	"normal",	BGE_LOOP_NONE		},
1831 	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
1832 	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
1833 	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
1834 	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
1835 	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
1836 };
1837 
1838 static enum ioc_reply
1839 bge_set_loop_mode(bge_t *bgep, uint32_t mode)
1840 {
1841 	/*
1842 	 * If the mode isn't being changed, there's nothing to do ...
1843 	 */
1844 	if (mode == bgep->param_loop_mode)
1845 		return (IOC_ACK);
1846 
1847 	/*
1848 	 * Validate the requested mode and prepare a suitable message
1849 	 * to explain the link down/up cycle that the change will
1850 	 * probably induce ...
1851 	 */
1852 	switch (mode) {
1853 	default:
1854 		return (IOC_INVAL);
1855 
1856 	case BGE_LOOP_NONE:
1857 	case BGE_LOOP_EXTERNAL_1000:
1858 	case BGE_LOOP_EXTERNAL_100:
1859 	case BGE_LOOP_EXTERNAL_10:
1860 	case BGE_LOOP_INTERNAL_PHY:
1861 	case BGE_LOOP_INTERNAL_MAC:
1862 		break;
1863 	}
1864 
1865 	/*
1866 	 * All OK; tell the caller to reprogram
1867 	 * the PHY and/or MAC for the new mode ...
1868 	 */
1869 	bgep->param_loop_mode = mode;
1870 	return (IOC_RESTART_ACK);
1871 }
1872 
1873 static enum ioc_reply
1874 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
1875 {
1876 	lb_info_sz_t *lbsp;
1877 	lb_property_t *lbpp;
1878 	uint32_t *lbmp;
1879 	int cmd;
1880 
1881 	_NOTE(ARGUNUSED(wq))
1882 
1883 	/*
1884 	 * Validate format of ioctl
1885 	 */
1886 	if (mp->b_cont == NULL)
1887 		return (IOC_INVAL);
1888 
1889 	cmd = iocp->ioc_cmd;
1890 	switch (cmd) {
1891 	default:
1892 		/* NOTREACHED */
1893 		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
1894 		return (IOC_INVAL);
1895 
1896 	case LB_GET_INFO_SIZE:
1897 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
1898 			return (IOC_INVAL);
1899 		lbsp = (void *)mp->b_cont->b_rptr;
1900 		*lbsp = sizeof (loopmodes);
1901 		return (IOC_REPLY);
1902 
1903 	case LB_GET_INFO:
1904 		if (iocp->ioc_count != sizeof (loopmodes))
1905 			return (IOC_INVAL);
1906 		lbpp = (void *)mp->b_cont->b_rptr;
1907 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
1908 		return (IOC_REPLY);
1909 
1910 	case LB_GET_MODE:
1911 		if (iocp->ioc_count != sizeof (uint32_t))
1912 			return (IOC_INVAL);
1913 		lbmp = (void *)mp->b_cont->b_rptr;
1914 		*lbmp = bgep->param_loop_mode;
1915 		return (IOC_REPLY);
1916 
1917 	case LB_SET_MODE:
1918 		if (iocp->ioc_count != sizeof (uint32_t))
1919 			return (IOC_INVAL);
1920 		lbmp = (void *)mp->b_cont->b_rptr;
1921 		return (bge_set_loop_mode(bgep, *lbmp));
1922 	}
1923 }
1924 
1925 #endif /* NOT_SUPPORTED_XXX */
1926 
1927 /*
1928  * Specific bge IOCTLs; the GLD module handles the generic ones.
1929  */
1930 static void
1931 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1932 {
1933 	bge_t *bgep = arg;
1934 	struct iocblk *iocp;
1935 	enum ioc_reply status;
1936 	boolean_t need_privilege;
1937 	int err;
1938 	int cmd;
1939 
1940 	/*
1941 	 * Validate the command before bothering with the mutex ...
1942 	 */
1943 	iocp = (void *)mp->b_rptr;
1944 	iocp->ioc_error = 0;
1945 	need_privilege = B_TRUE;
1946 	cmd = iocp->ioc_cmd;
1947 	switch (cmd) {
1948 	default:
1949 		miocnak(wq, mp, 0, EINVAL);
1950 		return;
1951 
1952 	case BGE_MII_READ:
1953 	case BGE_MII_WRITE:
1954 	case BGE_SEE_READ:
1955 	case BGE_SEE_WRITE:
1956 	case BGE_FLASH_READ:
1957 	case BGE_FLASH_WRITE:
1958 	case BGE_DIAG:
1959 	case BGE_PEEK:
1960 	case BGE_POKE:
1961 	case BGE_PHY_RESET:
1962 	case BGE_SOFT_RESET:
1963 	case BGE_HARD_RESET:
1964 		break;
1965 
1966 #ifdef NOT_SUPPORTED_XXX
1967 	case LB_GET_INFO_SIZE:
1968 	case LB_GET_INFO:
1969 	case LB_GET_MODE:
1970 		need_privilege = B_FALSE;
1971 		/* FALLTHRU */
1972 	case LB_SET_MODE:
1973 		break;
1974 #endif
1975 
1976 	}
1977 
1978 	if (need_privilege) {
1979 		/*
1980 		 * Check for specific net_config privilege on Solaris 10+.
1981 		 */
1982 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1983 		if (err != 0) {
1984 			miocnak(wq, mp, 0, err);
1985 			return;
1986 		}
1987 	}
1988 
1989 	mutex_enter(bgep->genlock);
1990 	if (!(bgep->progress & PROGRESS_INTR)) {
1991 		/* can happen during autorecovery */
1992 		mutex_exit(bgep->genlock);
1993 		miocnak(wq, mp, 0, EIO);
1994 		return;
1995 	}
1996 
1997 	switch (cmd) {
1998 	default:
1999 		_NOTE(NOTREACHED)
2000 		status = IOC_INVAL;
2001 		break;
2002 
2003 	case BGE_MII_READ:
2004 	case BGE_MII_WRITE:
2005 	case BGE_SEE_READ:
2006 	case BGE_SEE_WRITE:
2007 	case BGE_FLASH_READ:
2008 	case BGE_FLASH_WRITE:
2009 	case BGE_DIAG:
2010 	case BGE_PEEK:
2011 	case BGE_POKE:
2012 	case BGE_PHY_RESET:
2013 	case BGE_SOFT_RESET:
2014 	case BGE_HARD_RESET:
2015 		status = bge_chip_ioctl(bgep, wq, mp, iocp);
2016 		break;
2017 
2018 #ifdef NOT_SUPPORTED_XXX
2019 	case LB_GET_INFO_SIZE:
2020 	case LB_GET_INFO:
2021 	case LB_GET_MODE:
2022 	case LB_SET_MODE:
2023 		status = bge_loop_ioctl(bgep, wq, mp, iocp);
2024 		break;
2025 #endif
2026 
2027 	}
2028 
2029 	/*
2030 	 * Do we need to reprogram the PHY and/or the MAC?
2031 	 * Do it now, while we still have the mutex.
2032 	 *
2033 	 * Note: update the PHY first, 'cos it controls the
2034 	 * speed/duplex parameters that the MAC code uses.
2035 	 */
2036 	switch (status) {
2037 	case IOC_RESTART_REPLY:
2038 	case IOC_RESTART_ACK:
2039 		if (bge_reprogram(bgep) == IOC_INVAL)
2040 			status = IOC_INVAL;
2041 		break;
2042 	}
2043 
2044 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
2045 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2046 		status = IOC_INVAL;
2047 	}
2048 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
2049 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2050 		status = IOC_INVAL;
2051 	}
2052 	mutex_exit(bgep->genlock);
2053 
2054 	/*
2055 	 * Finally, decide how to reply
2056 	 */
2057 	switch (status) {
2058 	default:
2059 	case IOC_INVAL:
2060 		/*
2061 		 * Error, reply with a NAK and EINVAL or the specified error
2062 		 */
2063 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2064 		    EINVAL : iocp->ioc_error);
2065 		break;
2066 
2067 	case IOC_DONE:
2068 		/*
2069 		 * OK, reply already sent
2070 		 */
2071 		break;
2072 
2073 	case IOC_RESTART_ACK:
2074 	case IOC_ACK:
2075 		/*
2076 		 * OK, reply with an ACK
2077 		 */
2078 		miocack(wq, mp, 0, 0);
2079 		break;
2080 
2081 	case IOC_RESTART_REPLY:
2082 	case IOC_REPLY:
2083 		/*
2084 		 * OK, send prepared reply as ACK or NAK
2085 		 */
2086 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2087 		    M_IOCACK : M_IOCNAK;
2088 		qreply(wq, mp);
2089 		break;
2090 	}
2091 }
2092 
2093 /*
2094  * ========== Per-instance setup/teardown code ==========
2095  */
2096 
2097 #undef	BGE_DBG
2098 #define	BGE_DBG		BGE_DBG_MEM	/* debug flag for this code	*/
2099 /*
2100  * Allocate an area of memory and a DMA handle for accessing it
2101  */
2102 static int
2103 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
2104 	uint_t dma_flags, dma_area_t *dma_p)
2105 {
2106 	caddr_t va;
2107 	int err;
2108 
2109 	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
2110 	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));
2111 
2112 	/*
2113 	 * Allocate handle
2114 	 */
2115 	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
2116 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
2117 	if (err != DDI_SUCCESS)
2118 		return (DDI_FAILURE);
2119 
2120 	/*
2121 	 * Allocate memory
2122 	 */
2123 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
2124 	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
2125 	    &dma_p->acc_hdl);
2126 	if (err != DDI_SUCCESS)
2127 		return (DDI_FAILURE);
2128 
2129 	/*
2130 	 * Bind the two together
2131 	 */
2132 	dma_p->mem_va = va;
2133 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2134 	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
2135 	    &dma_p->cookie, &dma_p->ncookies);
2136 
2137 	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
2138 	    dma_p->alength, err, dma_p->ncookies));
2139 
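	/* the area must map to exactly one DMA cookie (dma_attr_sgllen == 1) */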
2140 	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
2141 		return (DDI_FAILURE);
2142 
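	/*
	 * Initialise the area as a single unsliced chunk; bge_slice_chunk()
	 * later replaces these placeholder values with real slice geometry.
	 */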
2143 	dma_p->nslots = ~0U;
2144 	dma_p->size = ~0U;
2145 	dma_p->token = ~0U;
2146 	dma_p->offset = 0;
2147 	return (DDI_SUCCESS);
2148 }
2149 
2150 /*
2151  * Free one allocated area of DMAable memory
2152  */
2153 static void
2154 bge_free_dma_mem(dma_area_t *dma_p)
2155 {
2156 	if (dma_p->dma_hdl != NULL) {
2157 		if (dma_p->ncookies) {
2158 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2159 			dma_p->ncookies = 0;
2160 		}
2161 		ddi_dma_free_handle(&dma_p->dma_hdl);
2162 		dma_p->dma_hdl = NULL;
2163 	}
2164 
2165 	if (dma_p->acc_hdl != NULL) {
2166 		ddi_dma_mem_free(&dma_p->acc_hdl);
2167 		dma_p->acc_hdl = NULL;
2168 	}
2169 }

2170 /*
2171  * Utility routine to carve a slice off a chunk of allocated memory,
2172  * updating the chunk descriptor accordingly.  The size of the slice
2173  * is given by the product of the <qty> and <size> parameters.
2174  */
2175 static void
2176 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
2177 	uint32_t qty, uint32_t size)
2178 {
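	/* arbitrary seed; each slice is stamped with a distinct token */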
2179 	static uint32_t sequence = 0xbcd5704a;
2180 	size_t totsize;
2181 
2182 	totsize = qty*size;
2183 	ASSERT(totsize <= chunk->alength);
2184 
2185 	*slice = *chunk;
2186 	slice->nslots = qty;
2187 	slice->size = size;
2188 	slice->alength = totsize;
2189 	slice->token = ++sequence;
2190 
2191 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
2192 	chunk->alength -= totsize;
2193 	chunk->offset += totsize;
2194 	chunk->cookie.dmac_laddress += totsize;
2195 	chunk->cookie.dmac_size -= totsize;
2196 }
2197 
2198 /*
2199  * Initialise the specified Receive Producer (Buffer) Ring, using
2200  * the information in the <dma_area> descriptors that it contains
2201  * to set up all the other fields. This routine should be called
2202  * only once for each ring.
2203  */
2204 static void
2205 bge_init_buff_ring(bge_t *bgep, uint64_t ring)
2206 {
2207 	buff_ring_t *brp;
2208 	bge_status_t *bsp;
2209 	sw_rbd_t *srbdp;
2210 	dma_area_t pbuf;
2211 	uint32_t bufsize;
2212 	uint32_t nslots;
2213 	uint32_t slot;
2214 	uint32_t split;
2215 
2216 	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
2217 		NIC_MEM_SHADOW_BUFF_STD,
2218 		NIC_MEM_SHADOW_BUFF_JUMBO,
2219 		NIC_MEM_SHADOW_BUFF_MINI
2220 	};
2221 	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
2222 		RECV_STD_PROD_INDEX_REG,
2223 		RECV_JUMBO_PROD_INDEX_REG,
2224 		RECV_MINI_PROD_INDEX_REG
2225 	};
2226 	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
2227 		STATUS_STD_BUFF_CONS_INDEX,
2228 		STATUS_JUMBO_BUFF_CONS_INDEX,
2229 		STATUS_MINI_BUFF_CONS_INDEX
2230 	};
2231 
2232 	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
2233 	    (void *)bgep, ring));
2234 
2235 	brp = &bgep->buff[ring];
2236 	nslots = brp->desc.nslots;
2237 	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
2238 	bufsize = brp->buf[0].size;
2239 
2240 	/*
2241 	 * Set up the copy of the h/w RCB
2242 	 *
2243 	 * Note: unlike Send & Receive Return Rings, (where the max_len
2244 	 * field holds the number of slots), in a Receive Buffer Ring
2245 	 * this field indicates the size of each buffer in the ring.
2246 	 */
2247 	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
2248 	brp->hw_rcb.max_len = (uint16_t)bufsize;
2249 	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2250 	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];
2251 
2252 	/*
2253 	 * Other one-off initialisation of per-ring data
2254 	 */
2255 	brp->bgep = bgep;
2256 	bsp = DMA_VPTR(bgep->status_block);
2257 	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
2258 	brp->chip_mbx_reg = mailbox_regs[ring];
2259 	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
2260 	    DDI_INTR_PRI(bgep->intr_pri));
2261 
2262 	/*
2263 	 * Allocate the array of s/w Receive Buffer Descriptors
2264 	 */
2265 	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
2266 	brp->sw_rbds = srbdp;
2267 
2268 	/*
2269 	 * Now initialise each array element once and for all
2270 	 */
2271 	for (split = 0; split < BGE_SPLIT; ++split) {
2272 		pbuf = brp->buf[split];
2273 		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
2274 			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
2275 		ASSERT(pbuf.alength == 0);
2276 	}
2277 }
2278 
2279 /*
2280  * Clean up initialisation done above before the memory is freed
2281  */
2282 static void
2283 bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
2284 {
2285 	buff_ring_t *brp;
2286 	sw_rbd_t *srbdp;
2287 
2288 	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
2289 	    (void *)bgep, ring));
2290 
2291 	brp = &bgep->buff[ring];
2292 	srbdp = brp->sw_rbds;
2293 	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));
2294 
2295 	mutex_destroy(brp->rf_lock);
2296 }
2297 
2298 /*
2299  * Initialise the specified Receive (Return) Ring, using the
2300  * information in the <dma_area> descriptors that it contains
2301  * to set up all the other fields. This routine should be called
2302  * only once for each ring.
2303  */
2304 static void
2305 bge_init_recv_ring(bge_t *bgep, uint64_t ring)
2306 {
2307 	recv_ring_t *rrp;
2308 	bge_status_t *bsp;
2309 	uint32_t nslots;
2310 
2311 	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
2312 	    (void *)bgep, ring));
2313 
2314 	/*
2315 	 * The chip architecture requires that receive return rings have
2316 	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
2317 	 */
2318 	rrp = &bgep->recv[ring];
2319 	nslots = rrp->desc.nslots;
2320 	ASSERT(nslots == 0 || nslots == 512 ||
2321 	    nslots == 1024 || nslots == 2048);
2322 
2323 	/*
2324 	 * Set up the copy of the h/w RCB
2325 	 */
2326 	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
2327 	rrp->hw_rcb.max_len = (uint16_t)nslots;
2328 	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2329 	rrp->hw_rcb.nic_ring_addr = 0;
2330 
2331 	/*
2332 	 * Other one-off initialisation of per-ring data
2333 	 */
2334 	rrp->bgep = bgep;
2335 	bsp = DMA_VPTR(bgep->status_block);
2336 	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
2337 	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
2338 	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
2339 	    DDI_INTR_PRI(bgep->intr_pri));
2340 }
2341 
2343 /*
2344  * Clean up initialisation done above before the memory is freed
2345  */
2346 static void
2347 bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
2348 {
2349 	recv_ring_t *rrp;
2350 
2351 	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
2352 	    (void *)bgep, ring));
2353 
2354 	rrp = &bgep->recv[ring];
2355 	if (rrp->rx_softint)
2356 		ddi_remove_softintr(rrp->rx_softint);
2357 	mutex_destroy(rrp->rx_lock);
2358 }
2359 
2360 /*
2361  * Initialise the specified Send Ring, using the information in the
2362  * <dma_area> descriptors that it contains to set up all the other
2363  * fields. This routine should be called only once for each ring.
2364  */
2365 static void
2366 bge_init_send_ring(bge_t *bgep, uint64_t ring)
2367 {
2368 	send_ring_t *srp;
2369 	bge_status_t *bsp;
2370 	sw_sbd_t *ssbdp;
2371 	dma_area_t desc;
2372 	dma_area_t pbuf;
2373 	uint32_t nslots;
2374 	uint32_t slot;
2375 	uint32_t split;
2376 	sw_txbuf_t *txbuf;
2377 
2378 	BGE_TRACE(("bge_init_send_ring($%p, %d)",
2379 	    (void *)bgep, ring));
2380 
2381 	/*
2382 	 * The chip architecture requires that host-based send rings
2383 	 * have 512 elements per ring.  See 570X-PG102-R page 56.
2384 	 */
2385 	srp = &bgep->send[ring];
2386 	nslots = srp->desc.nslots;
2387 	ASSERT(nslots == 0 || nslots == 512);
2388 
2389 	/*
2390 	 * Set up the copy of the h/w RCB
2391 	 */
2392 	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
2393 	srp->hw_rcb.max_len = (uint16_t)nslots;
2394 	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2395 	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);
2396 
2397 	/*
2398 	 * Other one-off initialisation of per-ring data
2399 	 */
2400 	srp->bgep = bgep;
2401 	bsp = DMA_VPTR(bgep->status_block);
2402 	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
2403 	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
2404 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
2405 	    DDI_INTR_PRI(bgep->intr_pri));
2406 	mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
2407 	    DDI_INTR_PRI(bgep->intr_pri));
2408 	mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
2409 	    DDI_INTR_PRI(bgep->intr_pri));
2410 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
2411 	    DDI_INTR_PRI(bgep->intr_pri));
2412 	if (nslots == 0)
2413 		return;
2414 
2415 	/*
2416 	 * Allocate the array of s/w Send Buffer Descriptors
2417 	 */
2418 	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
2419 	txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
2420 	srp->txbuf_head =
2421 	    kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
2422 	srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
2423 	srp->sw_sbds = ssbdp;
2424 	srp->txbuf = txbuf;
2425 	srp->tx_buffers = BGE_SEND_BUF_NUM;
2426 	srp->tx_buffers_low = srp->tx_buffers / 4;
2427 	if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
2428 		srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
2429 	else
2430 		srp->tx_array_max = BGE_SEND_BUF_ARRAY;
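	/*
	 * Array 0 is carved from the main tx buffer area in bge_alloc_bufs();
	 * further arrays are added on demand by bge_alloc_txbuf_array().
	 */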
2431 	srp->tx_array = 1;
2432 
2433 	/*
2434 	 * Chunk tx desc area
2435 	 */
2436 	desc = srp->desc;
2437 	for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
2438 		bge_slice_chunk(&ssbdp->desc, &desc, 1,
2439 		    sizeof (bge_sbd_t));
2440 	}
2441 	ASSERT(desc.alength == 0);
2442 
2443 	/*
2444 	 * Chunk tx buffer area
2445 	 */
2446 	for (split = 0; split < BGE_SPLIT; ++split) {
2447 		pbuf = srp->buf[0][split];
2448 		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2449 			bge_slice_chunk(&txbuf->buf, &pbuf, 1,
2450 			    bgep->chipid.snd_buff_size);
2451 			txbuf++;
2452 		}
2453 		ASSERT(pbuf.alength == 0);
2454 	}
2455 }
2456 
2457 /*
2458  * Clean up initialisation done above before the memory is freed
2459  */
2460 static void
2461 bge_fini_send_ring(bge_t *bgep, uint64_t ring)
2462 {
2463 	send_ring_t *srp;
2464 	uint32_t array;
2465 	uint32_t split;
2466 	uint32_t nslots;
2467 
2468 	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
2469 	    (void *)bgep, ring));
2470 
2471 	srp = &bgep->send[ring];
2472 	mutex_destroy(srp->tc_lock);
2473 	mutex_destroy(srp->freetxbuf_lock);
2474 	mutex_destroy(srp->txbuf_lock);
2475 	mutex_destroy(srp->tx_lock);
2476 	nslots = srp->desc.nslots;
2477 	if (nslots == 0)
2478 		return;
2479 
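	/* free only the extra arrays; array 0 belongs to the main tx buffer area */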
2480 	for (array = 1; array < srp->tx_array; ++array)
2481 		for (split = 0; split < BGE_SPLIT; ++split)
2482 			bge_free_dma_mem(&srp->buf[array][split]);
2483 	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
2484 	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
2485 	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
2486 	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
2487 	srp->sw_sbds = NULL;
2488 	srp->txbuf_head = NULL;
2489 	srp->txbuf = NULL;
2490 	srp->pktp = NULL;
2491 }
2492 
2493 /*
2494  * Initialise all transmit, receive, and buffer rings.
2495  */
2496 void
2497 bge_init_rings(bge_t *bgep)
2498 {
2499 	uint32_t ring;
2500 
2501 	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2502 
2503 	/*
2504 	 * Perform one-off initialisation of each ring ...
2505 	 */
2506 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2507 		bge_init_send_ring(bgep, ring);
2508 	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2509 		bge_init_recv_ring(bgep, ring);
2510 	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2511 		bge_init_buff_ring(bgep, ring);
2512 }
2513 
2514 /*
2515  * Undo the work of bge_init_rings() above before the memory is freed
2516  */
2517 void
2518 bge_fini_rings(bge_t *bgep)
2519 {
2520 	uint32_t ring;
2521 
2522 	BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2523 
2524 	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2525 		bge_fini_buff_ring(bgep, ring);
2526 	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2527 		bge_fini_recv_ring(bgep, ring);
2528 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2529 		bge_fini_send_ring(bgep, ring);
2530 }
2531 
2532 /*
2533  * Called from bge_m_stop() to free the tx buffers that were
2534  * allocated on demand by the tx process.
2535  */
2536 void
2537 bge_free_txbuf_arrays(send_ring_t *srp)
2538 {
2539 	uint32_t array;
2540 	uint32_t split;
2541 
2542 	ASSERT(mutex_owned(srp->tx_lock));
2543 
2544 	/*
2545 	 * Free the extra tx buffer DMA area
2546 	 */
2547 	for (array = 1; array < srp->tx_array; ++array)
2548 		for (split = 0; split < BGE_SPLIT; ++split)
2549 			bge_free_dma_mem(&srp->buf[array][split]);
2550 
2551 	/*
2552 	 * Restore initial tx buffer numbers
2553 	 */
2554 	srp->tx_array = 1;
2555 	srp->tx_buffers = BGE_SEND_BUF_NUM;
2556 	srp->tx_buffers_low = srp->tx_buffers / 4;
2557 	srp->tx_flow = 0;
2558 	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2559 }
2560 
2561 /*
2562  * Called from the tx process to allocate more tx buffers
2563  */
2564 bge_queue_item_t *
2565 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
2566 {
2567 	bge_queue_t *txbuf_queue;
2568 	bge_queue_item_t *txbuf_item_last;
2569 	bge_queue_item_t *txbuf_item;
2570 	bge_queue_item_t *txbuf_item_rtn;
2571 	sw_txbuf_t *txbuf;
2572 	dma_area_t area;
2573 	size_t txbuffsize;
2574 	uint32_t slot;
2575 	uint32_t array;
2576 	uint32_t split;
2577 	uint32_t err;
2578 
2579 	ASSERT(mutex_owned(srp->tx_lock));
2580 
2581 	array = srp->tx_array;
2582 	if (array >= srp->tx_array_max)
2583 		return (NULL);
2584 
2585 	/*
2586 	 * Allocate memory & handles for TX buffers
2587 	 */
2588 	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2589 	ASSERT((txbuffsize % BGE_SPLIT) == 0);
2590 	for (split = 0; split < BGE_SPLIT; ++split) {
2591 		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2592 		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2593 		    &srp->buf[array][split]);
2594 		if (err != DDI_SUCCESS) {
2595 			/* Free any chunks already allocated, including the failed one */
2596 			for (slot = 0; slot <= split; ++slot)
2597 				bge_free_dma_mem(&srp->buf[array][slot]);
2598 			srp->tx_alloc_fail++;
2599 			return (NULL);
2600 		}
2601 	}
2602 
2603 	/*
2604 	 * Chunk tx buffer area
2605 	 */
2606 	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2607 	for (split = 0; split < BGE_SPLIT; ++split) {
2608 		area = srp->buf[array][split];
2609 		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2610 			bge_slice_chunk(&txbuf->buf, &area, 1,
2611 			    bgep->chipid.snd_buff_size);
2612 			txbuf++;
2613 		}
2614 	}
2615 
2616 	/*
2617 	 * Add above buffers to the tx buffer pop queue
2618 	 */
2619 	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2620 	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2621 	txbuf_item_last = NULL;
2622 	for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
2623 		txbuf_item->item = txbuf;
2624 		txbuf_item->next = txbuf_item_last;
2625 		txbuf_item_last = txbuf_item;
2626 		txbuf++;
2627 		txbuf_item++;
2628 	}
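	/*
	 * Keep the first item back to return to the caller; splice the
	 * remaining BGE_SEND_BUF_NUM - 1 items onto the head of the pop queue.
	 */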
2629 	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2630 	txbuf_item_rtn = txbuf_item;
2631 	txbuf_item++;
2632 	txbuf_queue = srp->txbuf_pop_queue;
2633 	mutex_enter(txbuf_queue->lock);
2634 	txbuf_item->next = txbuf_queue->head;
2635 	txbuf_queue->head = txbuf_item_last;
2636 	txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
2637 	mutex_exit(txbuf_queue->lock);
2638 
2639 	srp->tx_array++;
2640 	srp->tx_buffers += BGE_SEND_BUF_NUM;
2641 	srp->tx_buffers_low = srp->tx_buffers / 4;
2642 
2643 	return (txbuf_item_rtn);
2644 }
2645 
2646 /*
2647  * This function allocates all the transmit and receive buffers
2648  * and descriptors, in four groups of chunks.
2649  */
2650 int
2651 bge_alloc_bufs(bge_t *bgep)
2652 {
2653 	dma_area_t area;
2654 	size_t rxbuffsize;
2655 	size_t txbuffsize;
2656 	size_t rxbuffdescsize;
2657 	size_t rxdescsize;
2658 	size_t txdescsize;
2659 	uint32_t ring;
2660 	uint32_t rx_rings = bgep->chipid.rx_rings;
2661 	uint32_t tx_rings = bgep->chipid.tx_rings;
2662 	int split;
2663 	int err;
2664 
2665 	BGE_TRACE(("bge_alloc_bufs($%p)",
2666 	    (void *)bgep));
2667 
2668 	rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
2669 	rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
2670 	rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;
2671 
2672 	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2673 	txbuffsize *= tx_rings;
2674 
2675 	rxdescsize = rx_rings*bgep->chipid.recv_slots;
2676 	rxdescsize *= sizeof (bge_rbd_t);
2677 
2678 	rxbuffdescsize = BGE_STD_SLOTS_USED;
2679 	rxbuffdescsize += bgep->chipid.jumbo_slots;
2680 	rxbuffdescsize += BGE_MINI_SLOTS_USED;
2681 	rxbuffdescsize *= sizeof (bge_rbd_t);
2682 
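	/* the tx descriptor area also carries the statistics and status blocks */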
2683 	txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
2684 	txdescsize *= sizeof (bge_sbd_t);
2685 	txdescsize += sizeof (bge_statistics_t);
2686 	txdescsize += sizeof (bge_status_t);
2687 	txdescsize += BGE_STATUS_PADDING;
2688 
2689 	/*
2690 	 * Enable PCI relaxed ordering only for RX/TX data buffers
2691 	 */
2692 	if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2693 	    DEVICE_5725_SERIES_CHIPSETS(bgep))) {
2694 		if (bge_relaxed_ordering)
2695 			dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2696 	}
2697 
2698 	/*
2699 	 * Allocate memory & handles for RX buffers
2700 	 */
2701 	ASSERT((rxbuffsize % BGE_SPLIT) == 0);
2702 	for (split = 0; split < BGE_SPLIT; ++split) {
2703 		err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
2704 		    &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
2705 		    &bgep->rx_buff[split]);
2706 		if (err != DDI_SUCCESS)
2707 			return (DDI_FAILURE);
2708 	}
2709 	BGE_DEBUG(("DMA ALLOC: allocated %d bytes per chunk for Rx Buffers (rxbuffsize = %d)",
2710 	           rxbuffsize/BGE_SPLIT,
2711 	           rxbuffsize));
2712 
2713 	/*
2714 	 * Allocate memory & handles for TX buffers
2715 	 */
2716 	ASSERT((txbuffsize % BGE_SPLIT) == 0);
2717 	for (split = 0; split < BGE_SPLIT; ++split) {
2718 		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2719 		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2720 		    &bgep->tx_buff[split]);
2721 		if (err != DDI_SUCCESS)
2722 			return (DDI_FAILURE);
2723 	}
2724 	BGE_DEBUG(("DMA ALLOC: allocated %d bytes per chunk for Tx Buffers (txbuffsize = %d)",
2725 	           txbuffsize/BGE_SPLIT,
2726 	           txbuffsize));
2727 
2728 	if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2729 	    DEVICE_5725_SERIES_CHIPSETS(bgep))) {
2730 		/* no relaxed ordering for descriptor rings */
2731 		dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;
2732 	}
2733 
2734 	/*
2735 	 * Allocate memory & handles for receive return rings
2736 	 */
2737 	ASSERT((rxdescsize % rx_rings) == 0);
2738 	for (split = 0; split < rx_rings; ++split) {
2739 		err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2740 		    &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2741 		    &bgep->rx_desc[split]);
2742 		if (err != DDI_SUCCESS)
2743 			return (DDI_FAILURE);
2744 	}
2745 	BGE_DEBUG(("DMA ALLOC: allocated %d bytes per chunk for Rx Descs cons (rx_rings = %d, rxdescsize = %d)",
2746 	           rxdescsize/rx_rings,
2747 	           rx_rings,
2748 	           rxdescsize));
2749 
2750 	/*
2751 	 * Allocate memory & handles for buffer (producer) descriptor rings.
2752  * Note that at this point split == rx_rings.
2753 	 */
2754 	err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2755 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2756 	if (err != DDI_SUCCESS)
2757 		return (DDI_FAILURE);
2758 	BGE_DEBUG(("DMA ALLOC: allocated 1 chunk for Rx Descs prod (rxbuffdescsize = %d)",
2759 	           rxbuffdescsize));
2760 
2761 	/*
2762 	 * Allocate memory & handles for TX descriptor rings,
2763 	 * status block, and statistics area
2764 	 */
2765 	err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2766 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2767 	if (err != DDI_SUCCESS)
2768 		return (DDI_FAILURE);
2769 	BGE_DEBUG(("DMA ALLOC: allocated 1 chunk for Tx Descs / Status Block / Stats (txdescsize = %d)",
2770 	           txdescsize));
2771 
2772 	/*
2773 	 * Now carve up each of the allocated areas ...
2774 	 */
2775 
2776 	/* rx buffers */
2777 	for (split = 0; split < BGE_SPLIT; ++split) {
2778 		area = bgep->rx_buff[split];
2779 
2780 		BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2781 		           split,
2782 		           area.mem_va,
2783 		           area.alength,
2784 		           area.offset,
2785 		           area.cookie.dmac_laddress,
2786 		           area.cookie.dmac_size));
2787 
2788 		bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2789 		    &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2790 		    bgep->chipid.std_buf_size);
2791 
2792 		BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2793 		           split,
2794 		           bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va,
2795 		           bgep->buff[BGE_STD_BUFF_RING].buf[split].alength,
2796 		           bgep->buff[BGE_STD_BUFF_RING].buf[split].offset,
2797 		           bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress,
2798 		           bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size,
2799 		           BGE_STD_SLOTS_USED/BGE_SPLIT,
2800 		           bgep->chipid.std_buf_size));
2801 
2802 		bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2803 		    &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2804 		    bgep->chipid.recv_jumbo_size);
2805 
2806 		if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0)
2807 		{
2808 			BGE_DEBUG(("RXB SLCE %d JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2809 			           split,
2810 			           bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va,
2811 			           bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength,
2812 			           bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset,
2813 			           bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress,
2814 			           bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size,
2815 			           bgep->chipid.jumbo_slots/BGE_SPLIT,
2816 			           bgep->chipid.recv_jumbo_size));
2817 		}
2818 
2819 		bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2820 		    &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2821 		    BGE_MINI_BUFF_SIZE);
2822 
2823 		if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0)
2824 		{
2825 			BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2826 			           split,
2827 			           bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va,
2828 			           bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength,
2829 			           bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset,
2830 			           bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress,
2831 			           bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size,
2832 			           BGE_MINI_SLOTS_USED/BGE_SPLIT,
2833 			           BGE_MINI_BUFF_SIZE));
2834 		}
2835 
2836 		BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2837 		           split,
2838 		           area.mem_va,
2839 		           area.alength,
2840 		           area.offset,
2841 		           area.cookie.dmac_laddress,
2842 		           area.cookie.dmac_size));
2843 	}
2844 
2845 	/* tx buffers */
2846 	for (split = 0; split < BGE_SPLIT; ++split) {
2847 		area = bgep->tx_buff[split];
2848 
2849 		BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2850 		           split,
2851 		           area.mem_va,
2852 		           area.alength,
2853 		           area.offset,
2854 		           area.cookie.dmac_laddress,
2855 		           area.cookie.dmac_size));
2856 
2857 		for (ring = 0; ring < tx_rings; ++ring) {
2858 			bge_slice_chunk(&bgep->send[ring].buf[0][split],
2859 			    &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2860 			    bgep->chipid.snd_buff_size);
2861 
2862 			BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2863 			           split, ring,
2864 			           bgep->send[ring].buf[0][split].mem_va,
2865 			           bgep->send[ring].buf[0][split].alength,
2866 			           bgep->send[ring].buf[0][split].offset,
2867 			           bgep->send[ring].buf[0][split].cookie.dmac_laddress,
2868 			           bgep->send[ring].buf[0][split].cookie.dmac_size,
2869 			           BGE_SEND_BUF_NUM/BGE_SPLIT,
2870 			           bgep->chipid.snd_buff_size));
2871 		}
2872 
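		/*
		 * Remaining send rings get zero-slot slices; bge_init_send_ring()
		 * later flags them RCB_FLAG_RING_DISABLED.
		 */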
2873 		for (; ring < BGE_SEND_RINGS_MAX; ++ring) {
2874 			bge_slice_chunk(&bgep->send[ring].buf[0][split],
2875 			    &area, 0, bgep->chipid.snd_buff_size);
2876 		}
2877 
2878 		BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2879 		           split,
2880 		           area.mem_va,
2881 		           area.alength,
2882 		           area.offset,
2883 		           area.cookie.dmac_laddress,
2884 		           area.cookie.dmac_size));
2885 	}
2886 
2887 	for (ring = 0; ring < rx_rings; ++ring) {
2888 		bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2889 		    bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2890 
2891 		BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2892 		           ring,
2893 		           bgep->recv[ring].desc.mem_va,
2894 		           bgep->recv[ring].desc.alength,
2895 		           bgep->recv[ring].desc.offset,
2896 		           bgep->recv[ring].desc.cookie.dmac_laddress,
2897 		           bgep->recv[ring].desc.cookie.dmac_size,
2898 		           bgep->chipid.recv_slots,
2899 		           sizeof(bge_rbd_t)));
2900 	}
2901 
2902 	/* the rxbuffdescsize allocation lives at bgep->rx_desc[rx_rings] */
2903 	area = bgep->rx_desc[rx_rings]; /* one slot beyond the return rings */
2904 
2905 	for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */
2906 		bge_slice_chunk(&bgep->recv[ring].desc, &area,
2907 		    0, sizeof (bge_rbd_t));
2908 
2909 	BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2910 	           area.mem_va,
2911 	           area.alength,
2912 	           area.offset,
2913 	           area.cookie.dmac_laddress,
2914 	           area.cookie.dmac_size));
2915 
2916 	bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2917 	    BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2918 	BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2919 	           bgep->buff[BGE_STD_BUFF_RING].desc.mem_va,
2920 	           bgep->buff[BGE_STD_BUFF_RING].desc.alength,
2921 	           bgep->buff[BGE_STD_BUFF_RING].desc.offset,
2922 	           bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress,
2923 	           bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size,
2924 	           BGE_STD_SLOTS_USED,
2925 	           sizeof(bge_rbd_t)));
2926 
2927 	bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2928 	    bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2929 	BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2930 	           bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va,
2931 	           bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength,
2932 	           bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset,
2933 	           bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress,
2934 	           bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size,
2935 	           bgep->chipid.jumbo_slots,
2936 	           sizeof(bge_rbd_t)));
2937 
2938 	bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2939 	    BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2940 	BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2941 	           bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va,
2942 	           bgep->buff[BGE_MINI_BUFF_RING].desc.alength,
2943 	           bgep->buff[BGE_MINI_BUFF_RING].desc.offset,
2944 	           bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress,
2945 	           bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size,
2946 	           BGE_MINI_SLOTS_USED,
2947 	           sizeof(bge_rbd_t)));
2948 
2949 	BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2950 	           area.mem_va,
2951 	           area.alength,
2952 	           area.offset,
2953 	           area.cookie.dmac_laddress,
2954 	           area.cookie.dmac_size));
2955 
2956 	ASSERT(area.alength == 0);
2957 
2958 	area = bgep->tx_desc;
2959 
2960 	BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2961 	           area.mem_va,
2962 	           area.alength,
2963 	           area.offset,
2964 	           area.cookie.dmac_laddress,
2965 	           area.cookie.dmac_size));
2966 
2967 	for (ring = 0; ring < tx_rings; ++ring) {
2968 		bge_slice_chunk(&bgep->send[ring].desc, &area,
2969 		    BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2970 
2971 		BGE_DEBUG(("TXD RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2972 		           ring,
2973 		           bgep->send[ring].desc.mem_va,
2974 		           bgep->send[ring].desc.alength,
2975 		           bgep->send[ring].desc.offset,
2976 		           bgep->send[ring].desc.cookie.dmac_laddress,
2977 		           bgep->send[ring].desc.cookie.dmac_size,
2978 		           BGE_SEND_SLOTS_USED,
2979 		           sizeof(bge_sbd_t)));
2980 	}
2981 
2982 	for (; ring < BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */
2983 		bge_slice_chunk(&bgep->send[ring].desc, &area,
2984 		    0, sizeof (bge_sbd_t));
2985 
2986 	bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2987 	BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2988 	           bgep->statistics.mem_va,
2989 	           bgep->statistics.alength,
2990 	           bgep->statistics.offset,
2991 	           bgep->statistics.cookie.dmac_laddress,
2992 	           bgep->statistics.cookie.dmac_size,
2993 	           1,
2994 	           sizeof(bge_statistics_t)));
2995 
2996 	bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2997 	BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2998 	           bgep->status_block.mem_va,
2999 	           bgep->status_block.alength,
3000 	           bgep->status_block.offset,
3001 	           bgep->status_block.cookie.dmac_laddress,
3002 	           bgep->status_block.cookie.dmac_size,
3003 	           1,
3004 	           sizeof(bge_status_t)));
3005 
3006 	BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
3007 	           area.mem_va,
3008 	           area.alength,
3009 	           area.offset,
3010 	           area.cookie.dmac_laddress,
3011 	           area.cookie.dmac_size));
3012 
3013 	ASSERT(area.alength == BGE_STATUS_PADDING);
3014 
3015 	DMA_ZERO(bgep->status_block);
3016 
3017 	return (DDI_SUCCESS);
3018 }
3019 
3020 #undef	BGE_DBG
3021 #define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
3022 
3023 /*
3024  * This routine frees the transmit and receive buffers and descriptors.
3025  * Make sure the chip is stopped before calling it!
3026  */
3027 void
3028 bge_free_bufs(bge_t *bgep)
3029 {
3030 	int split;
3031 
3032 	BGE_TRACE(("bge_free_bufs($%p)",
3033 	    (void *)bgep));
3034 
3035 	bge_free_dma_mem(&bgep->tx_desc);
3036 	for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
3037 		bge_free_dma_mem(&bgep->rx_desc[split]);
3038 	for (split = 0; split < BGE_SPLIT; ++split)
3039 		bge_free_dma_mem(&bgep->tx_buff[split]);
3040 	for (split = 0; split < BGE_SPLIT; ++split)
3041 		bge_free_dma_mem(&bgep->rx_buff[split]);
3042 }
3043 
3044 /*
3045  * Determine (initial) MAC address ("BIA") to use for this interface
3046  */
3048 static void
3049 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
3050 {
3051 	struct ether_addr sysaddr;
3052 	char propbuf[8];		/* "true" or "false", plus NUL	*/
3053 	uchar_t *bytes;
3054 	int *ints;
3055 	uint_t nelts;
3056 	int err;
3057 
3058 	BGE_TRACE(("bge_find_mac_address($%p)",
3059 	    (void *)bgep));
3060 
3061 	BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
3062 	    cidp->hw_mac_addr,
3063 	    ether_sprintf((void *)cidp->vendor_addr.addr),
3064 	    cidp->vendor_addr.set ? "" : "not "));
3065 
3066 	/*
3067 	 * The "vendor's factory-set address" may already have
3068 	 * been extracted from the chip, but if the property
3069 	 * "local-mac-address" is set we use that instead.  It
3070 	 * will normally be set by OBP, but it could also be
3071 	 * specified in a .conf file(!)
3072 	 *
3073 	 * There doesn't seem to be a way to define byte-array
3074 	 * properties in a .conf, so we check whether it looks
3075 	 * like an array of 6 ints instead.
3076 	 *
3077 	 * Then, we check whether it looks like an array of 6
3078 	 * bytes (which it should, if OBP set it).  If we can't
3079 	 * make sense of it either way, we'll ignore it.
3080 	 */
3081 	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
3082 	    DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
3083 	if (err == DDI_PROP_SUCCESS) {
3084 		if (nelts == ETHERADDRL) {
3085 			while (nelts--)
3086 				cidp->vendor_addr.addr[nelts] = ints[nelts];
3087 			cidp->vendor_addr.set = B_TRUE;
3088 		}
3089 		ddi_prop_free(ints);
3090 	}
3091 
3092 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
3093 	    DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
3094 	if (err == DDI_PROP_SUCCESS) {
3095 		if (nelts == ETHERADDRL) {
3096 			while (nelts--)
3097 				cidp->vendor_addr.addr[nelts] = bytes[nelts];
3098 			cidp->vendor_addr.set = B_TRUE;
3099 		}
3100 		ddi_prop_free(bytes);
3101 	}
3102 
3103 	BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
3104 	    ether_sprintf((void *)cidp->vendor_addr.addr),
3105 	    cidp->vendor_addr.set ? "" : "not "));
3106 
3107 	/*
3108 	 * Look up the OBP property "local-mac-address?".  Note that even
3109 	 * though its value is a string (which should be "true" or "false"),
3110 	 * it can't be decoded by ddi_prop_lookup_string(9F).  So, we zero
3111 	 * the buffer first and then fetch the property as an untyped array;
3112 	 * this may or may not include a final NUL, but since there will
3113 	 * always be one left at the end of the buffer we can now treat it
3114 	 * as a string anyway.
3115 	 */
3116 	nelts = sizeof (propbuf);
3117 	bzero(propbuf, nelts--);
3118 	err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
3119 	    DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);
3120 
3121 	/*
3122 	 * Now, if the address still isn't set from the hardware (SEEPROM)
3123 	 * or the OBP or .conf property, OR if the user has foolishly set
3124 	 * 'local-mac-address? = false', use "the system address" instead
3125 	 * (but only if it's non-null i.e. has been set from the IDPROM).
3126 	 */
3127 	if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
3128 		if (localetheraddr(NULL, &sysaddr) != 0) {
3129 			ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
3130 			cidp->vendor_addr.set = B_TRUE;
3131 		}
3132 
3133 	BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
3134 	    ether_sprintf((void *)cidp->vendor_addr.addr),
3135 	    cidp->vendor_addr.set ? "" : "not "));
3136 
3137 	/*
3138 	 * Finally(!), if there's a valid "mac-address" property (created
3139 	 * if we netbooted from this interface), we must use this instead
3140 	 * of any of the above to ensure that the NFS/install server doesn't
3141 	 * get confused by the address changing as Solaris takes over!
3142 	 */
3143 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
3144 	    DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
3145 	if (err == DDI_PROP_SUCCESS) {
3146 		if (nelts == ETHERADDRL) {
3147 			while (nelts--)
3148 				cidp->vendor_addr.addr[nelts] = bytes[nelts];
3149 			cidp->vendor_addr.set = B_TRUE;
3150 		}
3151 		ddi_prop_free(bytes);
3152 	}
3153 
3154 	BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
3155 	    ether_sprintf((void *)cidp->vendor_addr.addr),
3156 	    cidp->vendor_addr.set ? "" : "not "));
3157 }
3158 
3159 /*ARGSUSED*/
3160 int
3161 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
3162 {
3163 	ddi_fm_error_t de;
3164 
3165 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
3166 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
3167 	return (de.fme_status);
3168 }
3169 
3170 /*ARGSUSED*/
3171 int
3172 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
3173 {
3174 	ddi_fm_error_t de;
3175 
3176 	ASSERT(bgep->progress & PROGRESS_BUFS);
3177 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
3178 	return (de.fme_status);
3179 }
3180 
3181 /*
3182  * The IO fault service error handling callback function
3183  */
3184 /*ARGSUSED*/
3185 static int
3186 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3187 {
3188 	/*
3189 	 * As the driver can always deal with an error in any DMA or
3190 	 * access handle, we can just return the fme_status value.
3191 	 */
3192 	pci_ereport_post(dip, err, NULL);
3193 	return (err->fme_status);
3194 }
3195 
3196 static void
3197 bge_fm_init(bge_t *bgep)
3198 {
3199 	ddi_iblock_cookie_t iblk;
3200 
3201 	/* Only register with IO Fault Services if we have some capability */
3202 	if (bgep->fm_capabilities) {
3203 		bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
3204 		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
3205 
3206 		/* Register capabilities with IO Fault Services */
3207 		ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);
3208 
3209 		/*
3210 		 * Initialize pci ereport capabilities if ereport capable
3211 		 */
3212 		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3213 		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3214 			pci_ereport_setup(bgep->devinfo);
3215 
3216 		/*
3217 		 * Register error callback if error callback capable
3218 		 */
3219 		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3220 			ddi_fm_handler_register(bgep->devinfo,
3221 			    bge_fm_error_cb, (void*) bgep);
3222 	} else {
3223 		/*
3224 		 * These fields must be reset to their non-FMA defaults when
3225 		 * no FMA capabilities are present at runtime.
3226 		 */
3227 		bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
3228 		dma_attr.dma_attr_flags = 0;
3229 	}
3230 }
3231 
3232 static void
3233 bge_fm_fini(bge_t *bgep)
3234 {
3235 	/* Only unregister FMA capabilities if we registered some */
3236 	if (bgep->fm_capabilities) {
3237 
3238 		/*
3239 		 * Release any resources allocated by pci_ereport_setup()
3240 		 */
3241 		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3242 		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3243 			pci_ereport_teardown(bgep->devinfo);
3244 
3245 		/*
3246 		 * Un-register error callback if error callback capable
3247 		 */
3248 		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3249 			ddi_fm_handler_unregister(bgep->devinfo);
3250 
3251 		/* Unregister from IO Fault Services */
3252 		ddi_fm_fini(bgep->devinfo);
3253 	}
3254 }
3255 
3256 static void
3257 #ifdef BGE_IPMI_ASF
3258 bge_unattach(bge_t *bgep, uint_t asf_mode)
3259 #else
3260 bge_unattach(bge_t *bgep)
3261 #endif
3262 {
3263 	BGE_TRACE(("bge_unattach($%p)",
3264 	    (void *)bgep));
3265 
3266 	/*
3267 	 * Flag that no more activity may be initiated
3268 	 */
3269 	bgep->progress &= ~PROGRESS_READY;
3270 
3271 	/*
3272 	 * Quiesce the PHY and MAC (leave them reset but still powered).
3273 	 * Clean up and free all BGE data structures.
3274 	 */
3275 	if (bgep->periodic_id != NULL) {
3276 		ddi_periodic_delete(bgep->periodic_id);
3277 		bgep->periodic_id = NULL;
3278 	}
3279 
3280 	if (bgep->progress & PROGRESS_KSTATS)
3281 		bge_fini_kstats(bgep);
3282 	if (bgep->progress & PROGRESS_PHY)
3283 		bge_phys_reset(bgep);
3284 	if (bgep->progress & PROGRESS_HWINT) {
3285 		mutex_enter(bgep->genlock);
3286 #ifdef BGE_IPMI_ASF
3287 		if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
3288 #else
3289 		if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
3290 #endif
3291 			ddi_fm_service_impact(bgep->devinfo,
3292 			    DDI_SERVICE_UNAFFECTED);
3293 #ifdef BGE_IPMI_ASF
3294 		if (bgep->asf_enabled) {
3295 			/*
3296 			 * This register has been overlaid. We restore its
3297 			 * initial value here.
3298 			 */
3299 			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
3300 			    BGE_NIC_DATA_SIG);
3301 		}
3302 #endif
3303 		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3304 			ddi_fm_service_impact(bgep->devinfo,
3305 			    DDI_SERVICE_UNAFFECTED);
3306 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3307 			ddi_fm_service_impact(bgep->devinfo,
3308 			    DDI_SERVICE_UNAFFECTED);
3309 		mutex_exit(bgep->genlock);
3310 	}
3311 	if (bgep->progress & PROGRESS_INTR) {
3312 		bge_intr_disable(bgep);
3313 		bge_fini_rings(bgep);
3314 	}
3315 	if (bgep->progress & PROGRESS_HWINT) {
3316 		bge_rem_intrs(bgep);
3317 		rw_destroy(bgep->errlock);
3318 		mutex_destroy(bgep->softintrlock);
3319 		mutex_destroy(bgep->genlock);
3320 	}
3321 	if (bgep->progress & PROGRESS_FACTOTUM)
3322 		ddi_remove_softintr(bgep->factotum_id);
3323 	if (bgep->progress & PROGRESS_RESCHED)
3324 		ddi_remove_softintr(bgep->drain_id);
3325 	if (bgep->progress & PROGRESS_BUFS)
3326 		bge_free_bufs(bgep);
3327 	if (bgep->progress & PROGRESS_REGS) {
3328 		ddi_regs_map_free(&bgep->io_handle);
3329 		if (bgep->ape_enabled)
3330 			ddi_regs_map_free(&bgep->ape_handle);
3331 	}
3332 	if (bgep->progress & PROGRESS_CFG)
3333 		pci_config_teardown(&bgep->cfg_handle);
3334 
3335 	bge_fm_fini(bgep);
3336 
3337 	ddi_remove_minor_node(bgep->devinfo, NULL);
3338 	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
3339 	kmem_free(bgep, sizeof (*bgep));
3340 }
3341 
3342 static int
3343 bge_resume(dev_info_t *devinfo)
3344 {
3345 	bge_t *bgep;				/* Our private data	*/
3346 	chip_id_t *cidp;
3347 	chip_id_t chipid;
3348 
3349 	bgep = ddi_get_driver_private(devinfo);
3350 	if (bgep == NULL)
3351 		return (DDI_FAILURE);
3352 
3353 	/*
3354 	 * Refuse to resume if the data structures aren't consistent
3355 	 */
3356 	if (bgep->devinfo != devinfo)
3357 		return (DDI_FAILURE);
3358 
3359 #ifdef BGE_IPMI_ASF
3360 	/*
3361 	 * Power management is not currently supported in BGE. If you
3362 	 * want to implement it, add the ASF/IPMI-related code here.
3364 	 */
3365 
3366 #endif
3367 
3368 	/*
3369 	 * Read chip ID & set up config space command register(s)
3370 	 * Refuse to resume if the chip has changed its identity!
3371 	 */
3372 	cidp = &bgep->chipid;
3373 	mutex_enter(bgep->genlock);
3374 	bge_chip_cfg_init(bgep, &chipid, B_FALSE);
3375 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3376 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3377 		mutex_exit(bgep->genlock);
3378 		return (DDI_FAILURE);
3379 	}
3380 	mutex_exit(bgep->genlock);
3381 	if (chipid.vendor != cidp->vendor)
3382 		return (DDI_FAILURE);
3383 	if (chipid.device != cidp->device)
3384 		return (DDI_FAILURE);
3385 	if (chipid.revision != cidp->revision)
3386 		return (DDI_FAILURE);
3387 	if (chipid.asic_rev != cidp->asic_rev)
3388 		return (DDI_FAILURE);
3389 
3390 	/*
3391 	 * All OK, reinitialise h/w & kick off GLD scheduling
3392 	 */
3393 	mutex_enter(bgep->genlock);
3394 	if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
3395 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3396 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
3397 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3398 		mutex_exit(bgep->genlock);
3399 		return (DDI_FAILURE);
3400 	}
3401 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3402 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3403 		mutex_exit(bgep->genlock);
3404 		return (DDI_FAILURE);
3405 	}
3406 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3407 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3408 		mutex_exit(bgep->genlock);
3409 		return (DDI_FAILURE);
3410 	}
3411 	mutex_exit(bgep->genlock);
3412 	return (DDI_SUCCESS);
3413 }
3414 
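/*
 * Sanity-check a firmware image header in NVRAM: the first word must
 * carry the expected signature in its top bits and the second word
 * must be zero.
 */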
3415 static int
3416 bge_fw_img_is_valid(bge_t *bgep, uint32_t offset)
3417 {
3418 	uint32_t val;
3419 
3420 	if (bge_nvmem_read32(bgep, offset, &val) ||
3421 	    (val & 0xfc000000) != 0x0c000000 ||
3422 	    bge_nvmem_read32(bgep, offset + 4, &val) ||
3423 	    val != 0)
3424 		return (0);
3425 
3426 	return (1);
3427 }
3428 
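/*
 * Locate the management (ASF) firmware directory entry in NVRAM and
 * append its version string to bgep->fw_version.
 */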
3429 static void
3430 bge_read_mgmtfw_ver(bge_t *bgep)
3431 {
3432 	uint32_t val;
3433 	uint32_t offset;
3434 	uint32_t start;
3435 	int i, vlen;
3436 
3437 	for (offset = NVM_DIR_START;
3438 	     offset < NVM_DIR_END;
3439 	     offset += NVM_DIRENT_SIZE) {
3440 		if (bge_nvmem_read32(bgep, offset, &val))
3441 			return;
3442 
3443 		if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI)
3444 			break;
3445 	}
3446 
3447 	if (offset == NVM_DIR_END)
3448 		return;
3449 
3450 	if (bge_nvmem_read32(bgep, offset - 4, &start))
3451 		return;
3452 
3453 	if (bge_nvmem_read32(bgep, offset + 4, &offset) ||
3454 	    !bge_fw_img_is_valid(bgep, offset) ||
3455 	    bge_nvmem_read32(bgep, offset + 8, &val))
3456 		return;
3457 
3458 	offset += val - start;
3459 
3460 	vlen = strlen(bgep->fw_version);
3461 
3462 	bgep->fw_version[vlen++] = ',';
3463 	bgep->fw_version[vlen++] = ' ';
3464 
3465 	for (i = 0; i < 4; i++) {
3466 		uint32_t v;
3467 
3468 		if (bge_nvmem_read32(bgep, offset, &v))
3469 			return;
3470 
3471 		v = BE_32(v);
3472 
3473 		offset += sizeof(v);
3474 
3475 		if (vlen > BGE_FW_VER_SIZE - sizeof(v)) {
3476 			memcpy(&bgep->fw_version[vlen], &v, BGE_FW_VER_SIZE - vlen);
3477 			break;
3478 		}
3479 
3480 		memcpy(&bgep->fw_version[vlen], &v, sizeof(v));
3481 		vlen += sizeof(v);
3482 	}
3483 }
3484 
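/*
 * Append the APE firmware (NCSI/SMASH/DASH) version to bgep->fw_version,
 * provided the APE segment is present and its firmware reports ready.
 */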
3485 static void
3486 bge_read_dash_ver(bge_t *bgep)
3487 {
3488 	int vlen;
3489 	uint32_t apedata;
3490 	char *fwtype;
3491 
3492 	if (!bgep->ape_enabled || !bgep->asf_enabled)
3493 		return;
3494 
3495 	apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
3496 	if (apedata != APE_SEG_SIG_MAGIC)
3497 		return;
3498 
3499 	apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
3500 	if (!(apedata & APE_FW_STATUS_READY))
3501 		return;
3502 
3503 	apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION);
3504 
3505 	if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) &
3506 	    BGE_APE_FW_FEATURE_NCSI) {
3507 		bgep->ape_has_ncsi = B_TRUE;
3508 		fwtype = "NCSI";
3509 	} else if ((bgep->chipid.device == DEVICE_ID_5725) ||
3510 	    (bgep->chipid.device == DEVICE_ID_5727)) {
3511 		fwtype = "SMASH";
3512 	} else {
3513 		fwtype = "DASH";
3514 	}
3515 
3516 	vlen = strlen(bgep->fw_version);
3517 
3518 	snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen,
3519 	    " %s v%d.%d.%d.%d", fwtype,
3520 	    (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
3521 	    (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
3522 	    (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
3523 	    (apedata & APE_FW_VERSION_BLDMSK));
3524 }
3525 
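/*
 * Read the bootcode version from NVRAM, handling both the newer image
 * layout (embedded version string) and the legacy major/minor encoding.
 */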
3526 static void
3527 bge_read_bc_ver(bge_t *bgep)
3528 {
3529 	uint32_t val;
3530 	uint32_t offset;
3531 	uint32_t start;
3532 	uint32_t ver_offset;
3533 	int i, dst_off;
3534 	uint32_t major;
3535 	uint32_t minor;
3536 	boolean_t newver = B_FALSE;
3537 
3538 	if (bge_nvmem_read32(bgep, 0xc, &offset) ||
3539 	    bge_nvmem_read32(bgep, 0x4, &start))
3540 		return;
3541 
3542 	if (bge_nvmem_read32(bgep, offset, &val))
3543 		return;
3544 
3545 	if ((val & 0xfc000000) == 0x0c000000) {
3546 		if (bge_nvmem_read32(bgep, offset + 4, &val))
3547 			return;
3548 
3549 		if (val == 0)
3550 			newver = B_TRUE;
3551 	}
3552 
3553 	dst_off = strlen(bgep->fw_version);
3554 
3555 	if (newver) {
3556 		if (((BGE_FW_VER_SIZE - dst_off) < 16) ||
3557 		    bge_nvmem_read32(bgep, offset + 8, &ver_offset))
3558 			return;
3559 
3560 		offset = offset + ver_offset - start;
3561 		for (i = 0; i < 16; i += 4) {
3562 			if (bge_nvmem_read32(bgep, offset + i, &val))
3563 				return;
3564 			val = BE_32(val);
3565 			memcpy(bgep->fw_version + dst_off + i, &val,
3566 			    sizeof(val));
3567 		}
3568 	} else {
3569 		if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset))
3570 			return;
3571 
3572 		major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT;
3573 		minor = ver_offset & NVM_BCVER_MINMSK;
3574 		snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off,
3575 		    "v%d.%02d", major, minor);
3576 	}
3577 }
3578 
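/*
 * Build the firmware version string for this device, starting with the
 * bootcode version and appending any management firmware version.
 */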
3579 static void
3580 bge_read_fw_ver(bge_t *bgep)
3581 {
3582 	uint32_t val;
3583 	uint32_t magic;
3584 
3585 	*bgep->fw_version = 0;
3586 
3587 	if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) ||
3588 	    (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) {
3589 		snprintf(bgep->fw_version, sizeof(bgep->fw_version), "sb");
3590 		return;
3591 	}
3592 
3593 	mutex_enter(bgep->genlock);
3594 
3595 	bge_nvmem_read32(bgep, 0, &magic);
3596 
3597 	if (magic == EEPROM_MAGIC) {
3598 		bge_read_bc_ver(bgep);
3599 	} else {
3600 		/* ignore other configs for now */
3601 		mutex_exit(bgep->genlock);
3602 		return;
3603 	}
3604 
3605 	if (bgep->ape_enabled) {
3606 		if (bgep->asf_enabled) {
3607 			bge_read_dash_ver(bgep);
3608 		}
3609 	} else if (bgep->asf_enabled) {
3610 		bge_read_mgmtfw_ver(bgep);
3611 	}
3612 
3613 	mutex_exit(bgep->genlock);
3614 
3615 	bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */
3616 }
3617 
3618 /*
3619  * attach(9E) -- Attach a device to the system
3620  *
3621  * Called once for each board successfully probed.
3622  */
3623 static int
3624 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3625 {
3626 	bge_t *bgep;				/* Our private data	*/
3627 	mac_register_t *macp;
3628 	chip_id_t *cidp;
3629 	caddr_t regs;
3630 	int instance;
3631 	int err;
3632 	int intr_types;
3633 	int *props = NULL;
3634 	uint_t numProps;
3635 	uint32_t regval;
3636 	uint32_t pci_state_reg;
3637 #ifdef BGE_IPMI_ASF
3638 	uint32_t mhcrValue;
3639 #ifdef __sparc
3640 	uint16_t value16;
3641 #endif
3642 #ifdef BGE_NETCONSOLE
3643 	int retval;
3644 #endif
3645 #endif
3646 
3647 	instance = ddi_get_instance(devinfo);
3648 
3649 	BGE_GTRACE(("bge_attach($%p, %d) instance %d",
3650 	    (void *)devinfo, cmd, instance));
3651 	BGE_BRKPT(NULL, "bge_attach");
3652 
3653 	switch (cmd) {
3654 	default:
3655 		return (DDI_FAILURE);
3656 
3657 	case DDI_RESUME:
3658 		return (bge_resume(devinfo));
3659 
3660 	case DDI_ATTACH:
3661 		break;
3662 	}
3663 
3664 	bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
3665 	bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
3666 	ddi_set_driver_private(devinfo, bgep);
3667 	bgep->bge_guard = BGE_GUARD;
3668 	bgep->devinfo = devinfo;
3669 	bgep->param_drain_max = 64;
3670 	bgep->param_msi_cnt = 0;
3671 	bgep->param_loop_mode = 0;
3672 
3673 	/*
3674 	 * Initialize more fields in BGE private data
3675 	 */
3676 	bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3677 	    DDI_PROP_DONTPASS, debug_propname, bge_debug);
3678 	(void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
3679 	    BGE_DRIVER_NAME, instance);
3680 
3681 	/*
3682 	 * Initialize for fma support
3683 	 */
3684 	bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3685 	    DDI_PROP_DONTPASS, fm_cap,
3686 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3687 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3688 	BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
3689 	bge_fm_init(bgep);
3690 
3691 	/*
3692 	 * Look up the IOMMU's page size for DVMA mappings (must be
3693 	 * a power of 2) and convert to a mask.  This can be used to
3694 	 * determine whether a message buffer crosses a page boundary.
3695 	 * Note: in 2s complement binary notation, if X is a power of
3696 	 * 2, then -X has the representation "11...1100...00".
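	 * For example, with an 8KB page size the (64-bit) mask becomes
	 * 0xFFFFFFFFFFFFE000, so a buffer at offset "off" of length "len"
	 * crosses a page boundary exactly when
	 * (off & pagemask) != ((off + len - 1) & pagemask).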
3697 	 */
3698 	bgep->pagemask = dvma_pagesize(devinfo);
3699 	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
3700 	bgep->pagemask = -bgep->pagemask;
3701 
3702 	/*
3703 	 * Map config space registers
3704 	 * Read chip ID & set up config space command register(s)
3705 	 *
3706 	 * Note: this leaves the chip accessible by Memory Space
3707 	 * accesses, but with interrupts and Bus Mastering off.
3708 	 * This should ensure that nothing untoward will happen
3709 	 * if it has been left active by the (net-)bootloader.
3710 	 * We'll re-enable Bus Mastering once we've reset the chip,
3711 	 * and allow interrupts only when everything else is set up.
3712 	 */
	err = pci_config_setup(devinfo, &bgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
3714 #ifdef BGE_IPMI_ASF
3715 #ifdef __sparc
3716 	/*
	 * We need to determine the chipset type in order to access certain
	 * configuration registers. (This information is used by
	 * bge_ind_put32, bge_ind_get32 and bge_nic_read32.)
3720 	 */
3721 	bgep->chipid.device = pci_config_get16(bgep->cfg_handle,
3722 	    PCI_CONF_DEVID);
3723 	value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
3724 	value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
3725 	pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
3726 	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3727 	    MHCR_ENABLE_TAGGED_STATUS_MODE |
3728 	    MHCR_MASK_INTERRUPT_MODE |
3729 	    MHCR_MASK_PCI_INT_OUTPUT |
3730 	    MHCR_CLEAR_INTERRUPT_INTA |
3731 	    MHCR_ENABLE_ENDIAN_WORD_SWAP |
3732 	    MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3733 	/*
	 * On some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
	 * is already set in the MHCR, any value written there would itself
	 * be byte-swapped; so simply clear the register first.
3737 	 */
3738 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3739 	    DEVICE_5725_SERIES_CHIPSETS(bgep))
3740 		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3741 #else
3742 	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3743 	    MHCR_ENABLE_TAGGED_STATUS_MODE |
3744 	    MHCR_MASK_INTERRUPT_MODE |
3745 	    MHCR_MASK_PCI_INT_OUTPUT |
3746 	    MHCR_CLEAR_INTERRUPT_INTA;
3747 #endif
3748 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
3749 	bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
3750 	    bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3751 	    MEMORY_ARBITER_ENABLE);
3752 	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
3753 		bgep->asf_wordswapped = B_TRUE;
3754 	} else {
3755 		bgep->asf_wordswapped = B_FALSE;
3756 	}
3757 	bge_asf_get_config(bgep);
3758 #endif
3763 	bgep->progress |= PROGRESS_CFG;
3764 	cidp = &bgep->chipid;
	bzero(cidp, sizeof (*cidp));
3766 	bge_chip_cfg_init(bgep, cidp, B_FALSE);
3767 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3768 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3769 		goto attach_fail;
3770 	}
3771 
3772 #ifdef BGE_IPMI_ASF
3773 	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
3774 	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
3775 		bgep->asf_newhandshake = B_TRUE;
3776 	} else {
3777 		bgep->asf_newhandshake = B_FALSE;
3778 	}
3779 #endif
3780 
3781 	/*
3782 	 * Update those parts of the chip ID derived from volatile
3783 	 * registers with the values seen by OBP (in case the chip
3784 	 * has been reset externally and therefore lost them).
3785 	 */
3786 	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3787 	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3788 	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3789 	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3790 	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3791 	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3792 	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3793 	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3794 	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3795 	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3796 	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3797 	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3798 	cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3799 	    DDI_PROP_DONTPASS, eee_propname, cidp->eee);
3800 
3801 	cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3802 	    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3803 	if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3804 	    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3805 		cidp->default_mtu = BGE_DEFAULT_MTU;
3806 	}
3807 
3808 	/*
3809 	 * Map operating registers
3810 	 */
3811 	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3812 	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3813 	if (err != DDI_SUCCESS) {
3814 		bge_problem(bgep, "ddi_regs_map_setup() failed");
3815 		goto attach_fail;
3816 	}
3817 	bgep->io_regs = regs;
3818 
3819 	bgep->ape_enabled = B_FALSE;
3820 	bgep->ape_regs = NULL;
3821 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3822 	    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
3823 		err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER,
3824 		    &regs, 0, 0, &bge_reg_accattr, &bgep->ape_handle);
3825 		if (err != DDI_SUCCESS) {
3826 			ddi_regs_map_free(&bgep->io_handle);
3827 			bge_problem(bgep, "ddi_regs_map_setup() failed");
3828 			goto attach_fail;
3829 		}
3830 		bgep->ape_regs    = regs;
3831 		bgep->ape_enabled = B_TRUE;
3832 
3833 		/*
3834 		 * Allow reads and writes to the
3835 		 * APE register and memory space.
3836 		 */
3837 
3838 		pci_state_reg = pci_config_get32(bgep->cfg_handle,
3839 		    PCI_CONF_BGE_PCISTATE);
3840 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
3841 		    PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
3842 		pci_config_put32(bgep->cfg_handle,
3843 		    PCI_CONF_BGE_PCISTATE, pci_state_reg);
3844 
3845 		bge_ape_lock_init(bgep);
3846 	}
3847 
3848 	bgep->progress |= PROGRESS_REGS;
3849 
3850 	/*
3851 	 * Characterise the device, so we know its requirements.
3852 	 * Then allocate the appropriate TX and RX descriptors & buffers.
3853 	 */
3854 	if (bge_chip_id_init(bgep) == EIO) {
3855 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3856 		goto attach_fail;
3857 	}
3858 
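	/* Fetch our bus/device/function numbers from the "reg" property */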
3859 	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
3860 	    0, "reg", &props, &numProps);
3861 	if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) {
3862 		bgep->pci_bus  = PCI_REG_BUS_G(props[0]);
3863 		bgep->pci_dev  = PCI_REG_DEV_G(props[0]);
3864 		bgep->pci_func = PCI_REG_FUNC_G(props[0]);
3865 		ddi_prop_free(props);
3866 	}
3867 
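	/*
	 * On 5717/5725 series devices, take the function number from the
	 * CPMU status register instead.
	 */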
3868 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3869 	    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
3870 		regval = bge_reg_get32(bgep, CPMU_STATUS_REG);
3871 		if ((bgep->chipid.device == DEVICE_ID_5719) ||
3872 		    (bgep->chipid.device == DEVICE_ID_5720)) {
3873 			bgep->pci_func =
3874 			    ((regval & CPMU_STATUS_FUNC_NUM_5719) >>
3875 			    CPMU_STATUS_FUNC_NUM_5719_SHIFT);
3876 		} else {
3877 			bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >>
3878 			    CPMU_STATUS_FUNC_NUM_SHIFT);
3879 		}
3880 	}
3881 
3882 	err = bge_alloc_bufs(bgep);
3883 	if (err != DDI_SUCCESS) {
3884 		bge_problem(bgep, "DMA buffer allocation failed");
3885 		goto attach_fail;
3886 	}
3887 	bgep->progress |= PROGRESS_BUFS;
3888 
3889 	/*
3890 	 * Add the softint handlers:
3891 	 *
3892 	 * Both of these handlers are used to avoid restrictions on the
3893 	 * context and/or mutexes required for some operations.  In
3894 	 * particular, the hardware interrupt handler and its subfunctions
3895 	 * can detect a number of conditions that we don't want to handle
3896 	 * in that context or with that set of mutexes held.  So, these
3897 	 * softints are triggered instead:
3898 	 *
3899 	 * the <resched> softint is triggered if we have previously
3900 	 * had to refuse to send a packet because of resource shortage
3901 	 * (we've run out of transmit buffers), but the send completion
3902 	 * interrupt handler has now detected that more buffers have
3903 	 * become available.
3904 	 *
3905 	 * the <factotum> is triggered if the h/w interrupt handler
3906 	 * sees the <link state changed> or <error> bits in the status
3907 	 * block.  It's also triggered periodically to poll the link
3908 	 * state, just in case we aren't getting link status change
3909 	 * interrupts ...
3910 	 */
3911 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
3912 	    NULL, NULL, bge_send_drain, (caddr_t)bgep);
3913 	if (err != DDI_SUCCESS) {
3914 		bge_problem(bgep, "ddi_add_softintr() failed");
3915 		goto attach_fail;
3916 	}
3917 	bgep->progress |= PROGRESS_RESCHED;
3918 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
3919 	    NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
3920 	if (err != DDI_SUCCESS) {
3921 		bge_problem(bgep, "ddi_add_softintr() failed");
3922 		goto attach_fail;
3923 	}
3924 	bgep->progress |= PROGRESS_FACTOTUM;
3925 
3926 	/* Get supported interrupt types */
3927 	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
3928 		bge_error(bgep, "ddi_intr_get_supported_types failed\n");
3929 
3930 		goto attach_fail;
3931 	}
3932 
3933 	BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
3934 	    bgep->ifname, intr_types));
3935 
3936 	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
3937 		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
3938 			bge_error(bgep, "MSI registration failed, "
3939 			    "trying FIXED interrupt type\n");
3940 		} else {
3941 			BGE_DEBUG(("%s: Using MSI interrupt type",
3942 			    bgep->ifname));
3943 			bgep->intr_type = DDI_INTR_TYPE_MSI;
3944 			bgep->progress |= PROGRESS_HWINT;
3945 		}
3946 	}
3947 
3948 	if (!(bgep->progress & PROGRESS_HWINT) &&
3949 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
3950 		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
3951 			bge_error(bgep, "FIXED interrupt "
3952 			    "registration failed\n");
3953 			goto attach_fail;
3954 		}
3955 
3956 		BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));
3957 
3958 		bgep->intr_type = DDI_INTR_TYPE_FIXED;
3959 		bgep->progress |= PROGRESS_HWINT;
3960 	}
3961 
3962 	if (!(bgep->progress & PROGRESS_HWINT)) {
3963 		bge_error(bgep, "No interrupts registered\n");
3964 		goto attach_fail;
3965 	}
3966 
3967 	/*
	 * Interrupts are not enabled yet, because the mutexes they use
	 * have not been initialized.  Initialize those locks now.
3970 	 */
3971 	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
3972 	    DDI_INTR_PRI(bgep->intr_pri));
3973 	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
3974 	    DDI_INTR_PRI(bgep->intr_pri));
3975 	rw_init(bgep->errlock, NULL, RW_DRIVER,
3976 	    DDI_INTR_PRI(bgep->intr_pri));
3977 
3978 	/*
3979 	 * Initialize rings.
3980 	 */
3981 	bge_init_rings(bgep);
3982 
3983 	/*
3984 	 * Now that mutex locks are initialized, enable interrupts.
3985 	 */
3986 	bge_intr_enable(bgep);
3987 	bgep->progress |= PROGRESS_INTR;
3988 
3989 	/*
3990 	 * Initialise link state variables
3991 	 * Stop, reset & reinitialise the chip.
3992 	 * Initialise the (internal) PHY.
3993 	 */
3994 	bgep->link_state = LINK_STATE_UNKNOWN;
3995 
3996 	mutex_enter(bgep->genlock);
3997 
3998 	/*
3999 	 * Reset chip & rings to initial state; also reset address
4000 	 * filtering, promiscuity, loopback mode.
4001 	 */
4002 #ifdef BGE_IPMI_ASF
4003 #ifdef BGE_NETCONSOLE
4004 	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
4005 #else
4006 	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
4007 #endif
4008 #else
4009 	if (bge_reset(bgep) != DDI_SUCCESS) {
4010 #endif
4011 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
4012 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
4013 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4014 		mutex_exit(bgep->genlock);
4015 		goto attach_fail;
4016 	}
4017 
4018 #ifdef BGE_IPMI_ASF
4019 	if (bgep->asf_enabled) {
4020 		bgep->asf_status = ASF_STAT_RUN_INIT;
4021 	}
4022 #endif
4023 
4024 	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
4025 	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
4026 	bgep->promisc = B_FALSE;
4027 	bgep->param_loop_mode = BGE_LOOP_NONE;
4028 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
4029 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4030 		mutex_exit(bgep->genlock);
4031 		goto attach_fail;
4032 	}
4033 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
4034 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4035 		mutex_exit(bgep->genlock);
4036 		goto attach_fail;
4037 	}
4038 
4039 	mutex_exit(bgep->genlock);
4040 
4041 	if (bge_phys_init(bgep) == EIO) {
4042 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
4043 		goto attach_fail;
4044 	}
4045 	bgep->progress |= PROGRESS_PHY;
4046 
4047 	/*
4048 	 * initialize NDD-tweakable parameters
4049 	 */
4050 	if (bge_nd_init(bgep)) {
4051 		bge_problem(bgep, "bge_nd_init() failed");
4052 		goto attach_fail;
4053 	}
4054 	bgep->progress |= PROGRESS_NDD;
4055 
4056 	/*
4057 	 * Create & initialise named kstats
4058 	 */
4059 	bge_init_kstats(bgep, instance);
4060 	bgep->progress |= PROGRESS_KSTATS;
4061 
4062 	/*
4063 	 * Determine whether to override the chip's own MAC address
4064 	 */
4065 	bge_find_mac_address(bgep, cidp);
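	/* Seed every MAC address slot with the primary (vendor) address */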
4066 	{
4067 		int slot;
4068 		for (slot = 0; slot < MAC_ADDRESS_REGS_MAX; slot++) {
4069 			ethaddr_copy(cidp->vendor_addr.addr,
4070 			    bgep->curr_addr[slot].addr);
4071 			bgep->curr_addr[slot].set = 1;
4072 		}
4073 	}
4074 
4075 	bge_read_fw_ver(bgep);
4076 
4077 	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
4078 	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;
4079 
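	/*
	 * Describe this instance to the MAC layer: callbacks, SDU limits,
	 * source address and private properties.
	 */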
4080 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4081 		goto attach_fail;
4082 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4083 	macp->m_driver = bgep;
4084 	macp->m_dip = devinfo;
4085 	macp->m_src_addr = cidp->vendor_addr.addr;
4086 	macp->m_callbacks = &bge_m_callbacks;
4087 	macp->m_min_sdu = 0;
4088 	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
4089 	macp->m_margin = VLAN_TAGSZ;
4090 	macp->m_priv_props = bge_priv_prop;
4091 
4092 #if defined(ILLUMOS)
4093 	bge_m_unicst(bgep, cidp->vendor_addr.addr);
4094 #endif
4095 
4096 	/*
4097 	 * Finally, we're ready to register ourselves with the MAC layer
4098 	 * interface; if this succeeds, we're all ready to start()
4099 	 */
4100 	err = mac_register(macp, &bgep->mh);
4101 	mac_free(macp);
4102 	if (err != 0)
4103 		goto attach_fail;
4104 
4105 	mac_link_update(bgep->mh, LINK_STATE_UNKNOWN);
4106 
4107 	/*
	 * Register a periodic handler.
4109 	 * bge_chip_cyclic() is invoked in kernel context.
4110 	 */
4111 	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
4112 	    BGE_CYCLIC_PERIOD, DDI_IPL_0);
4113 
4114 	bgep->progress |= PROGRESS_READY;
4115 	ASSERT(bgep->bge_guard == BGE_GUARD);
4116 #ifdef BGE_IPMI_ASF
4117 #ifdef BGE_NETCONSOLE
4118 	if (bgep->asf_enabled) {
4119 		mutex_enter(bgep->genlock);
4120 		retval = bge_chip_start(bgep, B_TRUE);
4121 		mutex_exit(bgep->genlock);
4122 		if (retval != DDI_SUCCESS)
4123 			goto attach_fail;
4124 	}
4125 #endif
4126 #endif
4127 
4128 	ddi_report_dev(devinfo);
4129 
4130 	return (DDI_SUCCESS);
4131 
4132 attach_fail:
4133 #ifdef BGE_IPMI_ASF
4134 	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
4135 #else
4136 	bge_unattach(bgep);
4137 #endif
4138 	return (DDI_FAILURE);
4139 }
4140 
4141 /*
4142  *	bge_suspend() -- suspend transmit/receive for powerdown
4143  */
4144 static int
4145 bge_suspend(bge_t *bgep)
4146 {
4147 	/*
4148 	 * Stop processing and idle (powerdown) the PHY ...
4149 	 */
4150 	mutex_enter(bgep->genlock);
4151 #ifdef BGE_IPMI_ASF
4152 	/*
	 * Power management is not currently supported by BGE.  If you
	 * want to implement it, add the ASF/IPMI-related code here.
4156 	 */
4157 #endif
4158 	bge_stop(bgep);
4159 	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
4160 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
4161 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
4162 		mutex_exit(bgep->genlock);
4163 		return (DDI_FAILURE);
4164 	}
4165 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
4166 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
4167 		mutex_exit(bgep->genlock);
4168 		return (DDI_FAILURE);
4169 	}
4170 	mutex_exit(bgep->genlock);
4171 
4172 	return (DDI_SUCCESS);
4173 }
4174 
4175 /*
4176  * quiesce(9E) entry point.
4177  *
4178  * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
4181  *
4182  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4183  * DDI_FAILURE indicates an error condition and should almost never happen.
4184  */
4185 #ifdef	__sparc
4186 #define	bge_quiesce	ddi_quiesce_not_supported
4187 #else
4188 static int
4189 bge_quiesce(dev_info_t *devinfo)
4190 {
4191 	bge_t *bgep = ddi_get_driver_private(devinfo);
4192 
4193 	if (bgep == NULL)
4194 		return (DDI_FAILURE);
4195 
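	/*
	 * Mask further interrupt generation: the INTA output for FIXED
	 * interrupts, or the MSI enable bit for MSI.
	 */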
4196 	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
4197 		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
4198 		    MHCR_MASK_PCI_INT_OUTPUT);
4199 	} else {
4200 		bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
4201 	}
4202 
4203 	/* Stop the chip */
4204 	bge_chip_stop_nonblocking(bgep);
4205 
4206 	return (DDI_SUCCESS);
4207 }
4208 #endif
4209 
4210 /*
4211  * detach(9E) -- Detach a device from the system
4212  */
4213 static int
4214 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
4215 {
4216 	bge_t *bgep;
4217 #ifdef BGE_IPMI_ASF
4218 	uint_t asf_mode;
4219 	asf_mode = ASF_MODE_NONE;
4220 #endif
4221 
4222 	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));
4223 
4224 	bgep = ddi_get_driver_private(devinfo);
4225 
4226 	switch (cmd) {
4227 	default:
4228 		return (DDI_FAILURE);
4229 
4230 	case DDI_SUSPEND:
4231 		return (bge_suspend(bgep));
4232 
4233 	case DDI_DETACH:
4234 		break;
4235 	}
4236 
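	/*
	 * If ASF firmware is running, stop its heartbeat timer and perform
	 * the pre-reset handshake before the chip is shut down.
	 */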
4237 #ifdef BGE_IPMI_ASF
4238 	mutex_enter(bgep->genlock);
4239 	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
4240 	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {
4241 
4242 		bge_asf_update_status(bgep);
4243 		if (bgep->asf_status == ASF_STAT_RUN) {
4244 			bge_asf_stop_timer(bgep);
4245 		}
4246 		bgep->asf_status = ASF_STAT_STOP;
4247 
4248 		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
4249 
4250 		if (bgep->asf_pseudostop) {
4251 			bge_chip_stop(bgep, B_FALSE);
4252 			bgep->bge_mac_state = BGE_MAC_STOPPED;
4253 			bgep->asf_pseudostop = B_FALSE;
4254 		}
4255 
4256 		asf_mode = ASF_MODE_POST_SHUTDOWN;
4257 
4258 		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
4259 			ddi_fm_service_impact(bgep->devinfo,
4260 			    DDI_SERVICE_UNAFFECTED);
4261 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
4262 			ddi_fm_service_impact(bgep->devinfo,
4263 			    DDI_SERVICE_UNAFFECTED);
4264 	}
4265 	mutex_exit(bgep->genlock);
4266 #endif
4267 
4268 	/*
4269 	 * Unregister from the GLD subsystem.  This can fail, in
4270 	 * particular if there are DLPI style-2 streams still open -
4271 	 * in which case we just return failure without shutting
4272 	 * down chip operations.
4273 	 */
4274 	if (mac_unregister(bgep->mh) != 0)
4275 		return (DDI_FAILURE);
4276 
4277 	/*
4278 	 * All activity stopped, so we can clean up & exit
4279 	 */
4280 #ifdef BGE_IPMI_ASF
4281 	bge_unattach(bgep, asf_mode);
4282 #else
4283 	bge_unattach(bgep);
4284 #endif
4285 	return (DDI_SUCCESS);
4286 }
4287 
4288 
4289 /*
4290  * ========== Module Loading Data & Entry Points ==========
4291  */
4292 
4293 #undef	BGE_DBG
4294 #define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
4295 
4296 DDI_DEFINE_STREAM_OPS(bge_dev_ops,
4297 	nulldev,	/* identify */
4298 	nulldev,	/* probe */
4299 	bge_attach,	/* attach */
4300 	bge_detach,	/* detach */
4301 	nodev,		/* reset */
	NULL,		/* getinfo */
	D_MP,		/* flag */
	NULL,		/* stream_tab */
4305 	bge_quiesce	/* quiesce */
4306 );
4307 
4308 static struct modldrv bge_modldrv = {
4309 	&mod_driverops,		/* Type of module.  This one is a driver */
4310 	bge_ident,		/* short description */
4311 	&bge_dev_ops		/* driver specific ops */
4312 };
4313 
4314 static struct modlinkage modlinkage = {
4315 	MODREV_1, (void *)&bge_modldrv, NULL
4316 };
4317 
4318 
4319 int
4320 _info(struct modinfo *modinfop)
4321 {
4322 	return (mod_info(&modlinkage, modinfop));
4323 }
4324 
4325 int
4326 _init(void)
4327 {
4328 	int status;
4329 
4330 	mac_init_ops(&bge_dev_ops, "bge");
4331 	status = mod_install(&modlinkage);
4332 	if (status == DDI_SUCCESS)
4333 		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
4334 	else
4335 		mac_fini_ops(&bge_dev_ops);
4336 	return (status);
4337 }
4338 
4339 int
4340 _fini(void)
4341 {
4342 	int status;
4343 
4344 	status = mod_remove(&modlinkage);
4345 	if (status == DDI_SUCCESS) {
4346 		mac_fini_ops(&bge_dev_ops);
4347 		mutex_destroy(bge_log_mutex);
4348 	}
4349 	return (status);
4350 }
4351 
4352 
4353 /*
4354  * bge_add_intrs:
4355  *
4356  * Register FIXED or MSI interrupts.
4357  */
4358 static int
bge_add_intrs(bge_t *bgep, int intr_type)
4360 {
4361 	dev_info_t	*dip = bgep->devinfo;
4362 	int		avail, actual, intr_size, count = 0;
4363 	int		i, flag, ret;
4364 
4365 	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));
4366 
4367 	/* Get number of interrupts */
4368 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
4369 	if ((ret != DDI_SUCCESS) || (count == 0)) {
4370 		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d\n", ret, count);
4372 
4373 		return (DDI_FAILURE);
4374 	}
4375 
4376 	/* Get number of available interrupts */
4377 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
4378 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
4379 		bge_error(bgep, "ddi_intr_get_navail() failure, "
4380 		    "ret: %d, avail: %d\n", ret, avail);
4381 
4382 		return (DDI_FAILURE);
4383 	}
4384 
4385 	if (avail < count) {
4386 		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
4387 		    bgep->ifname, count, avail));
4388 	}
4389 
4390 	/*
	 * BGE hardware generates only a single MSI even though it claims
	 * to support multiple MSIs, so hard-code the MSI count to 1.
4393 	 */
4394 	if (intr_type == DDI_INTR_TYPE_MSI) {
4395 		count = 1;
4396 		flag = DDI_INTR_ALLOC_STRICT;
4397 	} else {
4398 		flag = DDI_INTR_ALLOC_NORMAL;
4399 	}
4400 
4401 	/* Allocate an array of interrupt handles */
4402 	intr_size = count * sizeof (ddi_intr_handle_t);
4403 	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);
4404 
4405 	/* Call ddi_intr_alloc() */
4406 	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
4407 	    count, &actual, flag);
4408 
4409 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
4410 		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
4411 
4412 		kmem_free(bgep->htable, intr_size);
4413 		return (DDI_FAILURE);
4414 	}
4415 
4416 	if (actual < count) {
4417 		BGE_DEBUG(("%s: Requested: %d, Received: %d",
4418 		    bgep->ifname, count, actual));
4419 	}
4420 
4421 	bgep->intr_cnt = actual;
4422 
4423 	/*
	 * Get the priority of the first vector; assume the rest are the same.
4425 	 */
4426 	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
4427 	    DDI_SUCCESS) {
4428 		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);
4429 
4430 		/* Free already allocated intr */
4431 		for (i = 0; i < actual; i++) {
4432 			(void) ddi_intr_free(bgep->htable[i]);
4433 		}
4434 
4435 		kmem_free(bgep->htable, intr_size);
4436 		return (DDI_FAILURE);
4437 	}
4438 
4439 	/* Call ddi_intr_add_handler() */
4440 	for (i = 0; i < actual; i++) {
4441 		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
4442 		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
4443 			bge_error(bgep, "ddi_intr_add_handler() "
4444 			    "failed %d\n", ret);
4445 
			/* Remove handlers already added, then free intrs */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    bgep->htable[i]);
			}
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}
4450 
4451 			kmem_free(bgep->htable, intr_size);
4452 			return (DDI_FAILURE);
4453 		}
4454 	}
4455 
4456 	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
4457 	    != DDI_SUCCESS) {
4458 		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);
4459 
4460 		for (i = 0; i < actual; i++) {
4461 			(void) ddi_intr_remove_handler(bgep->htable[i]);
4462 			(void) ddi_intr_free(bgep->htable[i]);
4463 		}
4464 
4465 		kmem_free(bgep->htable, intr_size);
4466 		return (DDI_FAILURE);
4467 	}
4468 
4469 	return (DDI_SUCCESS);
4470 }
4471 
4472 /*
4473  * bge_rem_intrs:
4474  *
4475  * Unregister FIXED or MSI interrupts
4476  */
4477 static void
4478 bge_rem_intrs(bge_t *bgep)
4479 {
4480 	int	i;
4481 
4482 	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));
4483 
4484 	/* Call ddi_intr_remove_handler() */
4485 	for (i = 0; i < bgep->intr_cnt; i++) {
4486 		(void) ddi_intr_remove_handler(bgep->htable[i]);
4487 		(void) ddi_intr_free(bgep->htable[i]);
4488 	}
4489 
4490 	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
4491 }
4492 
4493 
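/*
 * bge_intr_enable:
 *
 * Enable all allocated interrupts, using block-enable when supported.
 */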
4494 void
4495 bge_intr_enable(bge_t *bgep)
4496 {
4497 	int i;
4498 
4499 	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
4500 		/* Call ddi_intr_block_enable() for MSI interrupts */
4501 		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
4502 	} else {
4503 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
4504 		for (i = 0; i < bgep->intr_cnt; i++) {
4505 			(void) ddi_intr_enable(bgep->htable[i]);
4506 		}
4507 	}
4508 }
4509 
4510 
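/*
 * bge_intr_disable:
 *
 * Disable all allocated interrupts, using block-disable when supported.
 */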
4511 void
4512 bge_intr_disable(bge_t *bgep)
4513 {
4514 	int i;
4515 
4516 	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
4517 		/* Call ddi_intr_block_disable() */
4518 		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
4519 	} else {
4520 		for (i = 0; i < bgep->intr_cnt; i++) {
4521 			(void) ddi_intr_disable(bgep->htable[i]);
4522 		}
4523 	}
4524 }
4525 
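/*
 * bge_reprogram:
 *
 * Reprogram the PHY and re-sync the chip's internal state;
 * the caller must hold genlock.
 */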
4526 int
4527 bge_reprogram(bge_t *bgep)
4528 {
4529 	int status = 0;
4530 
4531 	ASSERT(mutex_owned(bgep->genlock));
4532 
4533 	if (bge_phys_update(bgep) != DDI_SUCCESS) {
4534 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
4535 		status = IOC_INVAL;
4536 	}
4537 #ifdef BGE_IPMI_ASF
4538 	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
4539 #else
4540 	if (bge_chip_sync(bgep) == DDI_FAILURE) {
4541 #endif
4542 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
4543 		status = IOC_INVAL;
4544 	}
4545 	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
4546 		bge_chip_msi_trig(bgep);
4547 	return (status);
4548 }
4549