xref: /titanic_51/usr/src/uts/common/io/bge/bge_main2.c (revision ba2e4443695ee6a6f420a35cd4fc3d3346d22932)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "sys/bge_impl2.h"
30 #include <sys/sdt.h>
31 
32 /*
33  * This is the string displayed by modinfo, etc.
34  * Make sure you keep the version ID up to date!
35  */
36 static char bge_ident[] = "Broadcom Gb Ethernet v0.52";
37 
38 /*
39  * Property names
40  */
41 static char debug_propname[] = "bge-debug-flags";
42 static char clsize_propname[] = "cache-line-size";
43 static char latency_propname[] = "latency-timer";
44 static char localmac_boolname[] = "local-mac-address?";
45 static char localmac_propname[] = "local-mac-address";
46 static char macaddr_propname[] = "mac-address";
47 static char subdev_propname[] = "subsystem-id";
48 static char subven_propname[] = "subsystem-vendor-id";
49 static char rxrings_propname[] = "bge-rx-rings";
50 static char txrings_propname[] = "bge-tx-rings";
51 static char fm_cap[] = "fm-capable";
52 static char default_mtu[] = "default_mtu";
53 
54 static int bge_add_intrs(bge_t *, int);
55 static void bge_rem_intrs(bge_t *);
56 
57 /*
58  * Describes the chip's DMA engine
59  */
60 static ddi_dma_attr_t dma_attr = {
61 	DMA_ATTR_V0,			/* dma_attr version	*/
62 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
63 	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
64 	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
65 	0x0000000000000001ull,		/* dma_attr_align	*/
66 	0x00000FFF,			/* dma_attr_burstsizes	*/
67 	0x00000001,			/* dma_attr_minxfer	*/
68 	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
69 	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
70 	1,				/* dma_attr_sgllen 	*/
71 	0x00000001,			/* dma_attr_granular 	*/
72 	DDI_DMA_FLAGERR			/* dma_attr_flags */
73 };
74 
75 /*
76  * PIO access attributes for registers
77  */
78 static ddi_device_acc_attr_t bge_reg_accattr = {
79 	DDI_DEVICE_ATTR_V0,
80 	DDI_NEVERSWAP_ACC,
81 	DDI_STRICTORDER_ACC,
82 	DDI_FLAGERR_ACC
83 };
84 
85 /*
86  * DMA access attributes for descriptors: NOT to be byte swapped.
87  */
88 static ddi_device_acc_attr_t bge_desc_accattr = {
89 	DDI_DEVICE_ATTR_V0,
90 	DDI_NEVERSWAP_ACC,
91 	DDI_STRICTORDER_ACC,
92 	DDI_FLAGERR_ACC
93 };
94 
95 /*
96  * DMA access attributes for data: NOT to be byte swapped.
97  */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,		/* version of this structure	*/
	DDI_NEVERSWAP_ACC,		/* native order, no byte swap	*/
	DDI_STRICTORDER_ACC		/* strict access ordering	*/
	/*
	 * NOTE(review): unlike bge_reg_accattr/bge_desc_accattr above,
	 * DDI_FLAGERR_ACC is not set here -- confirm that data buffers
	 * are intentionally excluded from FMA flagged-error protection.
	 */
};
103 
104 static ether_addr_t bge_broadcast_addr = {
105 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
106 };
107 
108 /*
109  * Versions of the O/S up to Solaris 8 didn't support network booting
110  * from any network interface except the first (NET0).  Patching this
111  * flag to a non-zero value will tell the driver to work around this
112  * limitation by creating an extra (internal) pathname node.  To do
113  * this, just add a line like the following to the CLIENT'S etc/system
114  * file ON THE ROOT FILESYSTEM SERVER before booting the client:
115  *
116  *	set bge:bge_net1_boot_support = 1;
117  */
118 static uint32_t bge_net1_boot_support = 1;
119 
120 static int		bge_m_start(void *);
121 static void		bge_m_stop(void *);
122 static int		bge_m_promisc(void *, boolean_t);
123 static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
124 static int		bge_m_unicst(void *, const uint8_t *);
125 static void		bge_m_resources(void *);
126 static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
127 static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
128 
129 #define	BGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
130 
static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,	/* which optional callbacks are valid	*/
	bge_m_stat,		/* get statistics			*/
	bge_m_start,		/* start Rx/Tx processing		*/
	bge_m_stop,		/* stop Rx/Tx processing		*/
	bge_m_promisc,		/* set/clear promiscuous mode		*/
	bge_m_multicst,		/* add/remove a multicast address	*/
	bge_m_unicst,		/* set the unicast (MAC) address	*/
	bge_m_tx,		/* transmit a chain of packets		*/
	bge_m_resources,	/* register Rx rings with MAC layer	*/
	bge_m_ioctl,		/* driver-specific ioctls		*/
	bge_m_getcapab		/* report device capabilities		*/
};
144 
145 /*
146  * ========== Transmit and receive ring reinitialisation ==========
147  */
148 
149 /*
150  * These <reinit> routines each reset the specified ring to an initial
151  * state, assuming that the corresponding <init> routine has already
152  * been called exactly once.
153  */
154 
155 static void
156 bge_reinit_send_ring(send_ring_t *srp)
157 {
158 	/*
159 	 * Reinitialise control variables ...
160 	 */
161 	ASSERT(srp->tx_flow == 0);
162 	srp->tx_next = 0;
163 	srp->tx_free = srp->desc.nslots;
164 
165 	ASSERT(mutex_owned(srp->tc_lock));
166 	srp->tc_next = 0;
167 
168 	/*
169 	 * Zero and sync all the h/w Send Buffer Descriptors
170 	 */
171 	DMA_ZERO(srp->desc);
172 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
173 }
174 
/*
 * Return the specified receive (return) ring to its post-<init>
 * state.  Only the software consumer index needs resetting here;
 * note that, unlike the send/buffer rings, the descriptor area is
 * not zeroed or synced by this routine.
 */
static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}
183 
184 static void
185 bge_reinit_buff_ring(buff_ring_t *brp, uint64_t ring)
186 {
187 	bge_rbd_t *hw_rbd_p;
188 	sw_rbd_t *srbdp;
189 	uint32_t bufsize;
190 	uint32_t nslots;
191 	uint32_t slot;
192 
193 	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
194 		RBD_FLAG_STD_RING,
195 		RBD_FLAG_JUMBO_RING,
196 		RBD_FLAG_MINI_RING
197 	};
198 
199 	/*
200 	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
201 	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
202 	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
203 	 * should be zeroed, and so don't need to be set up specifically
204 	 * once the whole area has been cleared.
205 	 */
206 	DMA_ZERO(brp->desc);
207 
208 	hw_rbd_p = DMA_VPTR(brp->desc);
209 	nslots = brp->desc.nslots;
210 	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
211 	bufsize = brp->buf[0].size;
212 	srbdp = brp->sw_rbds;
213 	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
214 		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
215 		hw_rbd_p->index = slot;
216 		hw_rbd_p->len = bufsize;
217 		hw_rbd_p->opaque = srbdp->pbuf.token;
218 		hw_rbd_p->flags |= ring_type_flag[ring];
219 	}
220 
221 	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);
222 
223 	/*
224 	 * Finally, reinitialise the ring control variables ...
225 	 */
226 	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
227 }
228 
229 /*
230  * Reinitialize all rings
231  */
232 static void
233 bge_reinit_rings(bge_t *bgep)
234 {
235 	uint64_t ring;
236 
237 	ASSERT(mutex_owned(bgep->genlock));
238 
239 	/*
240 	 * Send Rings ...
241 	 */
242 	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
243 		bge_reinit_send_ring(&bgep->send[ring]);
244 
245 	/*
246 	 * Receive Return Rings ...
247 	 */
248 	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
249 		bge_reinit_recv_ring(&bgep->recv[ring]);
250 
251 	/*
252 	 * Receive Producer Rings ...
253 	 */
254 	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
255 		bge_reinit_buff_ring(&bgep->buff[ring], ring);
256 }
257 
258 /*
259  * ========== Internal state management entry points ==========
260  */
261 
262 #undef	BGE_DBG
263 #define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/
264 
265 /*
266  * These routines provide all the functionality required by the
267  * corresponding GLD entry points, but don't update the GLD state
268  * so they can be called internally without disturbing our record
269  * of what GLD thinks we should be doing ...
270  */
271 
272 /*
273  *	bge_reset() -- reset h/w & rings to initial state
274  */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint64_t	ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 *
	 * NOTE(review): the locks are taken in the order
	 * recv -> buff -> errlock -> send and released in exactly
	 * the reverse order below; any new lock must respect this
	 * ordering to avoid deadlock.
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

	/*
	 * Reset the chip, then put all rings back into their
	 * post-<init> state (bge_reinit_rings() assumes the <init>
	 * routines have already been called exactly once).
	 */
#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
322 
323 /*
324  *	bge_stop() -- stop processing, don't reset h/w or rings
325  */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	/*
	 * With IPMI/ASF enabled the chip must keep running for the
	 * sideband interface, so we only record a "pseudo stop";
	 * otherwise stop the chip for real (B_FALSE => no full reset).
	 */
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}
345 
346 /*
347  *	bge_start() -- start transmitting/receiving
348  */
349 static int
350 bge_start(bge_t *bgep, boolean_t reset_phys)
351 {
352 	int retval;
353 
354 	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));
355 
356 	ASSERT(mutex_owned(bgep->genlock));
357 
358 	/*
359 	 * Start chip processing, including enabling interrupts
360 	 */
361 	retval = bge_chip_start(bgep, reset_phys);
362 
363 	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
364 	return (retval);
365 }
366 
367 /*
368  * bge_restart - restart transmitting/receiving after error or suspend
369  */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Reset the chip (selecting the appropriate ASF mode when
	 * ASF support is compiled in).  A failure is remembered in
	 * <retval> but we press on, to recover as much as possible.
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	/*
	 * Only restart the h/w if GLD considers us STARTED; then
	 * clear the watchdog and retrigger the tx-reschedule softint
	 * so any blocked transmit path wakes up.
	 */
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->resched_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}
397 
398 
399 /*
400  * ========== Nemo-required management entry points ==========
401  */
402 
403 #undef	BGE_DBG
404 #define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/
405 
406 /*
407  *	bge_m_stop() -- stop transmitting/receiving
408  */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}

	/*
	 * Tag the link messages so subsequent link changes report
	 * "(stopped)"; note bge_stop() only pseudo-stops the chip
	 * when ASF is enabled, but GLD state becomes STOPPED either
	 * way.  A final FMA check on the register handle follows.
	 */
	bgep->link_up_msg = bgep->link_down_msg = " (stopped)";
	bge_stop(bgep);
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
434 
435 /*
436  *	bge_m_start() -- start transmitting/receiving
437  */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	/*
	 * If the chip was only pseudo-stopped (see bge_stop()) and
	 * the ASF heartbeat is still running, the h/w never actually
	 * went down: just flip the GLD state back to STARTED.
	 */
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
			(bgep->asf_pseudostop)) {

			bgep->link_up_msg = bgep->link_down_msg
				= " (initialized)";
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->link_up_msg = bgep->link_down_msg = " (initialized)";
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	/*
	 * Verify both access handles are fault-free before declaring
	 * the start successful ...
	 */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	/*
	 * (Re)arm the periodic ASF heartbeat if it isn't running yet
	 */
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
				(void *)bgep,
				drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
513 
514 /*
515  *	bge_m_unicst_set() -- set the physical network address
516  */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
		ether_sprintf((void *)macaddr)));

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr.addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade server)
			 * may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
				(void *)bgep,
				drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	/*
	 * Final FMA check on both access handles before reporting
	 * the address change as successful ...
	 */
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
605 
606 /*
607  * Compute the index of the required bit in the multicast hash map.
608  * This must mirror the way the hardware actually does it!
609  * See Broadcom document 570X-PG102-R page 125.
610  */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	/* CRC-32 over the 6-byte address, seeded with all-ones */
	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
620 
621 /*
622  *	bge_m_multicst_add() -- enable/disable a multicast address
623  */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;			/* CRC of the address	*/
	uint32_t index;			/* bit# in hash table	*/
	uint32_t word;			/* word# in hash map	*/
	uint32_t bit;			/* bit mask in word	*/
	uint8_t *refp;			/* per-bit refcount	*/

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
		(add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
		hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		/*
		 * NOTE(review): there is no guard against *refp already
		 * being 0 here; removing an address that was never added
		 * would wrap the uint8_t refcount.  Presumably the MAC
		 * layer guarantees balanced add/remove calls -- confirm.
		 */
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
715 
716 /*
717  * bge_m_promisc() -- set or reset promiscuous mode on the board
718  *
719  *	Program the hardware to enable/disable promiscuous and/or
720  *	receive-all-multicast modes.
721  */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	/*
	 * Final FMA check on both access handles before reporting
	 * the mode change as successful ...
	 */
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}
765 
766 /*ARGSUSED*/
767 static boolean_t
768 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
769 {
770 	switch (cap) {
771 	case MAC_CAPAB_HCKSUM: {
772 		uint32_t *txflags = cap_data;
773 
774 		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
775 		break;
776 	}
777 	case MAC_CAPAB_POLL:
778 		/*
779 		 * There's nothing for us to fill in, simply returning
780 		 * B_TRUE stating that we support polling is sufficient.
781 		 */
782 		break;
783 	default:
784 		return (B_FALSE);
785 	}
786 	return (B_TRUE);
787 }
788 
789 /*
790  * Loopback ioctl code
791  */
792 
793 static lb_property_t loopmodes[] = {
794 	{ normal,	"normal",	BGE_LOOP_NONE		},
795 	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
796 	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
797 	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
798 	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
799 	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
800 };
801 
802 static enum ioc_reply
803 bge_set_loop_mode(bge_t *bgep, uint32_t mode)
804 {
805 	const char *msg;
806 
807 	/*
808 	 * If the mode isn't being changed, there's nothing to do ...
809 	 */
810 	if (mode == bgep->param_loop_mode)
811 		return (IOC_ACK);
812 
813 	/*
814 	 * Validate the requested mode and prepare a suitable message
815 	 * to explain the link down/up cycle that the change will
816 	 * probably induce ...
817 	 */
818 	switch (mode) {
819 	default:
820 		return (IOC_INVAL);
821 
822 	case BGE_LOOP_NONE:
823 		msg = " (loopback disabled)";
824 		break;
825 
826 	case BGE_LOOP_EXTERNAL_1000:
827 	case BGE_LOOP_EXTERNAL_100:
828 	case BGE_LOOP_EXTERNAL_10:
829 		msg = " (external loopback selected)";
830 		break;
831 
832 	case BGE_LOOP_INTERNAL_PHY:
833 		msg = " (PHY internal loopback selected)";
834 		break;
835 
836 	case BGE_LOOP_INTERNAL_MAC:
837 		msg = " (MAC internal loopback selected)";
838 		break;
839 	}
840 
841 	/*
842 	 * All OK; tell the caller to reprogram
843 	 * the PHY and/or MAC for the new mode ...
844 	 */
845 	bgep->link_down_msg = bgep->link_up_msg = msg;
846 	bgep->param_loop_mode = mode;
847 	return (IOC_RESTART_ACK);
848 }
849 
850 static enum ioc_reply
851 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
852 {
853 	lb_info_sz_t *lbsp;
854 	lb_property_t *lbpp;
855 	uint32_t *lbmp;
856 	int cmd;
857 
858 	_NOTE(ARGUNUSED(wq))
859 
860 	/*
861 	 * Validate format of ioctl
862 	 */
863 	if (mp->b_cont == NULL)
864 		return (IOC_INVAL);
865 
866 	cmd = iocp->ioc_cmd;
867 	switch (cmd) {
868 	default:
869 		/* NOTREACHED */
870 		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
871 		return (IOC_INVAL);
872 
873 	case LB_GET_INFO_SIZE:
874 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
875 			return (IOC_INVAL);
876 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
877 		*lbsp = sizeof (loopmodes);
878 		return (IOC_REPLY);
879 
880 	case LB_GET_INFO:
881 		if (iocp->ioc_count != sizeof (loopmodes))
882 			return (IOC_INVAL);
883 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
884 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
885 		return (IOC_REPLY);
886 
887 	case LB_GET_MODE:
888 		if (iocp->ioc_count != sizeof (uint32_t))
889 			return (IOC_INVAL);
890 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
891 		*lbmp = bgep->param_loop_mode;
892 		return (IOC_REPLY);
893 
894 	case LB_SET_MODE:
895 		if (iocp->ioc_count != sizeof (uint32_t))
896 			return (IOC_INVAL);
897 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
898 		return (bge_set_loop_mode(bgep, *lbmp));
899 	}
900 }
901 
902 /*
903  * Specific bge IOCTLs, the gld module handles the generic ones.
904  */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* unrecognised command => NAK immediately */
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	/* reading loopback state is unprivileged; setting it is not */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	/* likewise for the ND parameter ioctls */
	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 * Otherwise just check for root access ...
		 */
		if (secpolicy_net_config != NULL)
			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		else
			err = drv_priv(iocp->ioc_cr);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	/*
	 * Dispatch to the appropriate handler; each returns an
	 * ioc_reply code saying how (and whether) to reply.
	 */
	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = bge_nd_ioctl(bgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_phys_update(bgep) != DDI_SUCCESS) {
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			status = IOC_INVAL;
		}
#ifdef BGE_IPMI_ASF
		if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
		if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			status = IOC_INVAL;
		}
		if (bgep->intr_type == DDI_INTR_TYPE_MSI)
			bge_chip_msi_trig(bgep);
		break;
	}

	/*
	 * FMA checks on both access handles before replying; a fault
	 * downgrades the reply to a NAK.
	 */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
			EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
			M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
1086 
1087 static void
1088 bge_m_resources(void *arg)
1089 {
1090 	bge_t *bgep = arg;
1091 	recv_ring_t *rrp;
1092 	mac_rx_fifo_t mrf;
1093 	int ring;
1094 
1095 	mutex_enter(bgep->genlock);
1096 
1097 	/*
1098 	 * Register Rx rings as resources and save mac
1099 	 * resource id for future reference
1100 	 */
1101 	mrf.mrf_type = MAC_RX_FIFO;
1102 	mrf.mrf_blank = bge_chip_blank;
1103 	mrf.mrf_arg = (void *)bgep;
1104 	mrf.mrf_normal_blank_time = bge_rx_ticks_norm;
1105 	mrf.mrf_normal_pkt_count = bge_rx_count_norm;
1106 
1107 	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
1108 		rrp = &bgep->recv[ring];
1109 		rrp->handle = mac_resource_add(bgep->mh,
1110 		    (mac_resource_t *)&mrf);
1111 	}
1112 
1113 	mutex_exit(bgep->genlock);
1114 }
1115 
1116 /*
1117  * ========== Per-instance setup/teardown code ==========
1118  */
1119 
1120 #undef	BGE_DBG
1121 #define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
1122 
1123 /*
1124  * Utility routine to carve a slice off a chunk of allocated memory,
1125  * updating the chunk descriptor accordingly.  The size of the slice
1126  * is given by the product of the <qty> and <size> parameters.
1127  */
1128 static void
1129 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
1130 	uint32_t qty, uint32_t size)
1131 {
1132 	static uint32_t sequence = 0xbcd5704a;
1133 	size_t totsize;
1134 
1135 	totsize = qty*size;
1136 	ASSERT(size >= 0);
1137 	ASSERT(totsize <= chunk->alength);
1138 
1139 	*slice = *chunk;
1140 	slice->nslots = qty;
1141 	slice->size = size;
1142 	slice->alength = totsize;
1143 	slice->token = ++sequence;
1144 
1145 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
1146 	chunk->alength -= totsize;
1147 	chunk->offset += totsize;
1148 	chunk->cookie.dmac_laddress += totsize;
1149 	chunk->cookie.dmac_size -= totsize;
1150 }
1151 
1152 /*
1153  * Initialise the specified Receive Producer (Buffer) Ring, using
1154  * the information in the <dma_area> descriptors that it contains
1155  * to set up all the other fields. This routine should be called
1156  * only once for each ring.
1157  */
1158 static void
1159 bge_init_buff_ring(bge_t *bgep, uint64_t ring)
1160 {
1161 	buff_ring_t *brp;
1162 	bge_status_t *bsp;
1163 	sw_rbd_t *srbdp;
1164 	dma_area_t pbuf;
1165 	uint32_t bufsize;
1166 	uint32_t nslots;
1167 	uint32_t slot;
1168 	uint32_t split;
1169 
1170 	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
1171 		NIC_MEM_SHADOW_BUFF_STD,
1172 		NIC_MEM_SHADOW_BUFF_JUMBO,
1173 		NIC_MEM_SHADOW_BUFF_MINI
1174 	};
1175 	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
1176 		RECV_STD_PROD_INDEX_REG,
1177 		RECV_JUMBO_PROD_INDEX_REG,
1178 		RECV_MINI_PROD_INDEX_REG
1179 	};
1180 	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
1181 		STATUS_STD_BUFF_CONS_INDEX,
1182 		STATUS_JUMBO_BUFF_CONS_INDEX,
1183 		STATUS_MINI_BUFF_CONS_INDEX
1184 	};
1185 
1186 	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
1187 		(void *)bgep, ring));
1188 
1189 	brp = &bgep->buff[ring];
1190 	nslots = brp->desc.nslots;
1191 	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
1192 	bufsize = brp->buf[0].size;
1193 
1194 	/*
1195 	 * Set up the copy of the h/w RCB
1196 	 *
1197 	 * Note: unlike Send & Receive Return Rings, (where the max_len
1198 	 * field holds the number of slots), in a Receive Buffer Ring
1199 	 * this field indicates the size of each buffer in the ring.
1200 	 */
1201 	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
1202 	brp->hw_rcb.max_len = bufsize;
1203 	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
1204 	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];
1205 
1206 	/*
1207 	 * Other one-off initialisation of per-ring data
1208 	 */
1209 	brp->bgep = bgep;
1210 	bsp = DMA_VPTR(bgep->status_block);
1211 	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
1212 	brp->chip_mbx_reg = mailbox_regs[ring];
1213 	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
1214 	    DDI_INTR_PRI(bgep->intr_pri));
1215 
1216 	/*
1217 	 * Allocate the array of s/w Receive Buffer Descriptors
1218 	 */
1219 	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
1220 	brp->sw_rbds = srbdp;
1221 
1222 	/*
1223 	 * Now initialise each array element once and for all
1224 	 */
1225 	for (split = 0; split < BGE_SPLIT; ++split) {
1226 		pbuf = brp->buf[split];
1227 		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
1228 			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
1229 		ASSERT(pbuf.alength == 0);
1230 	}
1231 }
1232 
1233 /*
1234  * Clean up initialisation done above before the memory is freed
1235  */
1236 static void
1237 bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
1238 {
1239 	buff_ring_t *brp;
1240 	sw_rbd_t *srbdp;
1241 
1242 	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
1243 		(void *)bgep, ring));
1244 
1245 	brp = &bgep->buff[ring];
1246 	srbdp = brp->sw_rbds;
1247 	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));
1248 
1249 	mutex_destroy(brp->rf_lock);
1250 }
1251 
1252 /*
1253  * Initialise the specified Receive (Return) Ring, using the
1254  * information in the <dma_area> descriptors that it contains
1255  * to set up all the other fields. This routine should be called
1256  * only once for each ring.
1257  */
1258 static void
1259 bge_init_recv_ring(bge_t *bgep, uint64_t ring)
1260 {
1261 	recv_ring_t *rrp;
1262 	bge_status_t *bsp;
1263 	uint32_t nslots;
1264 
1265 	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
1266 		(void *)bgep, ring));
1267 
1268 	/*
1269 	 * The chip architecture requires that receive return rings have
1270 	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
1271 	 */
1272 	rrp = &bgep->recv[ring];
1273 	nslots = rrp->desc.nslots;
1274 	ASSERT(nslots == 0 || nslots == 512 ||
1275 		nslots == 1024 || nslots == 2048);
1276 
1277 	/*
1278 	 * Set up the copy of the h/w RCB
1279 	 */
1280 	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
1281 	rrp->hw_rcb.max_len = nslots;
1282 	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
1283 	rrp->hw_rcb.nic_ring_addr = 0;
1284 
1285 	/*
1286 	 * Other one-off initialisation of per-ring data
1287 	 */
1288 	rrp->bgep = bgep;
1289 	bsp = DMA_VPTR(bgep->status_block);
1290 	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
1291 	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
1292 	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
1293 	    DDI_INTR_PRI(bgep->intr_pri));
1294 }
1295 
1296 
1297 /*
1298  * Clean up initialisation done above before the memory is freed
1299  */
1300 static void
1301 bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
1302 {
1303 	recv_ring_t *rrp;
1304 
1305 	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
1306 		(void *)bgep, ring));
1307 
1308 	rrp = &bgep->recv[ring];
1309 	if (rrp->rx_softint)
1310 		ddi_remove_softintr(rrp->rx_softint);
1311 	mutex_destroy(rrp->rx_lock);
1312 }
1313 
1314 /*
1315  * Initialise the specified Send Ring, using the information in the
1316  * <dma_area> descriptors that it contains to set up all the other
1317  * fields. This routine should be called only once for each ring.
1318  */
1319 static void
1320 bge_init_send_ring(bge_t *bgep, uint64_t ring)
1321 {
1322 	send_ring_t *srp;
1323 	bge_status_t *bsp;
1324 	sw_sbd_t *ssbdp;
1325 	dma_area_t desc;
1326 	dma_area_t pbuf;
1327 	uint32_t nslots;
1328 	uint32_t slot;
1329 	uint32_t split;
1330 
1331 	BGE_TRACE(("bge_init_send_ring($%p, %d)",
1332 		(void *)bgep, ring));
1333 
1334 	/*
1335 	 * The chip architecture requires that host-based send rings
1336 	 * have 512 elements per ring.  See 570X-PG102-R page 56.
1337 	 */
1338 	srp = &bgep->send[ring];
1339 	nslots = srp->desc.nslots;
1340 	ASSERT(nslots == 0 || nslots == 512);
1341 
1342 	/*
1343 	 * Set up the copy of the h/w RCB
1344 	 */
1345 	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
1346 	srp->hw_rcb.max_len = nslots;
1347 	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
1348 	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);
1349 
1350 	/*
1351 	 * Other one-off initialisation of per-ring data
1352 	 */
1353 	srp->bgep = bgep;
1354 	bsp = DMA_VPTR(bgep->status_block);
1355 	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
1356 	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
1357 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
1358 	    DDI_INTR_PRI(bgep->intr_pri));
1359 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
1360 	    DDI_INTR_PRI(bgep->intr_pri));
1361 
1362 	/*
1363 	 * Allocate the array of s/w Send Buffer Descriptors
1364 	 */
1365 	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
1366 	srp->sw_sbds = ssbdp;
1367 
1368 	/*
1369 	 * Now initialise each array element once and for all
1370 	 */
1371 	desc = srp->desc;
1372 	for (split = 0; split < BGE_SPLIT; ++split) {
1373 		pbuf = srp->buf[split];
1374 		for (slot = 0; slot < nslots/BGE_SPLIT; ++ssbdp, ++slot) {
1375 			bge_slice_chunk(&ssbdp->desc, &desc, 1,
1376 				sizeof (bge_sbd_t));
1377 			bge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
1378 				bgep->chipid.snd_buff_size);
1379 		}
1380 		ASSERT(pbuf.alength == 0);
1381 	}
1382 	ASSERT(desc.alength == 0);
1383 }
1384 
1385 /*
1386  * Clean up initialisation done above before the memory is freed
1387  */
1388 static void
1389 bge_fini_send_ring(bge_t *bgep, uint64_t ring)
1390 {
1391 	send_ring_t *srp;
1392 	sw_sbd_t *ssbdp;
1393 
1394 	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
1395 		(void *)bgep, ring));
1396 
1397 	srp = &bgep->send[ring];
1398 	ssbdp = srp->sw_sbds;
1399 	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
1400 
1401 	mutex_destroy(srp->tx_lock);
1402 	mutex_destroy(srp->tc_lock);
1403 }
1404 
1405 /*
1406  * Initialise all transmit, receive, and buffer rings.
1407  */
1408 void
1409 bge_init_rings(bge_t *bgep)
1410 {
1411 	uint64_t ring;
1412 
1413 	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
1414 
1415 	/*
1416 	 * Perform one-off initialisation of each ring ...
1417 	 */
1418 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
1419 		bge_init_send_ring(bgep, ring);
1420 	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
1421 		bge_init_recv_ring(bgep, ring);
1422 	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
1423 		bge_init_buff_ring(bgep, ring);
1424 }
1425 
1426 /*
1427  * Undo the work of bge_init_rings() above before the memory is freed
1428  */
1429 void
1430 bge_fini_rings(bge_t *bgep)
1431 {
1432 	uint64_t ring;
1433 
1434 	BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
1435 
1436 	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
1437 		bge_fini_buff_ring(bgep, ring);
1438 	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
1439 		bge_fini_recv_ring(bgep, ring);
1440 	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
1441 		bge_fini_send_ring(bgep, ring);
1442 }
1443 
1444 /*
1445  * Allocate an area of memory and a DMA handle for accessing it
1446  */
1447 static int
1448 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
1449 	uint_t dma_flags, dma_area_t *dma_p)
1450 {
1451 	caddr_t va;
1452 	int err;
1453 
1454 	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
1455 		(void *)bgep, memsize, attr_p, dma_flags, dma_p));
1456 
1457 	/*
1458 	 * Allocate handle
1459 	 */
1460 	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
1461 		DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1462 	if (err != DDI_SUCCESS)
1463 		return (DDI_FAILURE);
1464 
1465 	/*
1466 	 * Allocate memory
1467 	 */
1468 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
1469 		dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1470 		DDI_DMA_SLEEP, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
1471 	if (err != DDI_SUCCESS)
1472 		return (DDI_FAILURE);
1473 
1474 	/*
1475 	 * Bind the two together
1476 	 */
1477 	dma_p->mem_va = va;
1478 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1479 		va, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1480 		&dma_p->cookie, &dma_p->ncookies);
1481 
1482 	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
1483 		dma_p->alength, err, dma_p->ncookies));
1484 
1485 	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
1486 		return (DDI_FAILURE);
1487 
1488 	dma_p->nslots = ~0U;
1489 	dma_p->size = ~0U;
1490 	dma_p->token = ~0U;
1491 	dma_p->offset = 0;
1492 	return (DDI_SUCCESS);
1493 }
1494 
1495 /*
1496  * Free one allocated area of DMAable memory
1497  */
1498 static void
1499 bge_free_dma_mem(dma_area_t *dma_p)
1500 {
1501 	if (dma_p->dma_hdl != NULL) {
1502 		if (dma_p->ncookies) {
1503 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1504 			dma_p->ncookies = 0;
1505 		}
1506 		ddi_dma_free_handle(&dma_p->dma_hdl);
1507 		dma_p->dma_hdl = NULL;
1508 	}
1509 
1510 	if (dma_p->acc_hdl != NULL) {
1511 		ddi_dma_mem_free(&dma_p->acc_hdl);
1512 		dma_p->acc_hdl = NULL;
1513 	}
1514 }
1515 
1516 /*
1517  * This function allocates all the transmit and receive buffers
1518  * and descriptors, in four chunks (or one, if MONOLITHIC).
1519  */
1520 int
1521 bge_alloc_bufs(bge_t *bgep)
1522 {
1523 	dma_area_t area;
1524 	size_t rxbuffsize;
1525 	size_t txbuffsize;
1526 	size_t rxbuffdescsize;
1527 	size_t rxdescsize;
1528 	size_t txdescsize;
1529 	uint64_t ring;
1530 	uint64_t rx_rings = bgep->chipid.rx_rings;
1531 	uint64_t tx_rings = bgep->chipid.tx_rings;
1532 	int split;
1533 	int err;
1534 
1535 	BGE_TRACE(("bge_alloc_bufs($%p)",
1536 		(void *)bgep));
1537 
1538 	rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
1539 	rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
1540 	rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;
1541 
1542 	txbuffsize = BGE_SEND_SLOTS_USED*bgep->chipid.snd_buff_size;
1543 	txbuffsize *= tx_rings;
1544 
1545 	rxdescsize = rx_rings*bgep->chipid.recv_slots;
1546 	rxdescsize *= sizeof (bge_rbd_t);
1547 
1548 	rxbuffdescsize = BGE_STD_SLOTS_USED;
1549 	rxbuffdescsize += bgep->chipid.jumbo_slots;
1550 	rxbuffdescsize += BGE_MINI_SLOTS_USED;
1551 	rxbuffdescsize *= sizeof (bge_rbd_t);
1552 
1553 	txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
1554 	txdescsize *= sizeof (bge_sbd_t);
1555 	txdescsize += sizeof (bge_statistics_t);
1556 	txdescsize += sizeof (bge_status_t);
1557 	txdescsize += BGE_STATUS_PADDING;
1558 
1559 #if	BGE_MONOLITHIC
1560 
1561 	err = bge_alloc_dma_mem(bgep,
1562 		rxbuffsize+txbuffsize+rxbuffdescsize+rxdescsize+txdescsize,
1563 		&bge_data_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area);
1564 	if (err != DDI_SUCCESS)
1565 		return (DDI_FAILURE);
1566 
1567 	BGE_DEBUG(("allocated range $%p-$%p (0x%lx-0x%lx)",
1568 		DMA_VPTR(area),
1569 		(caddr_t)DMA_VPTR(area)+area.alength,
1570 		area.cookie.dmac_laddress,
1571 		area.cookie.dmac_laddress+area.alength));
1572 
1573 	bge_slice_chunk(&bgep->rx_buff[0], &area, 1, rxbuffsize);
1574 	bge_slice_chunk(&bgep->tx_buff[0], &area, 1, txbuffsize);
1575 	bge_slice_chunk(&bgep->rx_desc[0], &area, 1, rxdescsize);
1576 	bge_slice_chunk(&bgep->tx_desc, &area, 1, txdescsize);
1577 
1578 #else
1579 	/*
1580 	 * Allocate memory & handles for RX buffers
1581 	 */
1582 	ASSERT((rxbuffsize % BGE_SPLIT) == 0);
1583 	for (split = 0; split < BGE_SPLIT; ++split) {
1584 		err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
1585 			&bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
1586 			&bgep->rx_buff[split]);
1587 		if (err != DDI_SUCCESS)
1588 			return (DDI_FAILURE);
1589 	}
1590 
1591 	/*
1592 	 * Allocate memory & handles for TX buffers
1593 	 */
1594 	ASSERT((txbuffsize % BGE_SPLIT) == 0);
1595 	for (split = 0; split < BGE_SPLIT; ++split) {
1596 		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
1597 			&bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
1598 			&bgep->tx_buff[split]);
1599 		if (err != DDI_SUCCESS)
1600 			return (DDI_FAILURE);
1601 	}
1602 
1603 	/*
1604 	 * Allocate memory & handles for receive return rings
1605 	 */
1606 	ASSERT((rxdescsize % rx_rings) == 0);
1607 	for (split = 0; split < rx_rings; ++split) {
1608 		err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
1609 			&bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1610 			&bgep->rx_desc[split]);
1611 		if (err != DDI_SUCCESS)
1612 			return (DDI_FAILURE);
1613 	}
1614 
1615 	/*
1616 	 * Allocate memory & handles for buffer (producer) descriptor rings
1617 	 */
1618 	err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
1619 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
1620 	if (err != DDI_SUCCESS)
1621 		return (DDI_FAILURE);
1622 
1623 	/*
1624 	 * Allocate memory & handles for TX descriptor rings,
1625 	 * status block, and statistics area
1626 	 */
1627 	err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
1628 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
1629 	if (err != DDI_SUCCESS)
1630 		return (DDI_FAILURE);
1631 
1632 #endif	/* BGE_MONOLITHIC */
1633 
1634 	/*
1635 	 * Now carve up each of the allocated areas ...
1636 	 */
1637 	for (split = 0; split < BGE_SPLIT; ++split) {
1638 		area = bgep->rx_buff[split];
1639 		bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
1640 			&area, BGE_STD_SLOTS_USED/BGE_SPLIT,
1641 			bgep->chipid.std_buf_size);
1642 		bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
1643 			&area, bgep->chipid.jumbo_slots/BGE_SPLIT,
1644 			bgep->chipid.recv_jumbo_size);
1645 		bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
1646 			&area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
1647 			BGE_MINI_BUFF_SIZE);
1648 		ASSERT(area.alength >= 0);
1649 	}
1650 
1651 	for (split = 0; split < BGE_SPLIT; ++split) {
1652 		area = bgep->tx_buff[split];
1653 		for (ring = 0; ring < tx_rings; ++ring)
1654 			bge_slice_chunk(&bgep->send[ring].buf[split],
1655 				&area, BGE_SEND_SLOTS_USED/BGE_SPLIT,
1656 				bgep->chipid.snd_buff_size);
1657 		for (; ring < BGE_SEND_RINGS_MAX; ++ring)
1658 			bge_slice_chunk(&bgep->send[ring].buf[split],
1659 				&area, 0/BGE_SPLIT,
1660 				bgep->chipid.snd_buff_size);
1661 		ASSERT(area.alength >= 0);
1662 	}
1663 
1664 	for (ring = 0; ring < rx_rings; ++ring)
1665 		bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
1666 			bgep->chipid.recv_slots, sizeof (bge_rbd_t));
1667 
1668 	area = bgep->rx_desc[rx_rings];
1669 	for (; ring < BGE_RECV_RINGS_MAX; ++ring)
1670 		bge_slice_chunk(&bgep->recv[ring].desc, &area,
1671 			0, sizeof (bge_rbd_t));
1672 	bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
1673 		BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
1674 	bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
1675 		bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
1676 	bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
1677 		BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
1678 	ASSERT(area.alength == 0);
1679 
1680 	area = bgep->tx_desc;
1681 	for (ring = 0; ring < tx_rings; ++ring)
1682 		bge_slice_chunk(&bgep->send[ring].desc, &area,
1683 			BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
1684 	for (; ring < BGE_SEND_RINGS_MAX; ++ring)
1685 		bge_slice_chunk(&bgep->send[ring].desc, &area,
1686 			0, sizeof (bge_sbd_t));
1687 	bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
1688 	bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
1689 	ASSERT(area.alength == BGE_STATUS_PADDING);
1690 	DMA_ZERO(bgep->status_block);
1691 
1692 	return (DDI_SUCCESS);
1693 }
1694 
1695 /*
1696  * This routine frees the transmit and receive buffers and descriptors.
1697  * Make sure the chip is stopped before calling it!
1698  */
1699 void
1700 bge_free_bufs(bge_t *bgep)
1701 {
1702 	int split;
1703 
1704 	BGE_TRACE(("bge_free_bufs($%p)",
1705 		(void *)bgep));
1706 
1707 #if	BGE_MONOLITHIC
1708 	bge_free_dma_mem(&bgep->rx_buff[0]);
1709 #else
1710 	bge_free_dma_mem(&bgep->tx_desc);
1711 	for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
1712 		bge_free_dma_mem(&bgep->rx_desc[split]);
1713 	for (split = 0; split < BGE_SPLIT; ++split)
1714 		bge_free_dma_mem(&bgep->tx_buff[split]);
1715 	for (split = 0; split < BGE_SPLIT; ++split)
1716 		bge_free_dma_mem(&bgep->rx_buff[split]);
1717 #endif	/* BGE_MONOLITHIC */
1718 }
1719 
1720 /*
1721  * Determine (initial) MAC address ("BIA") to use for this interface
1722  */
1723 
1724 static void
1725 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
1726 {
1727 	struct ether_addr sysaddr;
1728 	char propbuf[8];		/* "true" or "false", plus NUL	*/
1729 	uchar_t *bytes;
1730 	int *ints;
1731 	uint_t nelts;
1732 	int err;
1733 
1734 	BGE_TRACE(("bge_find_mac_address($%p)",
1735 		(void *)bgep));
1736 
1737 	BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
1738 		cidp->hw_mac_addr,
1739 		ether_sprintf((void *)cidp->vendor_addr.addr),
1740 		cidp->vendor_addr.set ? "" : "not "));
1741 
1742 	/*
1743 	 * The "vendor's factory-set address" may already have
1744 	 * been extracted from the chip, but if the property
1745 	 * "local-mac-address" is set we use that instead.  It
1746 	 * will normally be set by OBP, but it could also be
1747 	 * specified in a .conf file(!)
1748 	 *
1749 	 * There doesn't seem to be a way to define byte-array
1750 	 * properties in a .conf, so we check whether it looks
1751 	 * like an array of 6 ints instead.
1752 	 *
1753 	 * Then, we check whether it looks like an array of 6
1754 	 * bytes (which it should, if OBP set it).  If we can't
1755 	 * make sense of it either way, we'll ignore it.
1756 	 */
1757 	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
1758 		DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
1759 	if (err == DDI_PROP_SUCCESS) {
1760 		if (nelts == ETHERADDRL) {
1761 			while (nelts--)
1762 				cidp->vendor_addr.addr[nelts] = ints[nelts];
1763 			cidp->vendor_addr.set = 1;
1764 		}
1765 		ddi_prop_free(ints);
1766 	}
1767 
1768 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
1769 		DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
1770 	if (err == DDI_PROP_SUCCESS) {
1771 		if (nelts == ETHERADDRL) {
1772 			while (nelts--)
1773 				cidp->vendor_addr.addr[nelts] = bytes[nelts];
1774 			cidp->vendor_addr.set = 1;
1775 		}
1776 		ddi_prop_free(bytes);
1777 	}
1778 
1779 	BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
1780 		ether_sprintf((void *)cidp->vendor_addr.addr),
1781 		cidp->vendor_addr.set ? "" : "not "));
1782 
1783 	/*
1784 	 * Look up the OBP property "local-mac-address?".  Note that even
1785 	 * though its value is a string (which should be "true" or "false"),
1786 	 * it can't be decoded by ddi_prop_lookup_string(9F).  So, we zero
1787 	 * the buffer first and then fetch the property as an untyped array;
1788 	 * this may or may not include a final NUL, but since there will
1789 	 * always be one left at the end of the buffer we can now treat it
1790 	 * as a string anyway.
1791 	 */
1792 	nelts = sizeof (propbuf);
1793 	bzero(propbuf, nelts--);
1794 	err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
1795 		DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);
1796 
1797 	/*
1798 	 * Now, if the address still isn't set from the hardware (SEEPROM)
1799 	 * or the OBP or .conf property, OR if the user has foolishly set
1800 	 * 'local-mac-address? = false', use "the system address" instead
1801 	 * (but only if it's non-null i.e. has been set from the IDPROM).
1802 	 */
1803 	if (cidp->vendor_addr.set == 0 || strcmp(propbuf, "false") == 0)
1804 		if (localetheraddr(NULL, &sysaddr) != 0) {
1805 			ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
1806 			cidp->vendor_addr.set = 1;
1807 		}
1808 
1809 	BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
1810 		ether_sprintf((void *)cidp->vendor_addr.addr),
1811 		cidp->vendor_addr.set ? "" : "not "));
1812 
1813 	/*
1814 	 * Finally(!), if there's a valid "mac-address" property (created
1815 	 * if we netbooted from this interface), we must use this instead
1816 	 * of any of the above to ensure that the NFS/install server doesn't
1817 	 * get confused by the address changing as Solaris takes over!
1818 	 */
1819 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
1820 		DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
1821 	if (err == DDI_PROP_SUCCESS) {
1822 		if (nelts == ETHERADDRL) {
1823 			while (nelts--)
1824 				cidp->vendor_addr.addr[nelts] = bytes[nelts];
1825 			cidp->vendor_addr.set = 1;
1826 		}
1827 		ddi_prop_free(bytes);
1828 	}
1829 
1830 	BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
1831 		ether_sprintf((void *)cidp->vendor_addr.addr),
1832 		cidp->vendor_addr.set ? "" : "not "));
1833 }
1834 
1835 
/*
 * Fetch the FMA error status of the given access handle, clear the
 * recorded error, and return the fme_status value (e.g. DDI_FM_OK).
 */
/*ARGSUSED*/
int
bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}
1846 
/*
 * Fetch and return the FMA error status of the given DMA handle.
 * Unlike bge_check_acc_handle(), the recorded error is only read
 * here, not cleared.  Valid only after the buffers are allocated.
 */
/*ARGSUSED*/
int
bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ASSERT(bgep->progress & PROGRESS_BUFS);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}
1857 
1858 /*
1859  * The IO fault service error handling callback function
1860  */
1861 /*ARGSUSED*/
1862 static int
1863 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
1864 {
1865 	/*
1866 	 * as the driver can always deal with an error in any dma or
1867 	 * access handle, we can just return the fme_status value.
1868 	 */
1869 	pci_ereport_post(dip, err, NULL);
1870 	return (err->fme_status);
1871 }
1872 
/*
 * Register with the IO Fault Services framework according to the
 * capabilities read into bgep->fm_capabilities.
 *
 * Note: this adjusts the file-global access attributes and DMA
 * attributes (bge_reg_accattr, bge_desc_accattr, dma_attr) as a
 * side effect, so it must run before those are used for mapping.
 */
static void
bge_fm_init(bge_t *bgep)
{
	ddi_iblock_cookie_t iblk;

	/* Only register with IO Fault Services if we have some capability */
	if (bgep->fm_capabilities) {
		bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
		bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			pci_ereport_setup(bgep->devinfo);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			ddi_fm_handler_register(bgep->devinfo,
			bge_fm_error_cb, (void*) bgep);
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
		bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dma_attr.dma_attr_flags = 0;
	}
}
1910 
1911 static void
1912 bge_fm_fini(bge_t *bgep)
1913 {
1914 	/* Only unregister FMA capabilities if we registered some */
1915 	if (bgep->fm_capabilities) {
1916 
1917 		/*
1918 		 * Release any resources allocated by pci_ereport_setup()
1919 		 */
1920 		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
1921 		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
1922 			pci_ereport_teardown(bgep->devinfo);
1923 
1924 		/*
1925 		 * Un-register error callback if error callback capable
1926 		 */
1927 		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
1928 			ddi_fm_handler_unregister(bgep->devinfo);
1929 
1930 		/* Unregister from IO Fault Services */
1931 		ddi_fm_fini(bgep->devinfo);
1932 	}
1933 }
1934 
/*
 * Tear down everything set up by bge_attach(), in reverse order,
 * guided by the bgep->progress flags so a partially-attached
 * instance unwinds correctly.  Frees the soft state on exit.
 */
static void
#ifdef BGE_IPMI_ASF
bge_unattach(bge_t *bgep, uint_t asf_mode)
#else
bge_unattach(bge_t *bgep)
#endif
{
	BGE_TRACE(("bge_unattach($%p)",
		(void *)bgep));

	/*
	 * Flag that no more activity may be initiated
	 */
	bgep->progress &= ~PROGRESS_READY;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all BGE data structures
	 */
	if (bgep->cyclic_id) {
		/* cpu_lock is required around cyclic_remove() */
		mutex_enter(&cpu_lock);
		cyclic_remove(bgep->cyclic_id);
		mutex_exit(&cpu_lock);
	}
	if (bgep->progress & PROGRESS_KSTATS)
		bge_fini_kstats(bgep);
	if (bgep->progress & PROGRESS_NDD)
		bge_nd_cleanup(bgep);
	if (bgep->progress & PROGRESS_PHY)
		bge_phys_reset(bgep);
	if (bgep->progress & PROGRESS_HWINT) {
		mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
		if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
#else
		if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
#endif
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
#ifdef BGE_IPMI_ASF
		if (bgep->asf_enabled) {
			/*
			 * This register has been overlaid. We restore its
			 * initial value here.
			 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		/* Final FMA check of both handles before teardown */
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->resched_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&bgep->io_handle);
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep, sizeof (*bgep));
}
2018 
/*
 * DDI_RESUME handler: re-read the chip ID, verify it is still the
 * same device we suspended, then restart the hardware.  Every step
 * is checked against the FMA access handles; any fault fails the
 * resume with DDI_SERVICE_LOST.
 */
static int
bge_resume(dev_info_t *devinfo)
{
	bge_t *bgep;				/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	bgep = ddi_get_driver_private(devinfo);
	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (bgep->devinfo != devinfo)
		return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
	/*
	 * Power management hasn't been supported in BGE now. If you
	 * want to implement it, please add the ASF/IPMI related
	 * code here.
	 */

#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &bgep->chipid;
	mutex_enter(bgep->genlock);
	bge_chip_cfg_init(bgep, &chipid, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	/* Compare the freshly-read identity with the saved one */
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);
	if (chipid.asic_rev != cidp->asic_rev)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off GLD scheduling
	 */
	mutex_enter(bgep->genlock);
	if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
		/* Drain any pending faults; service is lost regardless */
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	return (DDI_SUCCESS);
}
2091 
2092 /*
2093  * attach(9E) -- Attach a device to the system
2094  *
2095  * Called once for each board successfully probed.
2096  */
2097 static int
2098 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2099 {
2100 	bge_t *bgep;				/* Our private data	*/
2101 	mac_register_t *macp;
2102 	chip_id_t *cidp;
2103 	cyc_handler_t cychand;
2104 	cyc_time_t cyctime;
2105 	caddr_t regs;
2106 	int instance;
2107 	int err;
2108 	int intr_types;
2109 #ifdef BGE_IPMI_ASF
2110 	uint32_t mhcrValue;
2111 #endif
2112 
2113 	instance = ddi_get_instance(devinfo);
2114 
2115 	BGE_GTRACE(("bge_attach($%p, %d) instance %d",
2116 		(void *)devinfo, cmd, instance));
2117 	BGE_BRKPT(NULL, "bge_attach");
2118 
2119 	switch (cmd) {
2120 	default:
2121 		return (DDI_FAILURE);
2122 
2123 	case DDI_RESUME:
2124 		return (bge_resume(devinfo));
2125 
2126 	case DDI_ATTACH:
2127 		break;
2128 	}
2129 
2130 	bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
2131 	ddi_set_driver_private(devinfo, bgep);
2132 	bgep->bge_guard = BGE_GUARD;
2133 	bgep->devinfo = devinfo;
2134 
2135 	/*
2136 	 * Initialize more fields in BGE private data
2137 	 */
2138 	bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2139 		DDI_PROP_DONTPASS, debug_propname, bge_debug);
2140 	(void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
2141 		BGE_DRIVER_NAME, instance);
2142 
2143 	/*
2144 	 * Initialize for fma support
2145 	 */
2146 	bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2147 	    DDI_PROP_DONTPASS, fm_cap,
2148 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
2149 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
2150 	BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
2151 	bge_fm_init(bgep);
2152 
2153 	/*
2154 	 * Look up the IOMMU's page size for DVMA mappings (must be
2155 	 * a power of 2) and convert to a mask.  This can be used to
2156 	 * determine whether a message buffer crosses a page boundary.
2157 	 * Note: in 2s complement binary notation, if X is a power of
2158 	 * 2, then -X has the representation "11...1100...00".
2159 	 */
2160 	bgep->pagemask = dvma_pagesize(devinfo);
2161 	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
2162 	bgep->pagemask = -bgep->pagemask;
2163 
2164 	/*
2165 	 * Map config space registers
2166 	 * Read chip ID & set up config space command register(s)
2167 	 *
2168 	 * Note: this leaves the chip accessible by Memory Space
2169 	 * accesses, but with interrupts and Bus Mastering off.
2170 	 * This should ensure that nothing untoward will happen
2171 	 * if it has been left active by the (net-)bootloader.
2172 	 * We'll re-enable Bus Mastering once we've reset the chip,
2173 	 * and allow interrupts only when everything else is set up.
2174 	 */
2175 	err = pci_config_setup(devinfo, &bgep->cfg_handle);
2176 #ifdef BGE_IPMI_ASF
2177 	mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
2178 	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
2179 		bgep->asf_wordswapped = B_TRUE;
2180 	} else {
2181 		bgep->asf_wordswapped = B_FALSE;
2182 	}
2183 	bge_asf_get_config(bgep);
2184 #endif
2185 	if (err != DDI_SUCCESS) {
2186 		bge_problem(bgep, "pci_config_setup() failed");
2187 		goto attach_fail;
2188 	}
2189 	bgep->progress |= PROGRESS_CFG;
2190 	cidp = &bgep->chipid;
2191 	bzero(cidp, sizeof (*cidp));
2192 	bge_chip_cfg_init(bgep, cidp, B_FALSE);
2193 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
2194 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2195 		goto attach_fail;
2196 	}
2197 
2198 #ifdef BGE_IPMI_ASF
2199 	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
2200 	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
2201 		bgep->asf_newhandshake = B_TRUE;
2202 	} else {
2203 		bgep->asf_newhandshake = B_FALSE;
2204 	}
2205 #endif
2206 
2207 	/*
2208 	 * Update those parts of the chip ID derived from volatile
2209 	 * registers with the values seen by OBP (in case the chip
2210 	 * has been reset externally and therefore lost them).
2211 	 */
2212 	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2213 		DDI_PROP_DONTPASS, subven_propname, cidp->subven);
2214 	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2215 		DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
2216 	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2217 		DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
2218 	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2219 		DDI_PROP_DONTPASS, latency_propname, cidp->latency);
2220 	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2221 		DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
2222 	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2223 		DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
2224 
2225 	if (bge_jumbo_enable == B_TRUE) {
2226 		cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2227 			DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
2228 		if ((cidp->default_mtu < BGE_DEFAULT_MTU)||
2229 			(cidp->default_mtu > BGE_MAXIMUM_MTU)) {
2230 			cidp->default_mtu = BGE_DEFAULT_MTU;
2231 		}
2232 	}
2233 	/*
2234 	 * Map operating registers
2235 	 */
2236 	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
2237 		&regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
2238 	if (err != DDI_SUCCESS) {
2239 		bge_problem(bgep, "ddi_regs_map_setup() failed");
2240 		goto attach_fail;
2241 	}
2242 	bgep->io_regs = regs;
2243 	bgep->progress |= PROGRESS_REGS;
2244 
2245 	/*
2246 	 * Characterise the device, so we know its requirements.
2247 	 * Then allocate the appropriate TX and RX descriptors & buffers.
2248 	 */
2249 	if (bge_chip_id_init(bgep) == EIO) {
2250 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2251 		goto attach_fail;
2252 	}
2253 	err = bge_alloc_bufs(bgep);
2254 	if (err != DDI_SUCCESS) {
2255 		bge_problem(bgep, "DMA buffer allocation failed");
2256 		goto attach_fail;
2257 	}
2258 	bgep->progress |= PROGRESS_BUFS;
2259 
2260 	/*
2261 	 * Add the softint handlers:
2262 	 *
2263 	 * Both of these handlers are used to avoid restrictions on the
2264 	 * context and/or mutexes required for some operations.  In
2265 	 * particular, the hardware interrupt handler and its subfunctions
2266 	 * can detect a number of conditions that we don't want to handle
2267 	 * in that context or with that set of mutexes held.  So, these
2268 	 * softints are triggered instead:
2269 	 *
2270 	 * the <resched> softint is triggered if we have previously
2271 	 * had to refuse to send a packet because of resource shortage
2272 	 * (we've run out of transmit buffers), but the send completion
2273 	 * interrupt handler has now detected that more buffers have
2274 	 * become available.
2275 	 *
2276 	 * the <factotum> is triggered if the h/w interrupt handler
2277 	 * sees the <link state changed> or <error> bits in the status
2278 	 * block.  It's also triggered periodically to poll the link
2279 	 * state, just in case we aren't getting link status change
2280 	 * interrupts ...
2281 	 */
2282 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->resched_id,
2283 		NULL, NULL, bge_reschedule, (caddr_t)bgep);
2284 	if (err != DDI_SUCCESS) {
2285 		bge_problem(bgep, "ddi_add_softintr() failed");
2286 		goto attach_fail;
2287 	}
2288 	bgep->progress |= PROGRESS_RESCHED;
2289 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
2290 		NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
2291 	if (err != DDI_SUCCESS) {
2292 		bge_problem(bgep, "ddi_add_softintr() failed");
2293 		goto attach_fail;
2294 	}
2295 	bgep->progress |= PROGRESS_FACTOTUM;
2296 
2297 	/* Get supported interrupt types */
2298 	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
2299 		bge_error(bgep, "ddi_intr_get_supported_types failed\n");
2300 
2301 		goto attach_fail;
2302 	}
2303 
2304 	bge_log(bgep, "ddi_intr_get_supported_types() returned: %x",
2305 	    intr_types);
2306 
2307 	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
2308 		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
2309 			bge_error(bgep, "MSI registration failed, "
2310 			    "trying FIXED interrupt type\n");
2311 		} else {
2312 			bge_log(bgep, "Using MSI interrupt type\n");
2313 
2314 			bgep->intr_type = DDI_INTR_TYPE_MSI;
2315 			bgep->progress |= PROGRESS_HWINT;
2316 		}
2317 	}
2318 
2319 	if (!(bgep->progress & PROGRESS_HWINT) &&
2320 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
2321 		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
2322 			bge_error(bgep, "FIXED interrupt "
2323 			    "registration failed\n");
2324 			goto attach_fail;
2325 		}
2326 
2327 		bge_log(bgep, "Using FIXED interrupt type\n");
2328 
2329 		bgep->intr_type = DDI_INTR_TYPE_FIXED;
2330 		bgep->progress |= PROGRESS_HWINT;
2331 	}
2332 
2333 	if (!(bgep->progress & PROGRESS_HWINT)) {
2334 		bge_error(bgep, "No interrupts registered\n");
2335 		goto attach_fail;
2336 	}
2337 
2338 	/*
2339 	 * Note that interrupts are not enabled yet as
2340 	 * mutex locks are not initialized. Initialize mutex locks.
2341 	 */
2342 	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
2343 	    DDI_INTR_PRI(bgep->intr_pri));
2344 	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
2345 	    DDI_INTR_PRI(bgep->intr_pri));
2346 	rw_init(bgep->errlock, NULL, RW_DRIVER,
2347 	    DDI_INTR_PRI(bgep->intr_pri));
2348 
2349 	/*
2350 	 * Initialize rings.
2351 	 */
2352 	bge_init_rings(bgep);
2353 
2354 	/*
2355 	 * Now that mutex locks are initialized, enable interrupts.
2356 	 */
2357 	bge_intr_enable(bgep);
2358 	bgep->progress |= PROGRESS_INTR;
2359 
2360 	/*
2361 	 * Initialise link state variables
2362 	 * Stop, reset & reinitialise the chip.
2363 	 * Initialise the (internal) PHY.
2364 	 */
2365 	bgep->link_state = LINK_STATE_UNKNOWN;
2366 	bgep->link_up_msg = bgep->link_down_msg = " (initialized)";
2367 
2368 	mutex_enter(bgep->genlock);
2369 
2370 	/*
2371 	 * Reset chip & rings to initial state; also reset address
2372 	 * filtering, promiscuity, loopback mode.
2373 	 */
2374 #ifdef BGE_IPMI_ASF
2375 	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
2376 #else
2377 	if (bge_reset(bgep) != DDI_SUCCESS) {
2378 #endif
2379 		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
2380 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
2381 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2382 		mutex_exit(bgep->genlock);
2383 		goto attach_fail;
2384 	}
2385 
2386 	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
2387 	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
2388 	bgep->promisc = B_FALSE;
2389 	bgep->param_loop_mode = BGE_LOOP_NONE;
2390 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
2391 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2392 		mutex_exit(bgep->genlock);
2393 		goto attach_fail;
2394 	}
2395 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
2396 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2397 		mutex_exit(bgep->genlock);
2398 		goto attach_fail;
2399 	}
2400 
2401 	mutex_exit(bgep->genlock);
2402 
2403 	if (bge_phys_init(bgep) == EIO) {
2404 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
2405 		goto attach_fail;
2406 	}
2407 	bgep->progress |= PROGRESS_PHY;
2408 
2409 	/*
2410 	 * Register NDD-tweakable parameters
2411 	 */
2412 	if (bge_nd_init(bgep)) {
2413 		bge_problem(bgep, "bge_nd_init() failed");
2414 		goto attach_fail;
2415 	}
2416 	bgep->progress |= PROGRESS_NDD;
2417 
2418 	/*
2419 	 * Create & initialise named kstats
2420 	 */
2421 	bge_init_kstats(bgep, instance);
2422 	bgep->progress |= PROGRESS_KSTATS;
2423 
2424 	/*
2425 	 * Determine whether to override the chip's own MAC address
2426 	 */
2427 	bge_find_mac_address(bgep, cidp);
2428 	ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr.addr);
2429 	bgep->curr_addr.set = 1;
2430 
2431 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2432 		goto attach_fail;
2433 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2434 	macp->m_driver = bgep;
2435 	macp->m_dip = devinfo;
2436 	macp->m_src_addr = bgep->curr_addr.addr;
2437 	macp->m_callbacks = &bge_m_callbacks;
2438 	macp->m_min_sdu = 0;
2439 	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
2440 	/*
2441 	 * Finally, we're ready to register ourselves with the MAC layer
2442 	 * interface; if this succeeds, we're all ready to start()
2443 	 */
2444 	err = mac_register(macp, &bgep->mh);
2445 	mac_free(macp);
2446 	if (err != 0)
2447 		goto attach_fail;
2448 
2449 	cychand.cyh_func = bge_chip_cyclic;
2450 	cychand.cyh_arg = bgep;
2451 	cychand.cyh_level = CY_LOCK_LEVEL;
2452 	cyctime.cyt_when = 0;
2453 	cyctime.cyt_interval = BGE_CYCLIC_PERIOD;
2454 	mutex_enter(&cpu_lock);
2455 	bgep->cyclic_id = cyclic_add(&cychand, &cyctime);
2456 	mutex_exit(&cpu_lock);
2457 
2458 	bgep->progress |= PROGRESS_READY;
2459 	ASSERT(bgep->bge_guard == BGE_GUARD);
2460 	return (DDI_SUCCESS);
2461 
2462 attach_fail:
2463 #ifdef BGE_IPMI_ASF
2464 	bge_unattach(bgep, ASF_MODE_NONE);
2465 #else
2466 	bge_unattach(bgep);
2467 #endif
2468 	return (DDI_FAILURE);
2469 }
2470 
2471 /*
2472  *	bge_suspend() -- suspend transmit/receive for powerdown
2473  */
2474 static int
2475 bge_suspend(bge_t *bgep)
2476 {
2477 	/*
2478 	 * Stop processing and idle (powerdown) the PHY ...
2479 	 */
2480 	mutex_enter(bgep->genlock);
2481 #ifdef BGE_IPMI_ASF
2482 	/*
2483 	 * Power management hasn't been supported in BGE now. If you
2484 	 * want to implement it, please add the ASF/IPMI related
2485 	 * code here.
2486 	 */
2487 #endif
2488 	bge_stop(bgep);
2489 	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
2490 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
2491 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2492 		mutex_exit(bgep->genlock);
2493 		return (DDI_FAILURE);
2494 	}
2495 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
2496 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2497 		mutex_exit(bgep->genlock);
2498 		return (DDI_FAILURE);
2499 	}
2500 	mutex_exit(bgep->genlock);
2501 
2502 	return (DDI_SUCCESS);
2503 }
2504 
2505 /*
2506  * detach(9E) -- Detach a device from the system
2507  */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	/* Per-instance soft state saved by bge_attach() */
	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	/*
	 * If the ASF/IPMI firmware agent is running, wind it down in
	 * order: publish final status, stop the heartbeat timer, then
	 * perform the pre-reset handshake for a shutdown-style reset.
	 * This sequence is order-sensitive; bge_unattach() is told to
	 * use ASF_MODE_POST_SHUTDOWN afterwards.
	 */
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && (bgep->asf_status == ASF_STAT_RUN)) {

		bge_asf_update_status(bgep);
		bge_asf_stop_timer(bgep);
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		/*
		 * A "pseudo-stop" means the MAC was left running on
		 * behalf of ASF when the interface was stopped; now
		 * really stop the chip.
		 */
		if (bgep->asf_pseudostop) {
			bgep->link_up_msg = bgep->link_down_msg = " (stopped)";
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		/* Access faults here don't block detach: UNAFFECTED */
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}
2580 
2581 
2582 /*
2583  * ========== Module Loading Data & Entry Points ==========
2584  */
2585 
2586 #undef	BGE_DBG
2587 #define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
2588 
/*
 * Device operations vector, generated by the DDI_DEFINE_STREAM_OPS
 * convenience macro; wires up bge_attach/bge_detach as the attach(9E)
 * and detach(9E) entry points for this driver.
 */
DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv bge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	bge_ident,		/* short description */
	&bge_dev_ops		/* driver specific ops */
};

/* Module linkage: this module exports just the single driver above */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};
2601 
2602 
/*
 * _info(9E): report module information via the common mod_info() helper.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2608 
2609 int
2610 _init(void)
2611 {
2612 	int status;
2613 
2614 	mac_init_ops(&bge_dev_ops, "bge");
2615 	status = mod_install(&modlinkage);
2616 	if (status == DDI_SUCCESS)
2617 		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
2618 	else
2619 		mac_fini_ops(&bge_dev_ops);
2620 	return (status);
2621 }
2622 
2623 int
2624 _fini(void)
2625 {
2626 	int status;
2627 
2628 	status = mod_remove(&modlinkage);
2629 	if (status == DDI_SUCCESS) {
2630 		mac_fini_ops(&bge_dev_ops);
2631 		mutex_destroy(bge_log_mutex);
2632 	}
2633 	return (status);
2634 }
2635 
2636 
2637 /*
2638  * bge_add_intrs:
2639  *
2640  * Register FIXED or MSI interrupts.
2641  */
2642 static int
2643 bge_add_intrs(bge_t *bgep, int	intr_type)
2644 {
2645 	dev_info_t	*dip = bgep->devinfo;
2646 	int		avail, actual, intr_size, count = 0;
2647 	int		i, flag, ret;
2648 
2649 	bge_log(bgep, "bge_add_intrs: interrupt type 0x%x\n", intr_type);
2650 
2651 	/* Get number of interrupts */
2652 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2653 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2654 		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
2655 		    "count: %d", ret, count);
2656 
2657 		return (DDI_FAILURE);
2658 	}
2659 
2660 	/* Get number of available interrupts */
2661 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2662 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2663 		bge_error(bgep, "ddi_intr_get_navail() failure, "
2664 		    "ret: %d, avail: %d\n", ret, avail);
2665 
2666 		return (DDI_FAILURE);
2667 	}
2668 
2669 	if (avail < count) {
2670 		bge_log(bgep, "nitrs() returned %d, navail returned %d\n",
2671 		    count, avail);
2672 	}
2673 
2674 	/*
2675 	 * BGE hardware generates only single MSI even though it claims
2676 	 * to support multiple MSIs. So, hard code MSI count value to 1.
2677 	 */
2678 	if (intr_type == DDI_INTR_TYPE_MSI) {
2679 		count = 1;
2680 		flag = DDI_INTR_ALLOC_STRICT;
2681 	} else {
2682 		flag = DDI_INTR_ALLOC_NORMAL;
2683 	}
2684 
2685 	/* Allocate an array of interrupt handles */
2686 	intr_size = count * sizeof (ddi_intr_handle_t);
2687 	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);
2688 
2689 	/* Call ddi_intr_alloc() */
2690 	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
2691 	    count, &actual, flag);
2692 
2693 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
2694 		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
2695 
2696 		kmem_free(bgep->htable, intr_size);
2697 		return (DDI_FAILURE);
2698 	}
2699 
2700 	if (actual < count) {
2701 		bge_log(bgep, "Requested: %d, Received: %d\n", count, actual);
2702 	}
2703 
2704 	bgep->intr_cnt = actual;
2705 
2706 	/*
2707 	 * Get priority for first msi, assume remaining are all the same
2708 	 */
2709 	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
2710 	    DDI_SUCCESS) {
2711 		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);
2712 
2713 		/* Free already allocated intr */
2714 		for (i = 0; i < actual; i++) {
2715 			(void) ddi_intr_free(bgep->htable[i]);
2716 		}
2717 
2718 		kmem_free(bgep->htable, intr_size);
2719 		return (DDI_FAILURE);
2720 	}
2721 
2722 	/* Call ddi_intr_add_handler() */
2723 	for (i = 0; i < actual; i++) {
2724 		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
2725 		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
2726 			bge_error(bgep, "ddi_intr_add_handler() "
2727 			    "failed %d\n", ret);
2728 
2729 			/* Free already allocated intr */
2730 			for (i = 0; i < actual; i++) {
2731 				(void) ddi_intr_free(bgep->htable[i]);
2732 			}
2733 
2734 			kmem_free(bgep->htable, intr_size);
2735 			return (DDI_FAILURE);
2736 		}
2737 	}
2738 
2739 	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
2740 		!= DDI_SUCCESS) {
2741 		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);
2742 
2743 		for (i = 0; i < actual; i++) {
2744 			(void) ddi_intr_remove_handler(bgep->htable[i]);
2745 			(void) ddi_intr_free(bgep->htable[i]);
2746 		}
2747 
2748 		kmem_free(bgep->htable, intr_size);
2749 		return (DDI_FAILURE);
2750 	}
2751 
2752 	return (DDI_SUCCESS);
2753 }
2754 
2755 /*
2756  * bge_rem_intrs:
2757  *
2758  * Unregister FIXED or MSI interrupts
2759  */
2760 static void
2761 bge_rem_intrs(bge_t *bgep)
2762 {
2763 	int	i;
2764 
2765 	bge_log(bgep, "bge_rem_intrs\n");
2766 
2767 	/* Call ddi_intr_remove_handler() */
2768 	for (i = 0; i < bgep->intr_cnt; i++) {
2769 		(void) ddi_intr_remove_handler(bgep->htable[i]);
2770 		(void) ddi_intr_free(bgep->htable[i]);
2771 	}
2772 
2773 	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
2774 }
2775 
2776 
2777 void
2778 bge_intr_enable(bge_t *bgep)
2779 {
2780 	int i;
2781 
2782 	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2783 		/* Call ddi_intr_block_enable() for MSI interrupts */
2784 		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
2785 	} else {
2786 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
2787 		for (i = 0; i < bgep->intr_cnt; i++) {
2788 			(void) ddi_intr_enable(bgep->htable[i]);
2789 		}
2790 	}
2791 }
2792 
2793 
2794 void
2795 bge_intr_disable(bge_t *bgep)
2796 {
2797 	int i;
2798 
2799 	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2800 		/* Call ddi_intr_block_disable() */
2801 		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
2802 	} else {
2803 		for (i = 0; i < bgep->intr_cnt; i++) {
2804 			(void) ddi_intr_disable(bgep->htable[i]);
2805 		}
2806 	}
2807 }
2808