/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet v%I%";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Property names
 */
static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int		rge_m_start(void *);
static void		rge_m_stop(void *);
static int		rge_m_promisc(void *, boolean_t);
static int		rge_m_multicst(void *, boolean_t, const uint8_t *);
static int		rge_m_unicst(void *, const uint8_t *);
static void		rge_m_resources(void *);
static void		rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	rge_m_getcapab(void *, mac_capab_t, void *);

#define	RGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	rge_m_resources,
	rge_m_ioctl,
	rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
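	/*
	 * With dma_attr_sgllen set to 1 in the attribute structures
	 * above, a successful bind must yield exactly one cookie;
	 * anything else is treated as a failure here.
	 */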
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

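	/*
	 * The nslots/size/token fields are filled with poison values
	 * here; rge_slice_chunk() overwrites them with real slot counts
	 * and sizes when the chunk is carved up (an observation from
	 * the code below, not a documented contract).
	 */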
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - rgep->head_room));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	/*
	 * If any buffers previously sent up the stack haven't yet been
	 * returned to the driver, fall back to bcopy() in the receive
	 * path.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	uint32_t slot;

	ssbdp = rgep->sw_sbds;
	for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
		rge_free_dma_mem(&ssbdp->pbuf);
		ssbdp++;
	}

	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
	rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	srbdp = rgep->sw_rbds;
	for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
	rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

	srbdp = rgep->free_srbds;
	for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
	rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_sbd_t *ssbdp;
	dma_area_t *pbuf;
	dma_area_t desc;
	int err;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	rgep->tx_ring = rgep->tx_desc.mem_va;

	desc = rgep->tx_desc;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

		/*
		 * Allocate memory & handle for Tx buffers
		 */
		pbuf = &ssbdp->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_error(rgep,
			    "rge_init_send_ring: alloc tx buffer failed");
			rge_fini_send_ring(rgep);
			return (DDI_FAILURE);
		}
		ssbdp++;
	}
	ASSERT(desc.alength == 0);

	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rgep->sw_rbds = srbdp;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	rgep->rx_ring = rgep->rx_desc.mem_va;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_recv_ring(rgep);
			rge_error(rgep,
			    "rge_init_recv_ring: alloc rx buffer failed");
			return (DDI_FAILURE);
		}

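		/*
		 * Reserve <head_room> bytes at the front of each receive
		 * buffer; rge_reinit_recv_ring() programs the BD's DMA
		 * address past this gap.
		 */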
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_fini_recv_ring(rgep);
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
		}
		srbdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *free_srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
		rgep->rx_bcopy = B_TRUE;
		return (DDI_SUCCESS);
	}

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
	rgep->free_srbds = free_srbdp;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
		free_srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for free Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_buf_ring(rgep);
			rge_error(rgep,
			    "rge_init_buf_ring: alloc rx free buffer failed");
			return (DDI_FAILURE);
		}
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		rx_buf->rx_recycle.free_func = rge_rx_recycle;
		rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
		rx_buf->private = (caddr_t)rgep;
		rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
		    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
		if (rx_buf->mp == NULL) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: desballoc() failed");
			return (DDI_FAILURE);
		}
		free_srbdp++;
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	err = rge_init_send_ring(rgep);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		return (DDI_FAILURE);
	}

	err = rge_init_buf_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		rge_fini_recv_ring(rgep);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 *	rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	rgep->stats.rpackets = 0;
	rgep->stats.rbytes = 0;
	rgep->stats.opackets = 0;
	rgep->stats.obytes = 0;
	rgep->stats.tx_pre_ismax = B_FALSE;
	rgep->stats.tx_cur_ismax = B_FALSE;

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 *	rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 *	rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		rgep->resched_needed = B_TRUE;
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
		mutex_exit(rgep->genlock);
		return;
	}
	rge_stop(rgep);
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 *	rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_FAILURE);
	}
	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 *	rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

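	/*
	 * Feed each address byte LSB-first through a left-shifting CRC
	 * (initial value RGE_HASH_CRC, polynomial RGE_HASH_POLY); the
	 * top six bits of the result select the hash-table bit.  This
	 * is assumed to match the hardware's own hash computation, per
	 * the comment above.
	 */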
	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1))
				crc ^= POLY;
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;
	/* the index value is between 0 and 63 (0x3f) */

	return (index);
}

/*
 *	rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t reg;
	uint8_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	/*
	 * Calculate the Multicast address hash index value
	 *	Normally, the position of MAR0-MAR7 is
	 *	MAR0: offset 0x08, ..., MAR7: offset 0x0F.
	 *
	 *	For pcie chipset, the position of MAR0-MAR7 is
	 *	different from others:
	 *	MAR0: offset 0x0F, ..., MAR7: offset 0x08.
	 */
	index = rge_hash_index(addr->ether_addr_octet);
	if (rgep->chipid.is_pcie)
		reg = (~(index / RGE_MCAST_NUM)) & 0x7;
	else
		reg = index / RGE_MCAST_NUM;

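	/*
	 * Each MARn byte holds RGE_MCAST_NUM bits of the hash table, so
	 * <reg> selects the byte and (index % RGE_MCAST_NUM) the bit
	 * within it; a per-index reference count decides when the bit
	 * is actually set or cleared.
	 */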
	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] &= ~(1 << (index % RGE_MCAST_NUM));
	}

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(rgep->genlock);
		return;
	}
	mutex_exit(rgep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
rge_m_resources(void *arg)
{
	rge_t *rgep = arg;
	mac_rx_fifo_t mrf;

	mutex_enter(rgep->genlock);

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = rge_chip_blank;
	mrf.mrf_arg = (void *)rgep;
	mrf.mrf_normal_blank_time = RGE_RX_INT_TIME;
	mrf.mrf_normal_pkt_count = RGE_RX_INT_PKTS;
	rgep->handle = mac_resource_add(rgep->mh, (mac_resource_t *)&mrf);

	mutex_exit(rgep->genlock);
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
	dev_info_t *dip = rgep->devinfo;
	int avail;
	int actual;
	int intr_size;
	int count;
	int i, j;
	int ret;

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		rge_error(rgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
	rgep->intr_rqst = count;

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
	    count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}
	if (actual < count) {
		rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
		    count, actual);
	}
	rgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
	    DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		rge_error(rgep, "rge_add_intrs:"
		    "Hi level interrupt not supported");
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(rgep->htable[i]);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
		    (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			rge_error(rgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);
			/* Remove already added intr */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(rgep->htable[j]);
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(rgep->htable[i]);
			}
			kmem_free(rgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(rgep->htable[i]);
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
	int i;

	/* Disable all interrupts */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
	} else {
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_disable(rgep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < rgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(rgep->htable[i]);
		(void) ddi_intr_free(rgep->htable[i]);
	}

	kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->periodic_id != NULL) {
		ddi_periodic_delete(rgep->periodic_id);
		rgep->periodic_id = NULL;
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INIT) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		rge_fini_rings(rgep);
	}

	if (rgep->progress & PROGRESS_INTR) {
		rge_rem_intrs(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(rgep->factotum_hdl);

	if (rgep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(rgep->resched_hdl);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (rgep == NULL)
		cmn_err(CE_PANIC,
		    "rge: rgep returned from ddi_get_driver_private was NULL");
	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "rge: passed devinfo not the same as saved devinfo");

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	mutex_enter(rgep->genlock);

	/*
	 * This branch can be taken in only one case: the port
	 * hasn't been plumbed.
	 */
	if (rgep->suspended == B_FALSE) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}
	rgep->rge_mac_state = RGE_MAC_STARTED;
	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	rge_restart(rgep);
	rgep->suspended = B_FALSE;

	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	int intr_types;
	caddr_t regs;
	int instance;
	int i;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->rge_mac_state = RGE_MAC_ATTACH;
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
	rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, msi_propname, B_TRUE);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Get supported interrupt types
	 */
	if (ddi_intr_get_supported_types(devinfo, &intr_types)
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	/*
	 * Add the h/w interrupt handler and initialise mutexes.
	 * The RTL8101E has been observed to have an MSI invalidation
	 * issue after suspend/resume, so the FIXED interrupt type is
	 * used for that chip instead.
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8101E)
		rgep->msi_enable = B_FALSE;
	if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			rge_error(rgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			rge_log(rgep, "Using MSI interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_MSI;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			rge_error(rgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}
		rge_log(rgep, "Using FIXED interrupt type\n");
		rgep->intr_type = DDI_INTR_TYPE_FIXED;
		rgep->progress |= PROGRESS_INTR;
	}
	if (!(rgep->progress & PROGRESS_INTR)) {
		rge_error(rgep, "No interrupts registered\n");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	rw_init(rgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
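	/*
	 * Note: all of the locks above are initialised at the h/w
	 * interrupt priority so that they can safely be acquired
	 * from the interrupt handler.
	 */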

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_INIT;

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_enable(rgep->htable[i]);
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->default_mtu;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * rge_chip_cyclic() is invoked in kernel context.
	 */
	rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
	    RGE_CYCLIC_PERIOD, DDI_IPL_0);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 *	rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rw_enter(rgep->errlock, RW_READER);

	if (rgep->rge_mac_state != RGE_MAC_STARTED) {
		rw_exit(rgep->errlock);
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rgep->suspended = B_TRUE;
	rge_stop(rgep);
	rgep->rge_mac_state = RGE_MAC_STOPPED;

	rw_exit(rgep->errlock);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any receive buffers are still posted (loaned up the stack),
	 * the driver must refuse to detach; the upper layer has to
	 * release them first.
	 */
	if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
	    rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/
DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}