/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

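/*
 * Note: dma_attr_desc below differs from dma_attr_buf only in its
 * alignment (256 bytes rather than 16), matching the chip's
 * requirement that descriptor rings be 256-byte aligned.
 */
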
/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Property names
 */
static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int		rge_m_start(void *);
static void		rge_m_stop(void *);
static int		rge_m_promisc(void *, boolean_t);
static int		rge_m_multicst(void *, boolean_t, const uint8_t *);
static int		rge_m_unicst(void *, const uint8_t *);
static void		rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	rge_m_getcapab(void *, mac_capab_t, void *);

#define	RGE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	rge_m_ioctl,
	rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
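	/*
	 * Note: with dma_attr_sgllen == 1 in the attribute templates
	 * above, a successful bind should always yield exactly one
	 * cookie, so the ncookies test below is defensive.
	 */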
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

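	/*
	 * Mark this area as one whole, un-sliced chunk; rge_slice_chunk()
	 * overwrites these sentinel values when the chunk is carved up.
	 */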
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
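 * Each slice is also stamped with a unique nonzero token, drawn from
 * a static sequence counter, which distinguishes it from its siblings.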
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
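	/*
	 * Each h/w BD carries the 64-bit DMA address of its buffer as
	 * two little-endian 32-bit words; BD_FLAG_EOR on the final BD
	 * tells the chip to wrap back to the start of the ring.
	 */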
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - rgep->head_room));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{
	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	/*
	 * If not all of the buffers loaned up to the stack have been
	 * returned to the driver yet, fall back to bcopy() in the rx
	 * process.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	uint32_t slot;

	ssbdp = rgep->sw_sbds;
	for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
		rge_free_dma_mem(&ssbdp->pbuf);
		ssbdp++;
	}

	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
	rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	srbdp = rgep->sw_rbds;
	for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
	rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

	srbdp = rgep->free_srbds;
	for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
	rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_sbd_t *ssbdp;
	dma_area_t *pbuf;
	dma_area_t desc;
	int err;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	rgep->tx_ring = rgep->tx_desc.mem_va;

	desc = rgep->tx_desc;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

		/*
		 * Allocate memory & handle for Tx buffers
		 */
		pbuf = &ssbdp->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_error(rgep,
			    "rge_init_send_ring: alloc tx buffer failed");
			rge_fini_send_ring(rgep);
			return (DDI_FAILURE);
		}
		ssbdp++;
	}
	ASSERT(desc.alength == 0);

	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rgep->sw_rbds = srbdp;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	rgep->rx_ring = rgep->rx_desc.mem_va;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_recv_ring(rgep);
			rge_error(rgep,
			    "rge_init_recv_ring: alloc rx buffer failed");
			return (DDI_FAILURE);
		}

		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
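			/*
			 * Wrap the DMA buffer in an mblk so received
			 * frames can be passed up the stack zero-copy;
			 * rge_rx_recycle() reclaims the buffer when the
			 * mblk is freed.
			 */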
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_fini_recv_ring(rgep);
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
		}
		srbdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *free_srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
		rgep->rx_bcopy = B_TRUE;
		return (DDI_SUCCESS);
	}

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
	rgep->free_srbds = free_srbdp;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
		free_srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for free Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_buf_ring(rgep);
			rge_error(rgep,
			    "rge_init_buf_ring: alloc rx free buffer failed");
			return (DDI_FAILURE);
		}
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		rx_buf->rx_recycle.free_func = rge_rx_recycle;
		rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
		rx_buf->private = (caddr_t)rgep;
		rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
		    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
		if (rx_buf->mp == NULL) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: desballoc() failed");
			return (DDI_FAILURE);
		}
		free_srbdp++;
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	err = rge_init_send_ring(rgep);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		return (DDI_FAILURE);
	}

	err = rge_init_buf_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		rge_fini_recv_ring(rgep);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 *	rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	rgep->stats.rpackets = 0;
	rgep->stats.rbytes = 0;
	rgep->stats.opackets = 0;
	rgep->stats.obytes = 0;
	rgep->stats.tx_pre_ismax = B_FALSE;
	rgep->stats.tx_cur_ismax = B_FALSE;

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 *	rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 *	rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
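		/*
		 * Each pass waits 1ms, so the wait for loaned-up rx
		 * buffers is bounded at RXBUFF_FREE_LOOP milliseconds.
		 */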
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		rgep->resched_needed = B_TRUE;
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
		mutex_exit(rgep->genlock);
		return;
	}
	rge_stop(rgep);
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 *	rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_FAILURE);
	}
	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 *	rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
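 * The index is the top 6 bits of a CRC computed bit-serially over the
 * six address octets (least-significant bit of each octet first),
 * using the polynomial RGE_HASH_POLY and initial value RGE_HASH_CRC.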
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1))
				crc ^= POLY;
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;
	/* the index value is between 0 and 63 (0x3f) */

	return (index);
}

/*
 *	rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t reg;
	uint8_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	/*
	 * Calculate the Multicast address hash index value
	 *	Normally, the position of MAR0-MAR7 is
	 *	MAR0: offset 0x08, ..., MAR7: offset 0x0F.
	 *
	 *	For pcie chipset, the position of MAR0-MAR7 is
	 *	different from others:
	 *	MAR0: offset 0x0F, ..., MAR7: offset 0x08.
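	 *
	 *	For example, assuming RGE_MCAST_NUM is 8 (8 hash bits per
	 *	MAR register), index 35 maps to bit 3 of hashp[4] on a
	 *	normal chip and to bit 3 of hashp[3] on a pcie chip.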
	 */
	index = rge_hash_index(addr->ether_addr_octet);
	if (rgep->chipid.is_pcie)
		reg = (~(index / RGE_MCAST_NUM)) & 0x7;
	else
		reg = index / RGE_MCAST_NUM;

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] &= ~ (1 << (index % RGE_MCAST_NUM));
	}

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(rgep->genlock);
		return;
	}
	mutex_exit(rgep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	rge_t *rgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		switch (rgep->chipid.mac_ver) {
		case MAC_VER_8169:
		case MAC_VER_8169S_D:
		case MAC_VER_8169S_E:
		case MAC_VER_8169SB:
		case MAC_VER_8169SC:
		case MAC_VER_8168:
		case MAC_VER_8168B_B:
		case MAC_VER_8168B_C:
		case MAC_VER_8101E:
			*hcksum_txflags = HCKSUM_INET_FULL_V4 |
			    HCKSUM_IPHDRCKSUM;
			break;
		case MAC_VER_8168B_D:
		case MAC_VER_8101E_B:
		case MAC_VER_8101E_C:
		default:
			*hcksum_txflags = 0;
			break;
		}
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
	dev_info_t *dip = rgep->devinfo;
	int avail;
	int actual;
	int intr_size;
	int count;
	int i, j;
	int ret;

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		rge_error(rgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
	rgep->intr_rqst = count;
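	/*
	 * Note: intr_rqst (set above) records the requested count;
	 * rge_rem_intrs() uses it to free the whole htable even if
	 * ddi_intr_alloc() grants fewer vectors below.
	 */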

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
	    count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}
	if (actual < count) {
		rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
		    count, actual);
	}
	rgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
	    DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		rge_error(rgep, "rge_add_intrs: "
		    "Hi level interrupt not supported");
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(rgep->htable[i]);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
		    (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			rge_error(rgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);
			/* Remove already added intr */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(rgep->htable[j]);
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(rgep->htable[i]);
			}
			kmem_free(rgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(rgep->htable[i]);
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
	int i;

	/* Disable all interrupts */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
	} else {
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_disable(rgep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < rgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(rgep->htable[i]);
		(void) ddi_intr_free(rgep->htable[i]);
	}

	kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

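	/*
	 * Each teardown step below is gated on a PROGRESS_* bit, so this
	 * routine can safely undo a partially-completed rge_attach() as
	 * well as a fully attached instance.
	 */
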
	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->periodic_id != NULL) {
		ddi_periodic_delete(rgep->periodic_id);
		rgep->periodic_id = NULL;
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INIT) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		rge_fini_rings(rgep);
	}

	if (rgep->progress & PROGRESS_INTR) {
		rge_rem_intrs(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(rgep->factotum_hdl);

	if (rgep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(rgep->resched_hdl);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (rgep == NULL)
		cmn_err(CE_PANIC,
1513 		    "rge: ngep returned from ddi_get_driver_private was NULL");

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "rge: passed devinfo not the same as saved devinfo");

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	mutex_enter(rgep->genlock);

	/*
	 * This branch can be taken in only one case: the port hasn't
	 * been plumbed.
	 */
	if (rgep->suspended == B_FALSE) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}
	rgep->rge_mac_state = RGE_MAC_STARTED;
	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	rge_restart(rgep);
	rgep->suspended = B_FALSE;

	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	int intr_types;
	caddr_t regs;
	int instance;
	int i;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->rge_mac_state = RGE_MAC_ATTACH;
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
	rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, msi_propname, B_TRUE);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
1680 	 * the <resched> softint is triggered if if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Get supported interrupt types
	 */
	if (ddi_intr_get_supported_types(devinfo, &intr_types)
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 * RTL8101E is observed to have MSI invalidation issue after S/R.
	 * So the FIXED interrupt is used instead.
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8101E)
		rgep->msi_enable = B_FALSE;
	if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			rge_error(rgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			rge_log(rgep, "Using MSI interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_MSI;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			rge_error(rgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}
		rge_log(rgep, "Using FIXED interrupt type\n");
		rgep->intr_type = DDI_INTR_TYPE_FIXED;
		rgep->progress |= PROGRESS_INTR;
	}
	if (!(rgep->progress & PROGRESS_INTR)) {
		rge_error(rgep, "No interrupts registered\n");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	rw_init(rgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_INIT;

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_enable(rgep->htable[i]);
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->default_mtu;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * rge_chip_cyclic() is invoked in kernel context.
	 */
	rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
	    RGE_CYCLIC_PERIOD, DDI_IPL_0);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 *	rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rw_enter(rgep->errlock, RW_WRITER);

	if (rgep->rge_mac_state != RGE_MAC_STARTED) {
		rw_exit(rgep->errlock);
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rgep->suspended = B_TRUE;
	rge_stop(rgep);
	rgep->rge_mac_state = RGE_MAC_STOPPED;

	rw_exit(rgep->errlock);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
rge_quiesce(dev_info_t *devinfo)
{
	rge_t *rgep = ddi_get_driver_private(devinfo);

	if (rgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Turn off debugging
	 */
	rge_debug = 0;
	rgep->debug = 0;

	/* Stop the chip */
	rge_chip_stop(rgep, B_FALSE);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any buffers are still posted (loaned up to the stack), the
	 * driver must refuse to detach; the upper layer needs to release
	 * them first.
	 */
	if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
	    rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/
DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL, rge_quiesce);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}