xref: /illumos-gate/usr/src/uts/common/io/rge/rge_main.c (revision fe54a78e1aacf39261ad56e9903bce02e3fb6d21)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Property names
 */
static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int		rge_m_start(void *);
static void		rge_m_stop(void *);
static int		rge_m_promisc(void *, boolean_t);
static int		rge_m_multicst(void *, boolean_t, const uint8_t *);
static int		rge_m_unicst(void *, const uint8_t *);
static void		rge_m_resources(void *);
static void		rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	rge_m_getcapab(void *, mac_capab_t, void *);

#define	RGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	rge_m_resources,
	rge_m_ioctl,
	rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
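
/*
 * Note on the allocation pattern above: the handle, the memory and the
 * binding are acquired in that order and torn down in reverse on any
 * failure.  Because both attribute structures set dma_attr_sgllen to 1,
 * a successful bind must yield exactly one cookie, which the
 * ncookies != 1 check enforces.  An illustrative (hypothetical) use,
 * following what rge_alloc_bufs() below actually does:
 *
 *	dma_area_t area;
 *
 *	if (rge_alloc_dma_mem(rgep, RGE_STATS_DUMP_SIZE,
 *	    &dma_attr_desc, &rge_desc_accattr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) == DDI_SUCCESS) {
 *		... DMA_VPTR(area) is the kernel virtual address and
 *		... area.cookie.dmac_laddress the device-visible address
 *	}
 */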

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
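
/*
 * Worked example (illustrative only): if <chunk> describes 4096 bytes,
 * then rge_slice_chunk(&slice, &chunk, 4, 256) hands back a slice
 * covering the first 4 * 256 = 1024 bytes (nslots = 4, size = 256)
 * and advances the chunk's virtual address, DMA address and offset by
 * 1024, leaving 3072 bytes available for further slices.
 */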

static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - rgep->head_room));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}
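
/*
 * In both rings above, BD_FLAG_EOR on the final descriptor tells the
 * chip to wrap back to the first descriptor, so no separate ring-size
 * register is required.  On the receive side, BD_FLAG_HW_OWN hands
 * each buffer to the hardware; the chip clears that bit once it has
 * filled the buffer, which is how the receive path recognises a
 * completed packet.
 */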

static void
rge_reinit_buf_ring(rge_t *rgep)
{

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	/*
	 * If not all the buffers posted upstream have been returned to
	 * the driver, use bcopy() only in the rx process.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	uint32_t slot;

	ssbdp = rgep->sw_sbds;
	for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
		rge_free_dma_mem(&ssbdp->pbuf);
		ssbdp++;
	}

	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
	rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	srbdp = rgep->sw_rbds;
	for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
	rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	sw_rbd_t *srbdp;
	uint32_t slot;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		return;

	ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

	srbdp = rgep->free_srbds;
	for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
		if (srbdp->rx_buf != NULL) {
			if (srbdp->rx_buf->mp != NULL) {
				freemsg(srbdp->rx_buf->mp);
				srbdp->rx_buf->mp = NULL;
			}
			rge_free_dma_mem(&srbdp->rx_buf->pbuf);
			kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
			srbdp->rx_buf = NULL;
		}
	}

	kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
	rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_sbd_t *ssbdp;
	dma_area_t *pbuf;
	dma_area_t desc;
	int err;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	rgep->tx_ring = rgep->tx_desc.mem_va;

	desc = rgep->tx_desc;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

		/*
		 * Allocate memory & handle for Tx buffers
		 */
		pbuf = &ssbdp->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_error(rgep,
			    "rge_init_send_ring: alloc tx buffer failed");
			rge_fini_send_ring(rgep);
			return (DDI_FAILURE);
		}
		ssbdp++;
	}
	ASSERT(desc.alength == 0);

	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rgep->sw_rbds = srbdp;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	rgep->rx_ring = rgep->rx_desc.mem_va;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_recv_ring(rgep);
			rge_error(rgep,
			    "rge_init_recv_ring: alloc rx buffer failed");
			return (DDI_FAILURE);
		}

		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_fini_recv_ring(rgep);
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
		}
		srbdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	sw_rbd_t *free_srbdp;
	dma_buf_t *rx_buf;
	dma_area_t *pbuf;
	int err;

	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
		rgep->rx_bcopy = B_TRUE;
		return (DDI_SUCCESS);
	}

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
	rgep->free_srbds = free_srbdp;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
		free_srbdp->rx_buf = rx_buf =
		    kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

		/*
		 * Allocate memory & handle for free Rx buffers
		 */
		pbuf = &rx_buf->pbuf;
		err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
		    &dma_attr_buf, &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
		if (err != DDI_SUCCESS) {
			rge_fini_buf_ring(rgep);
			rge_error(rgep,
			    "rge_init_buf_ring: alloc rx free buffer failed");
			return (DDI_FAILURE);
		}
		pbuf->alength -= rgep->head_room;
		pbuf->offset += rgep->head_room;
		rx_buf->rx_recycle.free_func = rge_rx_recycle;
		rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
		rx_buf->private = (caddr_t)rgep;
		rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
		    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
		if (rx_buf->mp == NULL) {
			rge_fini_buf_ring(rgep);
			rge_problem(rgep,
			    "rge_init_buf_ring: desballoc() failed");
			return (DDI_FAILURE);
		}
		free_srbdp++;
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	err = rge_init_send_ring(rgep);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		return (DDI_FAILURE);
	}

	err = rge_init_buf_ring(rgep);
	if (err != DDI_SUCCESS) {
		rge_fini_send_ring(rgep);
		rge_fini_recv_ring(rgep);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 *	rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	rgep->stats.rpackets = 0;
	rgep->stats.rbytes = 0;
	rgep->stats.opackets = 0;
	rgep->stats.obytes = 0;
	rgep->stats.tx_pre_ismax = B_FALSE;
	rgep->stats.tx_cur_ismax = B_FALSE;

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 *	rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 *	rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		rgep->resched_needed = B_TRUE;
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
		mutex_exit(rgep->genlock);
		return;
	}
	rge_stop(rgep);
	/*
	 * Wait for posted buffers to be freed...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 *	rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_FAILURE);
	}
	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 *	rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1))
				crc ^= POLY;
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;
		/* the index value is between 0 and 63 (0x3f) */

	return (index);
}
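
/*
 * Assuming RGE_HASH_CRC and RGE_HASH_POLY carry the conventional
 * CRC-32 seed (0xFFFFFFFF) and polynomial (0x04C11DB7), the loop
 * above computes the MSB-first CRC-32 of the 6-byte address, and
 * the top 6 bits of the result select one of the 64 filter bits
 * spread across the MAR0-MAR7 registers.
 */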

/*
 *	rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t reg;
	uint8_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	/*
	 * Calculate the Multicast address hash index value
	 *	Normally, the position of MAR0-MAR7 is
	 *	MAR0: offset 0x08, ..., MAR7: offset 0x0F.
	 *
	 *	For pcie chipset, the position of MAR0-MAR7 is
	 *	different from others:
	 *	MAR0: offset 0x0F, ..., MAR7: offset 0x08.
	 */
	index = rge_hash_index(addr->ether_addr_octet);
	if (rgep->chipid.is_pcie)
		reg = (~(index / RGE_MCAST_NUM)) & 0x7;
	else
		reg = index / RGE_MCAST_NUM;

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[reg] &= ~ (1 << (index % RGE_MCAST_NUM));
	}

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;

	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}
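
/*
 * A user-level consumer would exercise these cases through the generic
 * STREAMS loopback ioctls; a minimal (hypothetical) sketch:
 *
 *	uint32_t mode = RGE_LOOP_INTERNAL_MAC;
 *	struct strioctl sioc;
 *
 *	sioc.ic_cmd = LB_SET_MODE;
 *	sioc.ic_timout = 0;
 *	sioc.ic_len = sizeof (mode);
 *	sioc.ic_dp = (char *)&mode;
 *	if (ioctl(fd, I_STR, &sioc) == -1)
 *		perror("LB_SET_MODE");
 */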

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	mutex_enter(rgep->genlock);
	if (rgep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(rgep->genlock);
		return;
	}
	mutex_exit(rgep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
rge_m_resources(void *arg)
{
	rge_t *rgep = arg;
	mac_rx_fifo_t mrf;

	mutex_enter(rgep->genlock);

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = rge_chip_blank;
	mrf.mrf_arg = (void *)rgep;
	mrf.mrf_normal_blank_time = RGE_RX_INT_TIME;
	mrf.mrf_normal_pkt_count = RGE_RX_INT_PKTS;
	rgep->handle = mac_resource_add(rgep->mh, (mac_resource_t *)&mrf);

	mutex_exit(rgep->genlock);
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	rge_t *rgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		switch (rgep->chipid.mac_ver) {
		case MAC_VER_8169:
		case MAC_VER_8169S_D:
		case MAC_VER_8169S_E:
		case MAC_VER_8169SB:
		case MAC_VER_8169SC:
		case MAC_VER_8168:
		case MAC_VER_8168B_B:
		case MAC_VER_8168B_C:
		case MAC_VER_8101E:
			*hcksum_txflags = HCKSUM_INET_FULL_V4 |
			    HCKSUM_IPHDRCKSUM;
			break;
		case MAC_VER_8168B_D:
		case MAC_VER_8101E_B:
		case MAC_VER_8101E_C:
		default:
			*hcksum_txflags = 0;
			break;
		}
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
	dev_info_t *dip = rgep->devinfo;
	int avail;
	int actual;
	int intr_size;
	int count;
	int i, j;
	int ret;

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		rge_error(rgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
	rgep->intr_rqst = count;

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
	    count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}
	if (actual < count) {
		rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
		    count, actual);
	}
	rgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
	    DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
		rge_error(rgep, "rge_add_intrs: "
		    "Hi level interrupt not supported");
		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(rgep->htable[i]);
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
		    (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			rge_error(rgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);
			/* Remove already added intr */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(rgep->htable[j]);
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(rgep->htable[i]);
			}
			kmem_free(rgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(rgep->htable[i]);
			(void) ddi_intr_free(rgep->htable[i]);
		}
		kmem_free(rgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
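
/*
 * The registration sequence above follows the standard DDI interrupt
 * model:
 *
 *	ddi_intr_get_nintrs() / ddi_intr_get_navail()
 *	ddi_intr_alloc()
 *	ddi_intr_get_pri()	(must be below ddi_intr_get_hilevel_pri())
 *	ddi_intr_add_handler()
 *	ddi_intr_get_cap()
 *
 * Enabling is deliberately deferred to rge_attach(), which calls
 * ddi_intr_block_enable() or ddi_intr_enable() only after the mutexes
 * used by rge_intr() have been initialised with the priority obtained
 * here.
 */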

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
	int i;

	/* Disable all interrupts */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
	} else {
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_disable(rgep->htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < rgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(rgep->htable[i]);
		(void) ddi_intr_free(rgep->htable[i]);
	}

	kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->periodic_id != NULL) {
		ddi_periodic_delete(rgep->periodic_id);
		rgep->periodic_id = NULL;
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INIT) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		rge_fini_rings(rgep);
	}

	if (rgep->progress & PROGRESS_INTR) {
		rge_rem_intrs(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(rgep->factotum_hdl);

	if (rgep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(rgep->resched_hdl);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (rgep == NULL)
		cmn_err(CE_PANIC,
		    "rge: rgep returned from ddi_get_driver_private was NULL");

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "rge: passed devinfo not the same as saved devinfo");

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	mutex_enter(rgep->genlock);

	/*
	 * This conditional branch is executed in only one case: the port
	 * hasn't been plumbed.
	 */
	if (rgep->suspended == B_FALSE) {
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}
	rgep->rge_mac_state = RGE_MAC_STARTED;
	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	rge_restart(rgep);
	rgep->suspended = B_FALSE;

	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	int intr_types;
	caddr_t regs;
	int instance;
	int i;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->rge_mac_state = RGE_MAC_ATTACH;
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
	rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, msi_propname, B_TRUE);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_intr_add_softint() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Get supported interrupt types
	 */
	if (ddi_intr_get_supported_types(devinfo, &intr_types)
	    != DDI_SUCCESS) {
		rge_error(rgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 * The RTL8101E is observed to have an MSI invalidation issue
	 * after S/R, so the FIXED interrupt is used instead.
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8101E)
		rgep->msi_enable = B_FALSE;
	if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			rge_error(rgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			rge_log(rgep, "Using MSI interrupt type\n");
			rgep->intr_type = DDI_INTR_TYPE_MSI;
			rgep->progress |= PROGRESS_INTR;
		}
	}
	if (!(rgep->progress & PROGRESS_INTR) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			rge_error(rgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}
		rge_log(rgep, "Using FIXED interrupt type\n");
		rgep->intr_type = DDI_INTR_TYPE_FIXED;
		rgep->progress |= PROGRESS_INTR;
	}
	if (!(rgep->progress & PROGRESS_INTR)) {
		rge_error(rgep, "No interrupts registered\n");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	rw_init(rgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(rgep->intr_pri));

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_INIT;

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < rgep->intr_cnt; i++) {
			(void) ddi_intr_enable(rgep->htable[i]);
		}
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->default_mtu;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * rge_chip_cyclic() is invoked in kernel context.
	 */
	rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
	    RGE_CYCLIC_PERIOD, DDI_IPL_0);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 *	rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rw_enter(rgep->errlock, RW_WRITER);

	if (rgep->rge_mac_state != RGE_MAC_STARTED) {
		rw_exit(rgep->errlock);
		mutex_exit(rgep->genlock);
		return (DDI_SUCCESS);
	}

	rgep->suspended = B_TRUE;
	rge_stop(rgep);
	rgep->rge_mac_state = RGE_MAC_STOPPED;

	rw_exit(rgep->errlock);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
rge_quiesce(dev_info_t *devinfo)
{
	rge_t *rgep = ddi_get_driver_private(devinfo);

	if (rgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Turn off debugging
	 */
	rge_debug = 0;
	rgep->debug = 0;

	/* Stop the chip */
	rge_chip_stop(rgep, B_FALSE);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any receive buffers are still posted upstream, the driver
	 * must refuse to detach; the upper layer needs to be notified
	 * to release them first.
	 */
	if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
	    rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/
DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL, rge_quiesce);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}