/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek Gigabit Ethernet Driver v%I%";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
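
/*
 * Note: both attribute structures describe a single contiguous,
 * 32-bit-addressable region (dma_attr_sgllen == 1), differing only
 * in alignment: 16 bytes for packet buffers vs 256 bytes for
 * descriptor rings.  rge_alloc_dma_mem() below relies on the
 * single-segment guarantee when it checks that the bind produced
 * exactly one cookie.
 */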

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
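
/*
 * Note: registers use DDI_STRUCTURE_LE_ACC, so the DDI access layer
 * byte-swaps on big-endian hosts, while descriptors and data use
 * DDI_NEVERSWAP_ACC -- the ring-setup code instead does its own
 * explicit swapping with RGE_BSWAP_32().
 */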

/*
 * Property names
 */
static char debug_propname[] = "rge-debug-flags";

static int		rge_m_start(void *);
static void		rge_m_stop(void *);
static int		rge_m_promisc(void *, boolean_t);
static int		rge_m_multicst(void *, boolean_t, const uint8_t *);
static int		rge_m_unicst(void *, const uint8_t *);
static void		rge_m_resources(void *);
static void		rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	rge_m_getcapab(void *, mac_capab_t, void *);

#define	RGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
	RGE_M_CALLBACK_FLAGS,
	rge_m_stat,
	rge_m_start,
	rge_m_stop,
	rge_m_promisc,
	rge_m_multicst,
	rge_m_unicst,
	rge_m_tx,
	rge_m_resources,
	rge_m_ioctl,
	rge_m_getcapab
};
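
/*
 * rge_m_stat() and rge_m_tx() have no static prototypes above because
 * they are implemented in other source files of this driver
 * (presumably declared in rge.h); the remaining callbacks are defined
 * below.
 */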

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
	ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
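
/*
 * Illustrative call (mirrors the descriptor-ring allocations in
 * rge_alloc_bufs() below; <size> and <area> are placeholders):
 *
 *	err = rge_alloc_dma_mem(rgep, size, &dma_attr_desc,
 *	    &rge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area);
 *
 * On success the area is bound to a single DMA cookie and must later
 * be released with rge_free_dma_mem().
 */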

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
	uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty * size;
	ASSERT(size > 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
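
/*
 * Worked example (illustrative): given an 8192-byte chunk, the call
 * rge_slice_chunk(&slice, &chunk, 4, 1024) makes <slice> describe the
 * first 4096 bytes (nslots 4, size 1024) and advances <chunk> --
 * mem_va, offset and the DMA cookie all move forward by 4096 bytes,
 * while alength shrinks to 4096.
 */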


static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	size_t txbuffsize;
	size_t rxbuffsize;
	size_t freebuffsize;
	int split;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Tx buffers
	 */
	txbuffsize = RGE_SEND_SLOTS * rgep->txbuf_size;
	ASSERT((txbuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    txbuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &rgep->dma_area_txbuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handle for Rx buffers
	 */
	rxbuffsize = RGE_RECV_SLOTS * rgep->rxbuf_size;
	ASSERT((rxbuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    rxbuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &rgep->dma_area_rxbuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handle for free Rx buffers
	 */
	freebuffsize = RGE_BUF_SLOTS * rgep->rxbuf_size;
	ASSERT((freebuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    freebuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &rgep->dma_area_freebuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	int i;

	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
	for (i = 0; i < RGE_SPLIT; i++) {
		rge_free_dma_mem(&rgep->dma_area_txbuf[i]);
		rge_free_dma_mem(&rgep->dma_area_rxbuf[i]);
		rge_free_dma_mem(&rgep->dma_area_freebuf[i]);
	}
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + RGE_HEADROOM);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - RGE_HEADROOM));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{
	/*
	 * re-init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}
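
/*
 * Note: rx_free < RGE_BUF_SLOTS here means some receive buffers are
 * still loaned out to the upper layers, so the driver falls back to
 * bcopy'ing received packets until they have all been recycled.
 */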

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	rge_bd_t *bdp;
	sw_sbd_t *ssbdp;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	bdp = rgep->tx_desc.mem_va;
	rgep->tx_ring = bdp;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_txbuf[split];
		for (slot = 0; slot < RGE_SEND_SLOTS/RGE_SPLIT; slot++) {
			rge_slice_chunk(&ssbdp->desc, &rgep->dma_area_txdesc,
			    1, sizeof (rge_bd_t));
			pbuf = &ssbdp->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->txbuf_size);
			bdp->host_buf_addr =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress);
			bdp->host_buf_addr_hi =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
			/* last BD in Tx ring */
			if (split == (RGE_SPLIT - 1) &&
			    slot == (RGE_SEND_SLOTS/RGE_SPLIT - 1))
				bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
			ssbdp++;
			bdp++;
		}
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
	rx_buf = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*rx_buf), KM_SLEEP);
	rgep->sw_rbds = srbdp;
	rgep->sw_rbuf = rx_buf;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	bdp = rgep->rx_desc.mem_va;
	rgep->rx_ring = bdp;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_rxbuf[split];
		for (slot = 0; slot < RGE_RECV_SLOTS/RGE_SPLIT; slot++) {
			srbdp->rx_buf = rx_buf;
			pbuf = &rx_buf->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->rxbuf_size);
			pbuf->alength -= RGE_HEADROOM;
			pbuf->offset += RGE_HEADROOM;
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}

			bdp->host_buf_addr = RGE_BSWAP_32(RGE_HEADROOM +
			    pbuf->cookie.dmac_laddress);
			bdp->host_buf_addr_hi =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
			    (rgep->rxbuf_size - RGE_HEADROOM));
			/* last BD in Rx ring */
			if (split == (RGE_SPLIT - 1) &&
			    slot == (RGE_RECV_SLOTS/RGE_SPLIT - 1))
				bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
			srbdp++;
			bdp++;
			rx_buf++;
		}
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}
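
/*
 * Each receive buffer is wrapped in an mblk by desballoc(), with
 * rge_rx_recycle() registered as its free function.  This lets the
 * receive path pass buffers upstream without copying: when the upper
 * layer eventually calls freemsg(), rge_rx_recycle() reclaims the
 * buffer for reuse.
 */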

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	sw_rbd_t *free_rbdp;
	dma_buf_t *rx_buf;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_rbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_rbdp), KM_SLEEP);
	rx_buf = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*rx_buf), KM_SLEEP);
	rgep->free_rbds = free_rbdp;
	rgep->sw_freebuf = rx_buf;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_freebuf[split];
		for (slot = 0; slot < RGE_BUF_SLOTS/RGE_SPLIT; slot++) {
			free_rbdp->rx_buf = rx_buf;
			pbuf = &rx_buf->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->rxbuf_size);
			pbuf->alength -= RGE_HEADROOM;
			pbuf->offset += RGE_HEADROOM;
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_problem(rgep,
				    "rge_init_buf_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
			free_rbdp++;
			rx_buf++;
		}
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	rge_init_send_ring(rgep);
	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS)
		return (err);
	err = rge_init_buf_ring(rgep);
	return (err);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	dma_buf_t *rx_buf = rgep->sw_rbuf;
	uint32_t slot;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++, rx_buf++)
		freemsg(rx_buf->mp);
	kmem_free(rgep->sw_rbuf, RGE_RECV_SLOTS * sizeof (dma_buf_t));
	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	dma_buf_t *rx_buf = rgep->sw_freebuf;
	uint32_t slot;

	for (slot = 0; slot < RGE_BUF_SLOTS; slot++, rx_buf++)
		freemsg(rx_buf->mp);
	kmem_free(rgep->sw_freebuf, RGE_BUF_SLOTS * sizeof (dma_buf_t));
	kmem_free(rgep->free_rbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 *	rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 *	rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 *	rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		ddi_trigger_softintr(rgep->resched_id);
	}
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	rge_stop(rgep);
	rgep->link_up_msg = rgep->link_down_msg = " (stopped)";
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 *	rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);

	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rgep->link_up_msg = rgep->link_down_msg = " (initialized)";
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 *	rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);
	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;

	return (index);
}
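
/*
 * Worked example (illustrative): the returned index is just the top
 * six bits of the final CRC, so for crc == 0xDEADBEEF the index is
 * 0xDEADBEEF >> 26 == 55 -- always within the 64-bit hash filter's
 * range of 0..63.
 */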

/*
 *	rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	index = rge_hash_index(addr->ether_addr_octet);
			/* index value is between 0 and 63 */

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[index/32] |= 1 << (index % 32);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[index/32] &= ~(1 << (index % 32));
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}
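
/*
 * Illustrative: for index 55, the update above sets or clears bit
 * 55 % 32 == 23 in hashp[55/32] == hashp[1], i.e. one bit of the
 * 64-bit hash filter held as two 32-bit words.
 */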

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;
	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	const char *msg;

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
		msg = " (loopback disabled)";
		break;

	case RGE_LOOP_INTERNAL_PHY:
		msg = " (PHY internal loopback selected)";
		break;

	case RGE_LOOP_INTERNAL_MAC:
		msg = " (MAC internal loopback selected)";
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->link_down_msg = rgep->link_up_msg = msg;
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 * Otherwise just check for root access ...
		 */
		if (secpolicy_net_config != NULL)
			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		else
			err = drv_priv(iocp->ioc_cr);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
rge_m_resources(void *arg)
{
	rge_t *rgep = arg;
	mac_rx_fifo_t mrf;

	mutex_enter(rgep->genlock);

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = rge_chip_blank;
	mrf.mrf_arg = (void *)rgep;
	mrf.mrf_normal_blank_time = RGE_RX_INT_TIME;
	mrf.mrf_normal_pkt_count = RGE_RX_INT_PKTS;
	rgep->handle = mac_resource_add(rgep->mh, (mac_resource_t *)&mrf);

	mutex_exit(rgep->genlock);
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all RGE data structures
	 */
	if (rgep->cyclic_id) {
		mutex_enter(&cpu_lock);
		cyclic_remove(rgep->cyclic_id);
		mutex_exit(&cpu_lock);
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INTR) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		ddi_remove_intr(rgep->devinfo, 0, rgep->iblk);
		rge_fini_rings(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(rgep->factotum_id);

	if (rgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(rgep->resched_id);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	kmem_free(rgep, sizeof (*rgep));
}
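
/*
 * Note: rge_unattach() tears down in reverse order of rge_attach(),
 * using the PROGRESS_* bits to skip whatever was never set up --
 * which is why it is also safe to call from the attach_fail path in
 * rge_attach() below.
 */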

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);
	if (rgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	mutex_enter(rgep->genlock);
	rge_restart(rgep);
	mutex_exit(rgep->genlock);
	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	cyc_handler_t cychand;
	cyc_time_t cyctime;
	caddr_t regs;
	int instance;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &rgep->resched_id,
	    NULL, NULL, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &rgep->factotum_id,
	    NULL, NULL, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 */
	err = ddi_add_intr(devinfo, 0, &rgep->iblk, NULL,
	    rge_intr, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER, rgep->iblk);
	rw_init(rgep->errlock, NULL, RW_DRIVER, rgep->iblk);
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	rgep->progress |= PROGRESS_INTR;

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;
	rgep->link_up_msg = rgep->link_down_msg = " (initialised)";

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = rgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = rgep->netaddr;
	macp->m_callbacks = &rge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = rgep->param_default_mtu;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &rgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	cychand.cyh_func = rge_chip_cyclic;
	cychand.cyh_arg = rgep;
	cychand.cyh_level = CY_LOCK_LEVEL;
	cyctime.cyt_when = 0;
	cyctime.cyt_interval = RGE_CYCLIC_PERIOD;
	mutex_enter(&cpu_lock);
	rgep->cyclic_id = cyclic_add(&cychand, &cyctime);
	mutex_exit(&cpu_lock);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 *	rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rge_stop(rgep);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any receive buffers are still posted (loaned upstream), the
	 * driver must refuse to detach; the upper layers have to release
	 * them first.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}