/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek Gigabit Ethernet Driver v%I%";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)16,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
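
/*
 * Note: dma_attr_sgllen is 1 both here and in dma_attr_desc below, so
 * every binding must complete with a single DMA cookie;
 * rge_alloc_dma_mem() enforces this by failing any bind that yields
 * ncookies != 1.
 */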

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr version */
	(uint32_t)0,		/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_count_max */
	(uint32_t)256,		/* dma_attr_align */
	0xFFFFFFFF,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
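
/*
 * Both DMA access attributes specify DDI_NEVERSWAP_ACC: descriptor
 * fields are byte-swapped explicitly with RGE_BSWAP_32() in the ring
 * setup code below, rather than by the access handle.
 */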

static ether_addr_t rge_broadcast_addr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * Property names
 */
static char debug_propname[] = "rge-debug-flags";


/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
    ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
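
/*
 * Usage sketch (illustrative only; the real callers are in
 * rge_alloc_bufs() below):
 *
 *	err = rge_alloc_dma_mem(rgep, size, &dma_attr_desc,
 *	    &rge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area);
 *
 * On success the single DMA cookie (guaranteed by dma_attr_sgllen == 1)
 * is left in area.cookie and the kernel mapping in area.mem_va.
 */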

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty * size;
	ASSERT(size > 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
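
/*
 * Worked example (illustrative values): rge_init_send_ring() below
 * carves one descriptor at a time with qty == 1 and
 * size == sizeof (rge_bd_t).  Each call advances chunk->mem_va,
 * chunk->offset and chunk->cookie.dmac_laddress by that size and
 * shrinks chunk->alength and chunk->cookie.dmac_size to match, so
 * successive slices are contiguous in both virtual and DMA space.
 */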


static int
rge_alloc_bufs(rge_t *rgep)
{
	size_t txdescsize;
	size_t rxdescsize;
	size_t txbuffsize;
	size_t rxbuffsize;
	size_t freebuffsize;
	int split;
	int err;

	/*
	 * Allocate memory & handle for packet statistics
	 */
	err = rge_alloc_dma_mem(rgep,
	    RGE_STATS_DUMP_SIZE,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_stats);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);
	rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

	/*
	 * Allocate memory & handle for Tx descriptor ring
	 */
	txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    txdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_txdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Rx descriptor ring
	 */
	rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
	err = rge_alloc_dma_mem(rgep,
	    rxdescsize,
	    &dma_attr_desc,
	    &rge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rgep->dma_area_rxdesc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handle for Tx buffers
	 */
	txbuffsize = RGE_SEND_SLOTS * rgep->txbuf_size;
	ASSERT((txbuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    txbuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &rgep->dma_area_txbuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handle for Rx buffers
	 */
	rxbuffsize = RGE_RECV_SLOTS * rgep->rxbuf_size;
	ASSERT((rxbuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    rxbuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &rgep->dma_area_rxbuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handle for free Rx buffers
	 */
	freebuffsize = RGE_BUF_SLOTS * rgep->rxbuf_size;
	ASSERT((freebuffsize % RGE_SPLIT) == 0);
	for (split = 0; split < RGE_SPLIT; ++split) {
		err = rge_alloc_dma_mem(rgep,
		    freebuffsize/RGE_SPLIT,
		    &dma_attr_buf,
		    &rge_buf_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &rgep->dma_area_freebuf[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
	int i;

	rge_free_dma_mem(&rgep->dma_area_stats);
	rge_free_dma_mem(&rgep->dma_area_txdesc);
	rge_free_dma_mem(&rgep->dma_area_rxdesc);
	for (i = 0; i < RGE_SPLIT; i++) {
		rge_free_dma_mem(&rgep->dma_area_txbuf[i]);
		rge_free_dma_mem(&rgep->dma_area_rxbuf[i]);
		rge_free_dma_mem(&rgep->dma_area_freebuf[i]);
	}
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
	sw_sbd_t *ssbdp;
	rge_bd_t *bdp;
	uint32_t slot;

	/*
	 * re-init send ring
	 */
	DMA_ZERO(rgep->tx_desc);
	ssbdp = rgep->sw_sbds;
	bdp = rgep->tx_ring;
	for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
		bdp->host_buf_addr =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
		/* last BD in Tx ring */
		if (slot == (RGE_SEND_SLOTS - 1))
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
		ssbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_area_t *pbuf;
	uint32_t slot;

	/*
	 * re-init receive ring
	 */
	DMA_ZERO(rgep->rx_desc);
	srbdp = rgep->sw_rbds;
	bdp = rgep->rx_ring;
	for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
		pbuf = &srbdp->rx_buf->pbuf;
		bdp->host_buf_addr =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress + RGE_HEADROOM);
		bdp->host_buf_addr_hi =
		    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
		bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
		    (rgep->rxbuf_size - RGE_HEADROOM));
		/* last BD in Rx ring */
		if (slot == (RGE_RECV_SLOTS - 1))
			bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
		srbdp++;
		bdp++;
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	rgep->watchdog = 0;
	rgep->rx_next = 0;
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{
	/*
	 * re-init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	if (rgep->rx_free != RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
	rge_reinit_send_ring(rgep);
	rge_reinit_recv_ring(rgep);
	rge_reinit_buf_ring(rgep);
}

static void
rge_init_send_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	rge_bd_t *bdp;
	sw_sbd_t *ssbdp;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w Tx Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(RGE_SEND_SLOTS * sizeof (*ssbdp), KM_SLEEP);
	rgep->sw_sbds = ssbdp;

	/*
	 * Init send ring
	 */
	rgep->tx_next = 0;
	rgep->tc_next = 0;
	rgep->tc_tail = 0;
	rgep->tx_flow = 0;
	rgep->tx_free = RGE_SEND_SLOTS;
	rgep->tx_desc = rgep->dma_area_txdesc;
	DMA_ZERO(rgep->tx_desc);
	bdp = rgep->tx_desc.mem_va;
	rgep->tx_ring = bdp;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_txbuf[split];
		for (slot = 0; slot < RGE_SEND_SLOTS/RGE_SPLIT; slot++) {
			rge_slice_chunk(&ssbdp->desc, &rgep->dma_area_txdesc,
			    1, sizeof (rge_bd_t));
			pbuf = &ssbdp->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->txbuf_size);
			bdp->host_buf_addr =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress);
			bdp->host_buf_addr_hi =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
			/* last BD in Tx ring */
			if (split == (RGE_SPLIT - 1) &&
			    slot == (RGE_SEND_SLOTS/RGE_SPLIT - 1))
				bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
			ssbdp++;
			bdp++;
		}
	}
	DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	rge_bd_t *bdp;
	sw_rbd_t *srbdp;
	dma_buf_t *rx_buf;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w Rx Buffer Descriptors
	 */
	srbdp = kmem_zalloc(RGE_RECV_SLOTS * sizeof (*srbdp), KM_SLEEP);
	rx_buf = kmem_zalloc(RGE_RECV_SLOTS * sizeof (*rx_buf), KM_SLEEP);
	rgep->sw_rbds = srbdp;
	rgep->sw_rbuf = rx_buf;

	/*
	 * Init receive ring
	 */
	rgep->rx_next = 0;
	rgep->rx_desc = rgep->dma_area_rxdesc;
	DMA_ZERO(rgep->rx_desc);
	bdp = rgep->rx_desc.mem_va;
	rgep->rx_ring = bdp;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_rxbuf[split];
		for (slot = 0; slot < RGE_RECV_SLOTS/RGE_SPLIT; slot++) {
			srbdp->rx_buf = rx_buf;
			pbuf = &rx_buf->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->rxbuf_size);
			pbuf->alength -= RGE_HEADROOM;
			pbuf->offset += RGE_HEADROOM;
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_problem(rgep,
				    "rge_init_recv_ring: desballoc() failed");
				return (DDI_FAILURE);
			}

			bdp->host_buf_addr = RGE_BSWAP_32(RGE_HEADROOM +
			    pbuf->cookie.dmac_laddress);
			bdp->host_buf_addr_hi =
			    RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
			bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
			    (rgep->rxbuf_size - RGE_HEADROOM));
			/* last BD in Rx ring */
			if (split == (RGE_SPLIT - 1) &&
			    slot == (RGE_RECV_SLOTS/RGE_SPLIT - 1))
				bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
			srbdp++;
			bdp++;
			rx_buf++;
		}
	}
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}
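
/*
 * Note on the desballoc(9F) usage above and below: each receive buffer
 * is wrapped in an mblk without copying, and the registered free
 * routine (rge_rx_recycle) runs when the upstream consumer eventually
 * frees that mblk, at which point the driver can repost the buffer.
 */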

static int
rge_init_buf_ring(rge_t *rgep)
{
	uint32_t slot;
	uint32_t split;
	sw_rbd_t *free_rbdp;
	dma_buf_t *rx_buf;
	dma_area_t buf_chunk;
	dma_area_t *pbuf;

	/*
	 * Allocate the array of s/w free Buffer Descriptors
	 */
	free_rbdp = kmem_zalloc(RGE_BUF_SLOTS * sizeof (*free_rbdp), KM_SLEEP);
	rx_buf = kmem_zalloc(RGE_BUF_SLOTS * sizeof (*rx_buf), KM_SLEEP);
	rgep->free_rbds = free_rbdp;
	rgep->sw_freebuf = rx_buf;

	/*
	 * Init free buffer ring
	 */
	rgep->rc_next = 0;
	rgep->rf_next = 0;
	rgep->rx_bcopy = B_FALSE;
	rgep->rx_free = RGE_BUF_SLOTS;
	for (split = 0; split < RGE_SPLIT; split++) {
		buf_chunk = rgep->dma_area_freebuf[split];
		for (slot = 0; slot < RGE_BUF_SLOTS/RGE_SPLIT; slot++) {
			free_rbdp->rx_buf = rx_buf;
			pbuf = &rx_buf->pbuf;
			rge_slice_chunk(pbuf, &buf_chunk, 1, rgep->rxbuf_size);
			pbuf->alength -= RGE_HEADROOM;
			pbuf->offset += RGE_HEADROOM;
			rx_buf->rx_recycle.free_func = rge_rx_recycle;
			rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
			rx_buf->private = (caddr_t)rgep;
			rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
			    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
			if (rx_buf->mp == NULL) {
				rge_problem(rgep,
				    "rge_init_buf_ring: desballoc() failed");
				return (DDI_FAILURE);
			}
			free_rbdp++;
			rx_buf++;
		}
	}
	return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
	int err;

	rge_init_send_ring(rgep);
	err = rge_init_recv_ring(rgep);
	if (err != DDI_SUCCESS)
		return (err);
	return (rge_init_buf_ring(rgep));
}

static void
rge_fini_send_ring(rge_t *rgep)
{
	kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
	dma_buf_t *rx_buf = rgep->sw_rbuf;
	uint32_t slot;

	for (slot = 0; slot < RGE_RECV_SLOTS; slot++, rx_buf++)
		freemsg(rx_buf->mp);
	kmem_free(rgep->sw_rbuf, RGE_RECV_SLOTS * sizeof (dma_buf_t));
	kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
	dma_buf_t *rx_buf = rgep->sw_freebuf;
	uint32_t slot;

	for (slot = 0; slot < RGE_BUF_SLOTS; slot++, rx_buf++)
		freemsg(rx_buf->mp);
	kmem_free(rgep->sw_freebuf, RGE_BUF_SLOTS * sizeof (dma_buf_t));
	kmem_free(rgep->free_rbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
}

static void
rge_fini_rings(rge_t *rgep)
{
	rge_fini_send_ring(rgep);
	rge_fini_recv_ring(rgep);
	rge_fini_buf_ring(rgep);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 *	rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	mutex_enter(rgep->rx_lock);
	mutex_enter(rgep->rc_lock);
	rw_enter(rgep->errlock, RW_WRITER);

	(void) rge_chip_reset(rgep);
	rge_reinit_rings(rgep);
	rge_chip_init(rgep);

	/*
	 * Free the world ...
	 */
	rw_exit(rgep->errlock);
	mutex_exit(rgep->rc_lock);
	mutex_exit(rgep->rx_lock);

	RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 *	rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	rge_chip_stop(rgep, B_FALSE);

	RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 *	rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	rge_chip_start(rgep);
	rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
	uint32_t i;

	ASSERT(mutex_owned(rgep->genlock));
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
		}
	}
	rge_reset(rgep);
	rgep->stats.chip_reset++;
	if (rgep->rge_mac_state == RGE_MAC_STARTED) {
		rge_start(rgep);
		ddi_trigger_softintr(rgep->resched_id);
	}
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/
	uint32_t i;

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(rgep->genlock);
	rge_stop(rgep);
	rgep->link_up_msg = rgep->link_down_msg = " (stopped)";
	/*
	 * Wait for posted buffers to be freed ...
	 */
	if (!rgep->rx_bcopy) {
		for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
			if (rgep->rx_free == RGE_BUF_SLOTS)
				break;
			drv_usecwait(1000);
			RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
		}
	}
	rgep->rge_mac_state = RGE_MAC_STOPPED;
	RGE_DEBUG(("rge_m_stop($%p) done", arg));
	mutex_exit(rgep->genlock);
}

/*
 *	rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
	rge_t *rgep = arg;		/* private device info	*/

	mutex_enter(rgep->genlock);

	/*
	 * Clear hw/sw statistics
	 */
	DMA_ZERO(rgep->dma_area_stats);
	bzero(&rgep->stats, sizeof (rge_stats_t));

	/*
	 * Start processing and record new MAC state
	 */
	rge_reset(rgep);
	rgep->link_up_msg = rgep->link_down_msg = " (initialized)";
	rge_start(rgep);
	rgep->rge_mac_state = RGE_MAC_STARTED;
	RGE_DEBUG(("rge_m_start($%p) done", arg));

	mutex_exit(rgep->genlock);

	return (0);
}

/*
 *	rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
	rge_t *rgep = arg;		/* private device info	*/

	/*
	 * Remember the new current address in the driver state,
	 * and sync the chip's idea of the address too ...
	 */
	mutex_enter(rgep->genlock);
	bcopy(macaddr, rgep->netaddr, ETHERADDRL);
	rge_chip_sync(rgep, RGE_SET_MAC);
	mutex_exit(rgep->genlock);

	return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
	uint32_t crc = (uint32_t)RGE_HASH_CRC;
	uint32_t const POLY = RGE_HASH_POLY;
	uint32_t msb;
	int bytes;
	uchar_t currentbyte;
	uint32_t index;
	int bit;

	for (bytes = 0; bytes < ETHERADDRL; bytes++) {
		currentbyte = mca[bytes];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}
	index = crc >> 26;

	return (index);
}
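
/*
 * Example of how the 6-bit result is consumed (see rge_m_multicst()
 * below): an index of 37 selects bit (37 % 32) == 5 of word
 * hashp[37/32] == hashp[1] in the 64-bit multicast hash map.
 */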

/*
 *	rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	rge_t *rgep = arg;		/* private device info	*/
	struct ether_addr *addr;
	uint32_t index;
	uint32_t *hashp;

	mutex_enter(rgep->genlock);
	hashp = rgep->mcast_hash;
	addr = (struct ether_addr *)mca;
	index = rge_hash_index(addr->ether_addr_octet);
			/* index value is between 0 and 63 */

	if (add) {
		if (rgep->mcast_refs[index]++) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[index/32] |= 1 << (index % 32);
	} else {
		if (--rgep->mcast_refs[index]) {
			mutex_exit(rgep->genlock);
			return (0);
		}
		hashp[index/32] &= ~(1 << (index % 32));
	}

	/*
	 * Set multicast register
	 */
	rge_chip_sync(rgep, RGE_SET_MUL);

	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
	rge_t *rgep = arg;

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(rgep->genlock);

	if (rgep->promisc == on) {
		mutex_exit(rgep->genlock);
		return (0);
	}
	rgep->promisc = on;
	rge_chip_sync(rgep, RGE_SET_PROMISC);
	RGE_DEBUG(("rge_m_promisc($%p) done", arg));
	mutex_exit(rgep->genlock);
	return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	RGE_LOOP_NONE		},
	{ internal,	"PHY",		RGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		RGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
	const char *msg;

	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == rgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case RGE_LOOP_NONE:
		msg = " (loopback disabled)";
		break;

	case RGE_LOOP_INTERNAL_PHY:
		msg = " (PHY internal loopback selected)";
		break;

	case RGE_LOOP_INTERNAL_MAC:
		msg = " (MAC internal loopback selected)";
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	rgep->link_down_msg = rgep->link_up_msg = msg;
	rgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = rgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (rge_set_loop_mode(rgep, *lbmp));
	}
}
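
/*
 * Illustrative only (a user-level sketch, not part of this driver):
 * the loopback commands above arrive as STREAMS ioctls with the
 * payload in mp->b_cont, e.g.
 *
 *	uint32_t mode = RGE_LOOP_INTERNAL_PHY;
 *	struct strioctl sioc = { LB_SET_MODE, -1, sizeof (mode),
 *	    (char *)&mode };
 *	(void) ioctl(fd, I_STR, &sioc);
 */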

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	rge_t *rgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 * Otherwise just check for root access ...
		 */
		if (secpolicy_net_config != NULL)
			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		else
			err = drv_priv(iocp->ioc_cr);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(rgep->genlock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case RGE_MII_READ:
	case RGE_MII_WRITE:
	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		status = rge_chip_ioctl(rgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = rge_loop_ioctl(rgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = rge_nd_ioctl(rgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		rge_phy_update(rgep);
		break;
	}

	mutex_exit(rgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
rge_m_resources(void *arg)
{
	rge_t *rgep = arg;
	mac_rx_fifo_t mrf;

	mutex_enter(rgep->genlock);

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = rge_chip_blank;
	mrf.mrf_arg = (void *)rgep;
	mrf.mrf_normal_blank_time = RGE_RX_INT_TIME;
	mrf.mrf_normal_pkt_count = RGE_RX_INT_PKTS;
	rgep->handle = mac_resource_add(rgep->macp, (mac_resource_t *)&mrf);

	mutex_exit(rgep->genlock);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

static void
rge_unattach(rge_t *rgep)
{
	mac_t *macp;

	/*
	 * Flag that no more activity may be initiated
	 */
	rgep->progress &= ~PROGRESS_READY;
	rgep->rge_mac_state = RGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave them reset but still powered),
	 * then clean up and free all RGE data structures
	 */
	if (rgep->cyclic_id) {
		mutex_enter(&cpu_lock);
		cyclic_remove(rgep->cyclic_id);
		mutex_exit(&cpu_lock);
	}

	if (rgep->progress & PROGRESS_KSTATS)
		rge_fini_kstats(rgep);

	if (rgep->progress & PROGRESS_PHY)
		(void) rge_phy_reset(rgep);

	if (rgep->progress & PROGRESS_INTR) {
		mutex_enter(rgep->genlock);
		(void) rge_chip_reset(rgep);
		mutex_exit(rgep->genlock);
		ddi_remove_intr(rgep->devinfo, 0, rgep->iblk);
		rge_fini_rings(rgep);
		mutex_destroy(rgep->rc_lock);
		mutex_destroy(rgep->rx_lock);
		mutex_destroy(rgep->tc_lock);
		mutex_destroy(rgep->tx_lock);
		rw_destroy(rgep->errlock);
		mutex_destroy(rgep->genlock);
	}

	if (rgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(rgep->factotum_id);

	if (rgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(rgep->resched_id);

	rge_free_bufs(rgep);

	if (rgep->progress & PROGRESS_NDD)
		rge_nd_cleanup(rgep);

	if (rgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&rgep->io_handle);

	if (rgep->progress & PROGRESS_CFG)
		pci_config_teardown(&rgep->cfg_handle);

	ddi_remove_minor_node(rgep->devinfo, NULL);
	macp = rgep->macp;
	kmem_free(macp, sizeof (*macp));
	kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
	rge_t *rgep;			/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	rgep = ddi_get_driver_private(devinfo);
	if (rgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (rgep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &rgep->chipid;
	rge_chip_cfg_init(rgep, &chipid);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off NEMO scheduling
	 */
	mutex_enter(rgep->genlock);
	rge_restart(rgep);
	mutex_exit(rgep->genlock);
	return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	rge_t *rgep;			/* Our private data	*/
	mac_t *macp;
	mac_info_t *mip;
	chip_id_t *cidp;
	cyc_handler_t cychand;
	cyc_time_t cyctime;
	caddr_t regs;
	int instance;
	int err;

	/*
	 * we don't support high level interrupts in the driver
	 */
	if (ddi_intr_hilevel(devinfo, 0) != 0) {
		cmn_err(CE_WARN,
		    "rge_attach -- unsupported high level interrupt");
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	RGE_GTRACE(("rge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	RGE_BRKPT(NULL, "rge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (rge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate mac_t and RGE private structures, and
	 * cross-link them so that given either one of these or
	 * the devinfo the others can be derived.
	 */
	macp = kmem_zalloc(sizeof (*macp), KM_SLEEP);
	rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
	ddi_set_driver_private(devinfo, rgep);
	rgep->devinfo = devinfo;
	rgep->macp = macp;
	macp->m_driver = rgep;

	/*
	 * Initialize more fields in RGE private data
	 */
	rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, rge_debug);
	(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
	    RGE_DRIVER_NAME, instance);

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &rgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_CFG;
	cidp = &rgep->chipid;
	bzero(cidp, sizeof (*cidp));
	rge_chip_cfg_init(rgep, cidp);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, 1, &regs,
	    0, 0, &rge_reg_accattr, &rgep->io_handle);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	rgep->io_regs = regs;
	rgep->progress |= PROGRESS_REGS;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (rge_nd_init(rgep)) {
		rge_problem(rgep, "rge_nd_init() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_NDD;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	rge_chip_ident(rgep);
	err = rge_alloc_bufs(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "DMA buffer allocation failed");
		goto attach_fail;
	}

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &rgep->resched_id,
	    NULL, NULL, rge_reschedule, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &rgep->factotum_id,
	    NULL, NULL, rge_chip_factotum, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	rgep->progress |= PROGRESS_FACTOTUM;

	/*
	 * Add the h/w interrupt handler and initialise mutexes
	 */
	err = ddi_add_intr(devinfo, 0, &rgep->iblk, NULL,
	    rge_intr, (caddr_t)rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(rgep->genlock, NULL, MUTEX_DRIVER, rgep->iblk);
	rw_init(rgep->errlock, NULL, RW_DRIVER, rgep->iblk);
	mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER, rgep->iblk);
	rgep->progress |= PROGRESS_INTR;

	/*
	 * Initialize rings
	 */
	err = rge_init_rings(rgep);
	if (err != DDI_SUCCESS) {
		rge_problem(rgep, "rge_init_rings() failed");
		goto attach_fail;
	}

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	rgep->param_link_up = LINK_STATE_UNKNOWN;
	rgep->link_up_msg = rgep->link_down_msg = " (initialised)";

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
	mutex_enter(rgep->genlock);
	(void) rge_chip_reset(rgep);
	rge_chip_sync(rgep, RGE_GET_MAC);
	bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
	bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
	rgep->promisc = B_FALSE;
	rgep->param_loop_mode = RGE_LOOP_NONE;
	mutex_exit(rgep->genlock);
	rge_phy_init(rgep);
	rgep->progress |= PROGRESS_PHY;

	/*
	 * Create & initialise named kstats
	 */
	rge_init_kstats(rgep, instance);
	rgep->progress |= PROGRESS_KSTATS;

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	mip = &(macp->m_info);
	mip->mi_media = DL_ETHER;
	mip->mi_sdu_min = 0;
	mip->mi_sdu_max = rgep->param_default_mtu;
	mip->mi_cksum = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
	mip->mi_poll = DL_CAPAB_POLL;

	mip->mi_addr_length = ETHERADDRL;
	bcopy(rge_broadcast_addr, mip->mi_brdcst_addr, ETHERADDRL);
	bcopy(rgep->netaddr, mip->mi_unicst_addr, ETHERADDRL);

	/*
	 * Register h/w supported statistics
	 */
	MAC_STAT_MIB(mip->mi_stat);
	mip->mi_stat[MAC_STAT_MULTIXMT] = B_FALSE;
	mip->mi_stat[MAC_STAT_BRDCSTXMT] = B_FALSE;
	mip->mi_stat[MAC_STAT_UNKNOWNS] = B_FALSE;
	mip->mi_stat[MAC_STAT_NOXMTBUF] = B_FALSE;

	MAC_STAT_ETHER(mip->mi_stat);
	mip->mi_stat[MAC_STAT_FCS_ERRORS] = B_FALSE;
	mip->mi_stat[MAC_STAT_SQE_ERRORS] = B_FALSE;
	mip->mi_stat[MAC_STAT_TX_LATE_COLLISIONS] = B_FALSE;
	mip->mi_stat[MAC_STAT_EX_COLLISIONS] = B_FALSE;
	mip->mi_stat[MAC_STAT_MACXMT_ERRORS] = B_FALSE;
	mip->mi_stat[MAC_STAT_CARRIER_ERRORS] = B_FALSE;
	mip->mi_stat[MAC_STAT_TOOLONG_ERRORS] = B_FALSE;
	mip->mi_stat[MAC_STAT_MACRCV_ERRORS] = B_FALSE;

	MAC_STAT_MII(mip->mi_stat);
	mip->mi_stat[MAC_STAT_LP_CAP_1000FDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_1000HDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_100FDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_100HDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_10FDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_10HDX] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_ASMPAUSE] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_PAUSE] = B_FALSE;
	mip->mi_stat[MAC_STAT_LP_CAP_AUTONEG] = B_FALSE;
	mip->mi_stat[MAC_STAT_LINK_ASMPAUSE] = B_FALSE;
	mip->mi_stat[MAC_STAT_LINK_PAUSE] = B_FALSE;
	mip->mi_stat[MAC_STAT_LINK_AUTONEG] = B_FALSE;

	macp->m_stat = rge_m_stat;
	macp->m_stop = rge_m_stop;
	macp->m_start = rge_m_start;
	macp->m_unicst = rge_m_unicst;
	macp->m_multicst = rge_m_multicst;
	macp->m_promisc = rge_m_promisc;
	macp->m_tx = rge_m_tx;
	macp->m_resources = rge_m_resources;
	macp->m_ioctl = rge_m_ioctl;

	macp->m_dip = devinfo;
	macp->m_ident = MAC_IDENT;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	if (mac_register(macp) != 0)
		goto attach_fail;

	cychand.cyh_func = rge_chip_cyclic;
	cychand.cyh_arg = rgep;
	cychand.cyh_level = CY_LOCK_LEVEL;
	cyctime.cyt_when = 0;
	cyctime.cyt_interval = RGE_CYCLIC_PERIOD;
	mutex_enter(&cpu_lock);
	rgep->cyclic_id = cyclic_add(&cychand, &cyctime);
	mutex_exit(&cpu_lock);

	rgep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	rge_unattach(rgep);
	return (DDI_FAILURE);
}

/*
 *	rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(rgep->genlock);
	rge_stop(rgep);
	mutex_exit(rgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	rge_t *rgep;

	RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

	rgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (rge_suspend(rgep));

	case DDI_DETACH:
		break;
	}

	/*
	 * If any receive buffers are still posted (loaned upstream),
	 * refuse to detach; the upper layer must release them first.
	 */
	if (rgep->rx_free != RGE_BUF_SLOTS)
		return (DDI_FAILURE);

	/*
	 * Unregister from the MAC layer subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(rgep->macp) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	rge_unattach(rgep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv rge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	rge_ident,		/* short description */
	&rge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&rge_dev_ops, "rge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&rge_dev_ops);

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&rge_dev_ops);
		mutex_destroy(rge_log_mutex);
	}
	return (status);
}