xref: /illumos-gate/usr/src/uts/common/io/nge/nge_main.c (revision bea83d026ee1bd1b2a2419e1d0232f107a5d7d9b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "nge.h"
30 
31 /*
32  * Describes the chip's DMA engine
33  */
34 
/*
 * DMA attributes for buffer/descriptor areas in "hot" (mode 3)
 * descriptor mode: 40-bit addressing (addr_hi = 2^40 - 1), 16-byte
 * alignment, single cookie per binding.
 */
static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
49 
/*
 * DMA attributes for binding TX payload mblks in "hot" (mode 3)
 * descriptor mode: 40-bit addressing, up to NGE_MAX_COOKIES
 * scatter/gather cookies per binding.
 */
static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
64 
/*
 * DMA attributes for buffer/descriptor areas in "sum" (mode 2,
 * checksum-offload) descriptor mode: 32-bit addressing only,
 * single cookie per binding.
 */
static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
79 
/*
 * DMA attributes for binding TX payload mblks in "sum" (mode 2)
 * descriptor mode: 32-bit addressing, up to NGE_MAX_COOKIES
 * scatter/gather cookies per binding.
 */
static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
94 
95 /*
96  * DMA access attributes for data.
97  */
98 ddi_device_acc_attr_t nge_data_accattr = {
99 	DDI_DEVICE_ATTR_V0,
100 	DDI_STRUCTURE_LE_ACC,
101 	DDI_STRICTORDER_ACC,
102 	DDI_DEFAULT_ACC
103 };
104 
105 /*
106  * DMA access attributes for descriptors.
107  */
108 static ddi_device_acc_attr_t nge_desc_accattr = {
109 	DDI_DEVICE_ATTR_V0,
110 	DDI_STRUCTURE_LE_ACC,
111 	DDI_STRICTORDER_ACC,
112 	DDI_DEFAULT_ACC
113 };
114 
115 /*
116  * PIO access attributes for registers
117  */
118 static ddi_device_acc_attr_t nge_reg_accattr = {
119 	DDI_DEVICE_ATTR_V0,
120 	DDI_STRUCTURE_LE_ACC,
121 	DDI_STRICTORDER_ACC,
122 	DDI_DEFAULT_ACC
123 };
124 
125 /*
126  * NIC DESC MODE 2
127  */
128 
129 static const nge_desc_attr_t nge_sum_desc = {
130 
131 	sizeof (sum_rx_bd),
132 	sizeof (sum_tx_bd),
133 	&sum_dma_attr,
134 	&sum_tx_dma_attr,
135 	nge_sum_rxd_fill,
136 	nge_sum_rxd_check,
137 	nge_sum_txd_fill,
138 	nge_sum_txd_check,
139 };
140 
141 /*
142  * NIC DESC MODE 3
143  */
144 
145 static const nge_desc_attr_t nge_hot_desc = {
146 
147 	sizeof (hot_rx_bd),
148 	sizeof (hot_tx_bd),
149 	&hot_dma_attr,
150 	&hot_tx_dma_attr,
151 	nge_hot_rxd_fill,
152 	nge_hot_rxd_check,
153 	nge_hot_txd_fill,
154 	nge_hot_txd_check,
155 };
156 
/*
 * Driver identification string, and the names of the .conf/device
 * properties consulted by nge_get_props().
 */
static char nge_ident[] = "nVidia 1Gb Ethernet %I%";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[]	= "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";
extern kmutex_t nge_log_mutex[1];	/* shared logging lock, see nge_log.c */
170 
/* GLDv3 entry points implemented in this file */
static int		nge_m_start(void *);
static void		nge_m_stop(void *);
static int		nge_m_promisc(void *, boolean_t);
static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
static int		nge_m_unicst(void *, const uint8_t *);
static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);

#define		NGE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * Callback vector registered with the MAC layer; the flags above
 * declare which of the optional entry points are provided.
 */
static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	NULL,
	nge_m_ioctl,
	nge_m_getcapab
};

/* Interrupt registration/teardown helpers defined later in this file */
static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable: when B_TRUE, nge_add_intrs() may try MSI
 * before falling back to fixed interrupts.
 */
boolean_t nge_enable_msi = B_FALSE;
203 
204 static enum ioc_reply
205 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
206 {
207 	/*
208 	 * If the mode isn't being changed, there's nothing to do ...
209 	 */
210 	if (mode == ngep->param_loop_mode)
211 		return (IOC_ACK);
212 
213 	/*
214 	 * Validate the requested mode and prepare a suitable message
215 	 * to explain the link down/up cycle that the change will
216 	 * probably induce ...
217 	 */
218 	switch (mode) {
219 	default:
220 		return (IOC_INVAL);
221 
222 	case NGE_LOOP_NONE:
223 	case NGE_LOOP_EXTERNAL_100:
224 	case NGE_LOOP_EXTERNAL_10:
225 	case NGE_LOOP_INTERNAL_PHY:
226 		break;
227 	}
228 
229 	/*
230 	 * All OK; tell the caller to reprogram
231 	 * the PHY and/or MAC for the new mode ...
232 	 */
233 	ngep->param_loop_mode = mode;
234 	return (IOC_RESTART_ACK);
235 }
236 
237 #undef	NGE_DBG
238 #define	NGE_DBG		NGE_DBG_INIT
239 
240 /*
241  * Utility routine to carve a slice off a chunk of allocated memory,
242  * updating the chunk descriptor accordingly.  The size of the slice
243  * is given by the product of the <qty> and <size> parameters.
244  */
245 void
246 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
247     uint32_t qty, uint32_t size)
248 {
249 	size_t totsize;
250 
251 	totsize = qty*size;
252 	ASSERT(size > 0);
253 	ASSERT(totsize <= chunk->alength);
254 
255 	*slice = *chunk;
256 	slice->nslots = qty;
257 	slice->size = size;
258 	slice->alength = totsize;
259 
260 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
261 	chunk->alength -= totsize;
262 	chunk->offset += totsize;
263 	chunk->cookie.dmac_laddress += totsize;
264 	chunk->cookie.dmac_size -= totsize;
265 }
266 
267 /*
268  * Allocate an area of memory and a DMA handle for accessing it
269  */
270 int
271 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
272     uint_t dma_flags, dma_area_t *dma_p)
273 {
274 	int err;
275 	caddr_t va;
276 
277 	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
278 	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
279 	/*
280 	 * Allocate handle
281 	 */
282 	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
283 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
284 	if (err != DDI_SUCCESS)
285 		goto fail;
286 
287 	/*
288 	 * Allocate memory
289 	 */
290 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
291 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
292 	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
293 	if (err != DDI_SUCCESS)
294 		goto fail;
295 
296 	/*
297 	 * Bind the two together
298 	 */
299 	dma_p->mem_va = va;
300 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
301 	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
302 	    &dma_p->cookie, &dma_p->ncookies);
303 
304 	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
305 		goto fail;
306 
307 	dma_p->nslots = ~0U;
308 	dma_p->size = ~0U;
309 	dma_p->offset = 0;
310 
311 	return (DDI_SUCCESS);
312 
313 fail:
314 	nge_free_dma_mem(dma_p);
315 	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));
316 
317 	return (DDI_FAILURE);
318 }
319 
320 /*
321  * Free one allocated area of DMAable memory
322  */
323 void
324 nge_free_dma_mem(dma_area_t *dma_p)
325 {
326 	if (dma_p->dma_hdl != NULL) {
327 		if (dma_p->ncookies) {
328 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
329 			dma_p->ncookies = 0;
330 		}
331 	}
332 	if (dma_p->acc_hdl != NULL) {
333 		ddi_dma_mem_free(&dma_p->acc_hdl);
334 		dma_p->acc_hdl = NULL;
335 	}
336 	if (dma_p->dma_hdl != NULL) {
337 		ddi_dma_free_handle(&dma_p->dma_hdl);
338 		dma_p->dma_hdl = NULL;
339 	}
340 }
341 
342 #define	ALLOC_TX_BUF	0x1
343 #define	ALLOC_TX_DESC	0x2
344 #define	ALLOC_RX_DESC	0x4
345 
346 int
347 nge_alloc_bufs(nge_t *ngep)
348 {
349 	int err;
350 	int split;
351 	int progress;
352 	size_t txbuffsize;
353 	size_t rxdescsize;
354 	size_t txdescsize;
355 
356 	txbuffsize = ngep->tx_desc * ngep->buf_size;
357 	rxdescsize = ngep->rx_desc;
358 	txdescsize = ngep->tx_desc;
359 	rxdescsize *= ngep->desc_attr.rxd_size;
360 	txdescsize *= ngep->desc_attr.txd_size;
361 	progress = 0;
362 
363 	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
364 	/*
365 	 * Allocate memory & handles for TX buffers
366 	 */
367 	ASSERT((txbuffsize % ngep->nge_split) == 0);
368 	for (split = 0; split < ngep->nge_split; ++split) {
369 		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
370 		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
371 		    &ngep->send->buf[split]);
372 		if (err != DDI_SUCCESS)
373 			goto fail;
374 	}
375 
376 	progress |= ALLOC_TX_BUF;
377 
378 	/*
379 	 * Allocate memory & handles for receive return rings and
380 	 * buffer (producer) descriptor rings
381 	 */
382 	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
383 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
384 	if (err != DDI_SUCCESS)
385 		goto fail;
386 	progress |= ALLOC_RX_DESC;
387 
388 	/*
389 	 * Allocate memory & handles for TX descriptor rings,
390 	 */
391 	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
392 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
393 	if (err != DDI_SUCCESS)
394 		goto fail;
395 	return (DDI_SUCCESS);
396 
397 fail:
398 	if (progress & ALLOC_RX_DESC)
399 		nge_free_dma_mem(&ngep->recv->desc);
400 	if (progress & ALLOC_TX_BUF) {
401 		for (split = 0; split < ngep->nge_split; ++split)
402 			nge_free_dma_mem(&ngep->send->buf[split]);
403 	}
404 
405 	return (DDI_FAILURE);
406 }
407 
408 /*
409  * This routine frees the transmit and receive buffers and descriptors.
410  * Make sure the chip is stopped before calling it!
411  */
412 void
413 nge_free_bufs(nge_t *ngep)
414 {
415 	int split;
416 
417 	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
418 
419 	nge_free_dma_mem(&ngep->recv->desc);
420 	nge_free_dma_mem(&ngep->send->desc);
421 
422 	for (split = 0; split < ngep->nge_split; ++split)
423 		nge_free_dma_mem(&ngep->send->buf[split]);
424 }
425 
426 /*
427  * Clean up initialisation done above before the memory is freed
428  */
429 static void
430 nge_fini_send_ring(nge_t *ngep)
431 {
432 	uint32_t slot;
433 	size_t dmah_num;
434 	send_ring_t *srp;
435 	sw_tx_sbd_t *ssbdp;
436 
437 	srp = ngep->send;
438 	ssbdp = srp->sw_sbds;
439 
440 	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
441 
442 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
443 
444 	for (slot = 0; slot < dmah_num; ++slot) {
445 		if (srp->dmahndl[slot].hndl) {
446 			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
447 			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
448 			srp->dmahndl[slot].hndl = NULL;
449 			srp->dmahndl[slot].next = NULL;
450 		}
451 	}
452 
453 	srp->dmah_free.head = NULL;
454 	srp->dmah_free.tail = NULL;
455 
456 	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
457 
458 }
459 
460 /*
461  * Initialise the specified Send Ring, using the information in the
462  * <dma_area> descriptors that it contains to set up all the other
463  * fields. This routine should be called only once for each ring.
464  */
465 static int
466 nge_init_send_ring(nge_t *ngep)
467 {
468 	size_t dmah_num;
469 	uint32_t nslots;
470 	uint32_t err;
471 	uint32_t slot;
472 	uint32_t split;
473 	send_ring_t *srp;
474 	sw_tx_sbd_t *ssbdp;
475 	dma_area_t desc;
476 	dma_area_t pbuf;
477 
478 	srp = ngep->send;
479 	srp->desc.nslots = ngep->tx_desc;
480 	nslots = srp->desc.nslots;
481 
482 	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
483 	/*
484 	 * Other one-off initialisation of per-ring data
485 	 */
486 	srp->ngep = ngep;
487 
488 	/*
489 	 * Allocate the array of s/w Send Buffer Descriptors
490 	 */
491 	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
492 	srp->sw_sbds = ssbdp;
493 
494 	/*
495 	 * Now initialise each array element once and for all
496 	 */
497 	desc = srp->desc;
498 	for (split = 0; split < ngep->nge_split; ++split) {
499 		pbuf = srp->buf[split];
500 		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
501 			nge_slice_chunk(&ssbdp->desc, &desc, 1,
502 			    ngep->desc_attr.txd_size);
503 			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
504 			    ngep->buf_size);
505 		}
506 		ASSERT(pbuf.alength == 0);
507 	}
508 	ASSERT(desc.alength == 0);
509 
510 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
511 
512 	/* preallocate dma handles for tx buffer */
513 	for (slot = 0; slot < dmah_num; ++slot) {
514 
515 		err = ddi_dma_alloc_handle(ngep->devinfo,
516 		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
517 		    NULL, &srp->dmahndl[slot].hndl);
518 
519 		if (err != DDI_SUCCESS) {
520 			nge_fini_send_ring(ngep);
521 			nge_error(ngep,
522 			    "nge_init_send_ring: alloc dma handle fails");
523 			return (DDI_FAILURE);
524 		}
525 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
526 	}
527 
528 	srp->dmah_free.head = srp->dmahndl;
529 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
530 	srp->dmah_free.tail->next = NULL;
531 
532 	return (DDI_SUCCESS);
533 }
534 
535 /*
536  * Intialize the tx recycle pointer and tx sending pointer of tx ring
537  * and set the type of tx's data descriptor by default.
538  */
539 static void
540 nge_reinit_send_ring(nge_t *ngep)
541 {
542 	size_t dmah_num;
543 	uint32_t slot;
544 	send_ring_t *srp;
545 	sw_tx_sbd_t *ssbdp;
546 
547 	srp = ngep->send;
548 
549 	/*
550 	 * Reinitialise control variables ...
551 	 */
552 
553 	srp->tx_hwmark = NGE_DESC_MIN;
554 	srp->tx_lwmark = NGE_DESC_MIN;
555 
556 	srp->tx_next = 0;
557 	srp->tx_free = srp->desc.nslots;
558 	srp->tc_next = 0;
559 
560 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
561 
562 	for (slot = 0; slot - dmah_num != 0; ++slot)
563 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
564 
565 	srp->dmah_free.head = srp->dmahndl;
566 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
567 	srp->dmah_free.tail->next = NULL;
568 
569 	/*
570 	 * Zero and sync all the h/w Send Buffer Descriptors
571 	 */
572 	for (slot = 0; slot < srp->desc.nslots; ++slot) {
573 		ssbdp = &srp->sw_sbds[slot];
574 		ssbdp->flags = HOST_OWN;
575 	}
576 
577 	DMA_ZERO(srp->desc);
578 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
579 }
580 
581 /*
582  * Initialize the slot number of rx's ring
583  */
584 static void
585 nge_init_recv_ring(nge_t *ngep)
586 {
587 	recv_ring_t *rrp;
588 
589 	rrp = ngep->recv;
590 	rrp->desc.nslots = ngep->rx_desc;
591 	rrp->ngep = ngep;
592 }
593 
594 /*
595  * Intialize the rx recycle pointer and rx sending pointer of rx ring
596  */
597 static void
598 nge_reinit_recv_ring(nge_t *ngep)
599 {
600 	recv_ring_t *rrp;
601 
602 	rrp = ngep->recv;
603 
604 	/*
605 	 * Reinitialise control variables ...
606 	 */
607 	rrp->prod_index = 0;
608 	/*
609 	 * Zero and sync all the h/w Send Buffer Descriptors
610 	 */
611 	DMA_ZERO(rrp->desc);
612 	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
613 }
614 
615 /*
616  * Clean up initialisation done above before the memory is freed
617  */
618 static void
619 nge_fini_buff_ring(nge_t *ngep)
620 {
621 	uint32_t i;
622 	buff_ring_t *brp;
623 	dma_area_t *bufp;
624 	sw_rx_sbd_t *bsbdp;
625 
626 	brp = ngep->buff;
627 	bsbdp = brp->sw_rbds;
628 
629 	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
630 
631 	mutex_enter(brp->recycle_lock);
632 	brp->buf_sign++;
633 	mutex_exit(brp->recycle_lock);
634 	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
635 		if (bsbdp->bufp) {
636 			if (bsbdp->bufp->mp)
637 				freemsg(bsbdp->bufp->mp);
638 			nge_free_dma_mem(bsbdp->bufp);
639 			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
640 			bsbdp->bufp = NULL;
641 		}
642 	}
643 	while (brp->free_list != NULL) {
644 		bufp = brp->free_list;
645 		brp->free_list = bufp->next;
646 		bufp->next = NULL;
647 		if (bufp->mp)
648 			freemsg(bufp->mp);
649 		nge_free_dma_mem(bufp);
650 		kmem_free(bufp, sizeof (dma_area_t));
651 	}
652 	while (brp->recycle_list != NULL) {
653 		bufp = brp->recycle_list;
654 		brp->recycle_list = bufp->next;
655 		bufp->next = NULL;
656 		if (bufp->mp)
657 			freemsg(bufp->mp);
658 		nge_free_dma_mem(bufp);
659 		kmem_free(bufp, sizeof (dma_area_t));
660 	}
661 
662 
663 	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
664 	brp->sw_rbds = NULL;
665 }
666 
667 /*
668  * Intialize the Rx's data ring and free ring
669  */
670 static int
671 nge_init_buff_ring(nge_t *ngep)
672 {
673 	uint32_t err;
674 	uint32_t slot;
675 	uint32_t nslots_buff;
676 	uint32_t nslots_recv;
677 	buff_ring_t *brp;
678 	recv_ring_t *rrp;
679 	dma_area_t desc;
680 	dma_area_t *bufp;
681 	sw_rx_sbd_t *bsbdp;
682 
683 	rrp = ngep->recv;
684 	brp = ngep->buff;
685 	brp->nslots = ngep->rx_buf;
686 	brp->rx_bcopy = B_FALSE;
687 	nslots_recv = rrp->desc.nslots;
688 	nslots_buff = brp->nslots;
689 	brp->ngep = ngep;
690 
691 	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
692 
693 	/*
694 	 * Allocate the array of s/w Recv Buffer Descriptors
695 	 */
696 	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
697 	brp->sw_rbds = bsbdp;
698 	brp->free_list = NULL;
699 	brp->recycle_list = NULL;
700 	for (slot = 0; slot < nslots_buff; ++slot) {
701 		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
702 		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
703 		    + NGE_HEADROOM),
704 		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
705 		if (err != DDI_SUCCESS) {
706 			kmem_free(bufp, sizeof (dma_area_t));
707 			return (DDI_FAILURE);
708 		}
709 
710 		bufp->alength -= NGE_HEADROOM;
711 		bufp->offset += NGE_HEADROOM;
712 		bufp->private = (caddr_t)ngep;
713 		bufp->rx_recycle.free_func = nge_recv_recycle;
714 		bufp->rx_recycle.free_arg = (caddr_t)bufp;
715 		bufp->signature = brp->buf_sign;
716 		bufp->rx_delivered = B_FALSE;
717 		bufp->mp = desballoc(DMA_VPTR(*bufp),
718 		    ngep->buf_size + NGE_HEADROOM,
719 		    0, &bufp->rx_recycle);
720 
721 		if (bufp->mp == NULL) {
722 			return (DDI_FAILURE);
723 		}
724 		bufp->next = brp->free_list;
725 		brp->free_list = bufp;
726 	}
727 
728 	/*
729 	 * Now initialise each array element once and for all
730 	 */
731 	desc = rrp->desc;
732 	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
733 		nge_slice_chunk(&bsbdp->desc, &desc, 1,
734 		    ngep->desc_attr.rxd_size);
735 		bufp = brp->free_list;
736 		brp->free_list = bufp->next;
737 		bsbdp->bufp = bufp;
738 		bsbdp->flags = CONTROLER_OWN;
739 		bufp->next = NULL;
740 	}
741 
742 	ASSERT(desc.alength == 0);
743 	return (DDI_SUCCESS);
744 }
745 
746 /*
747  * Fill the host address of data in rx' descriptor
748  * and initialize free pointers of rx free ring
749  */
750 static int
751 nge_reinit_buff_ring(nge_t *ngep)
752 {
753 	uint32_t slot;
754 	uint32_t nslots_recv;
755 	buff_ring_t *brp;
756 	recv_ring_t *rrp;
757 	sw_rx_sbd_t *bsbdp;
758 	void *hw_bd_p;
759 
760 	brp = ngep->buff;
761 	rrp = ngep->recv;
762 	bsbdp = brp->sw_rbds;
763 	nslots_recv = rrp->desc.nslots;
764 	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
765 		hw_bd_p = DMA_VPTR(bsbdp->desc);
766 	/*
767 	 * There is a scenario: When the traffic of small tcp
768 	 * packet is heavy, suspending the tcp traffic will
769 	 * cause the preallocated buffers for rx not to be
770 	 * released in time by tcp taffic and cause rx's buffer
771 	 * pointers not to be refilled in time.
772 	 *
773 	 * At this point, if we reinitialize the driver, the bufp
774 	 * pointer for rx's traffic will be NULL.
775 	 * So the result of the reinitializion fails.
776 	 */
777 		if (bsbdp->bufp == NULL)
778 			return (DDI_FAILURE);
779 
780 		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
781 		    bsbdp->bufp->alength);
782 	}
783 	return (DDI_SUCCESS);
784 }
785 
786 static void
787 nge_init_ring_param_lock(nge_t *ngep)
788 {
789 	buff_ring_t *brp;
790 	send_ring_t *srp;
791 
792 	srp = ngep->send;
793 	brp = ngep->buff;
794 
795 	/* Init the locks for send ring */
796 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
797 	    DDI_INTR_PRI(ngep->intr_pri));
798 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
799 	    DDI_INTR_PRI(ngep->intr_pri));
800 	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
801 	    DDI_INTR_PRI(ngep->intr_pri));
802 
803 	/* Init parameters of buffer ring */
804 	brp->free_list = NULL;
805 	brp->recycle_list = NULL;
806 	brp->rx_hold = 0;
807 	brp->buf_sign = 0;
808 
809 	/* Init recycle list lock */
810 	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
811 	    DDI_INTR_PRI(ngep->intr_pri));
812 }
813 
814 int
815 nge_init_rings(nge_t *ngep)
816 {
817 	uint32_t err;
818 
819 	err = nge_init_send_ring(ngep);
820 	if (err != DDI_SUCCESS) {
821 		return (err);
822 	}
823 	nge_init_recv_ring(ngep);
824 
825 	err = nge_init_buff_ring(ngep);
826 	if (err != DDI_SUCCESS) {
827 		nge_fini_send_ring(ngep);
828 		return (DDI_FAILURE);
829 	}
830 
831 	return (err);
832 }
833 
834 static int
835 nge_reinit_ring(nge_t *ngep)
836 {
837 	int err;
838 
839 	nge_reinit_recv_ring(ngep);
840 	nge_reinit_send_ring(ngep);
841 	err = nge_reinit_buff_ring(ngep);
842 	return (err);
843 }
844 
845 
846 void
847 nge_fini_rings(nge_t *ngep)
848 {
849 	/*
850 	 * For receive ring, nothing need to be finished.
851 	 * So only finish buffer ring and send ring here.
852 	 */
853 	nge_fini_buff_ring(ngep);
854 	nge_fini_send_ring(ngep);
855 }
856 
857 /*
858  * Loopback ioctl code
859  */
860 
861 static lb_property_t loopmodes[] = {
862 	{ normal,	"normal",	NGE_LOOP_NONE		},
863 	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
864 	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
865 	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
866 };
867 
868 enum ioc_reply
869 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
870 {
871 	int cmd;
872 	uint32_t *lbmp;
873 	lb_info_sz_t *lbsp;
874 	lb_property_t *lbpp;
875 
876 	/*
877 	 * Validate format of ioctl
878 	 */
879 	if (mp->b_cont == NULL)
880 		return (IOC_INVAL);
881 
882 	cmd = iocp->ioc_cmd;
883 
884 	switch (cmd) {
885 	default:
886 		return (IOC_INVAL);
887 
888 	case LB_GET_INFO_SIZE:
889 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
890 			return (IOC_INVAL);
891 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
892 		*lbsp = sizeof (loopmodes);
893 		return (IOC_REPLY);
894 
895 	case LB_GET_INFO:
896 		if (iocp->ioc_count != sizeof (loopmodes))
897 			return (IOC_INVAL);
898 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
899 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
900 		return (IOC_REPLY);
901 
902 	case LB_GET_MODE:
903 		if (iocp->ioc_count != sizeof (uint32_t))
904 			return (IOC_INVAL);
905 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
906 		*lbmp = ngep->param_loop_mode;
907 		return (IOC_REPLY);
908 
909 	case LB_SET_MODE:
910 		if (iocp->ioc_count != sizeof (uint32_t))
911 			return (IOC_INVAL);
912 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
913 		return (nge_set_loop_mode(ngep, *lbmp));
914 	}
915 }
916 
917 #undef	NGE_DBG
918 #define	NGE_DBG	NGE_DBG_NEMO
919 
920 
921 static void
922 nge_check_desc_prop(nge_t *ngep)
923 {
924 	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
925 		ngep->desc_mode = DESC_HOT;
926 
927 	if (ngep->desc_mode == DESC_OFFLOAD)	{
928 
929 		ngep->desc_attr = nge_sum_desc;
930 
931 	}	else if (ngep->desc_mode == DESC_HOT)	{
932 
933 		ngep->desc_attr = nge_hot_desc;
934 	}
935 }
936 
937 /*
938  * nge_get_props -- get the parameters to tune the driver
939  */
940 static void
941 nge_get_props(nge_t *ngep)
942 {
943 	chip_info_t *infop;
944 	dev_info_t *devinfo;
945 	nge_dev_spec_param_t *dev_param_p;
946 
947 	devinfo = ngep->devinfo;
948 	infop = (chip_info_t *)&ngep->chipinfo;
949 	dev_param_p = &ngep->dev_spec_param;
950 
951 	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
952 	    DDI_PROP_DONTPASS, clsize_propname, 32);
953 
954 	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
955 	    DDI_PROP_DONTPASS, latency_propname, 64);
956 	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
957 	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
958 	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
959 	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
960 	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
961 	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
962 	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
963 	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);
964 
965 	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
966 	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
967 	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
968 	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
969 	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
970 	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
971 	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
972 	    DDI_PROP_DONTPASS, low_memory_mode, 0);
973 
974 	if (dev_param_p->jumbo) {
975 		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
976 		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
977 	} else
978 		ngep->default_mtu = ETHERMTU;
979 
980 	if (ngep->default_mtu > ETHERMTU &&
981 	    ngep->default_mtu <= NGE_MTU_2500) {
982 		ngep->buf_size = NGE_JB2500_BUFSZ;
983 		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
984 		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
985 		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
986 		ngep->nge_split = NGE_SPLIT_256;
987 	} else if (ngep->default_mtu > NGE_MTU_2500 &&
988 	    ngep->default_mtu <= NGE_MTU_4500) {
989 		ngep->buf_size = NGE_JB4500_BUFSZ;
990 		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
991 		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
992 		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
993 		ngep->nge_split = NGE_SPLIT_256;
994 	} else if (ngep->default_mtu > NGE_MTU_4500 &&
995 	    ngep->default_mtu <= NGE_MAX_MTU) {
996 		ngep->buf_size = NGE_JB9000_BUFSZ;
997 		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
998 		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
999 		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1000 		ngep->nge_split = NGE_SPLIT_256;
1001 	} else if (ngep->default_mtu > NGE_MAX_MTU) {
1002 		ngep->default_mtu = NGE_MAX_MTU;
1003 		ngep->buf_size = NGE_JB9000_BUFSZ;
1004 		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1005 		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1006 		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1007 		ngep->nge_split = NGE_SPLIT_256;
1008 	} else if (ngep->lowmem_mode != 0) {
1009 		ngep->default_mtu = ETHERMTU;
1010 		ngep->buf_size = NGE_STD_BUFSZ;
1011 		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
1012 		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
1013 		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
1014 		ngep->nge_split = NGE_SPLIT_32;
1015 	} else {
1016 		ngep->default_mtu = ETHERMTU;
1017 		ngep->buf_size = NGE_STD_BUFSZ;
1018 		ngep->tx_desc = dev_param_p->tx_desc_num;
1019 		ngep->rx_desc = dev_param_p->rx_desc_num;
1020 		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
1021 		ngep->nge_split = dev_param_p->nge_split;
1022 	}
1023 
1024 	nge_check_desc_prop(ngep);
1025 }
1026 
1027 
1028 static int
1029 nge_reset(nge_t *ngep)
1030 {
1031 	int err;
1032 	nge_mul_addr1 maddr1;
1033 	nge_sw_statistics_t *sw_stp;
1034 	sw_stp = &ngep->statistics.sw_statistics;
1035 	send_ring_t *srp = ngep->send;
1036 
1037 	ASSERT(mutex_owned(ngep->genlock));
1038 	mutex_enter(srp->tc_lock);
1039 	mutex_enter(srp->tx_lock);
1040 
1041 	nge_tx_recycle_all(ngep);
1042 	err = nge_reinit_ring(ngep);
1043 	if (err == DDI_FAILURE) {
1044 		mutex_exit(srp->tx_lock);
1045 		mutex_exit(srp->tc_lock);
1046 		return (err);
1047 	}
1048 	err = nge_chip_reset(ngep);
1049 	/*
1050 	 * Clear the Multicast mac address table
1051 	 */
1052 	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
1053 	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
1054 	maddr1.addr_bits.addr = 0;
1055 	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);
1056 
1057 	mutex_exit(srp->tx_lock);
1058 	mutex_exit(srp->tc_lock);
1059 	if (err == DDI_FAILURE)
1060 		return (err);
1061 	ngep->watchdog = 0;
1062 	ngep->resched_needed = B_FALSE;
1063 	ngep->promisc = B_FALSE;
1064 	ngep->param_loop_mode = NGE_LOOP_NONE;
1065 	ngep->factotum_flag = 0;
1066 	ngep->resched_needed = 0;
1067 	ngep->nge_mac_state = NGE_MAC_RESET;
1068 	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
1069 	ngep->max_sdu += VTAG_SIZE;
1070 	ngep->rx_def = 0x16;
1071 
1072 	/* Clear the software statistics */
1073 	sw_stp->recv_count = 0;
1074 	sw_stp->xmit_count = 0;
1075 	sw_stp->rbytes = 0;
1076 	sw_stp->obytes = 0;
1077 
1078 	return (DDI_SUCCESS);
1079 }
1080 
/*
 * GLDv3 mc_stop entry point: stop the chip, recycle all TX
 * descriptors, and tear down and free the rings and buffers.
 *
 * Takes genlock, then rwlock as writer.  A suspended adapter is
 * already stopped, so nothing is done in that case.
 */
static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/* If suspended, the adapter is already stopped, just return. */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		mutex_exit(ngep->genlock);
		return;
	}
	rw_enter(ngep->rwlock, RW_WRITER);

	(void) nge_chip_stop(ngep, B_FALSE);
	/* Try to wait all the buffer post to upper layer be released */
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	/* release in reverse order of acquisition */
	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}
1114 
/*
 * GLDv3 mc_start entry point: allocate the DMA buffers, initialise
 * the rings, and (re)start the chip via nge_restart().
 *
 * Takes genlock, then rwlock as writer.  Fails immediately when
 * suspended: resume processing recalls this function with the
 * suspended flag cleared.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_FAILURE);
	}
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		/* rings failed; give back the buffers allocated above */
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x");
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
	finish:
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);

		return (err);
}
1156 
1157 static int
1158 nge_m_unicst(void *arg, const uint8_t *macaddr)
1159 {
1160 	nge_t *ngep = arg;
1161 
1162 	NGE_TRACE(("nge_m_unicst($%p)", arg));
1163 	/*
1164 	 * Remember the new current address in the driver state
1165 	 * Sync the chip's idea of the address too ...
1166 	 */
1167 	mutex_enter(ngep->genlock);
1168 
1169 	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
1170 	ngep->cur_uni_addr.set = 1;
1171 
1172 	/*
1173 	 * If we are suspended, we want to quit now, and not update
1174 	 * the chip.  Doing so might put it in a bad state, but the
1175 	 * resume will get the unicast address installed.
1176 	 */
1177 	if (ngep->suspended) {
1178 		mutex_exit(ngep->genlock);
1179 		return (DDI_SUCCESS);
1180 	}
1181 	nge_chip_sync(ngep);
1182 
1183 	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
1184 	mutex_exit(ngep->genlock);
1185 
1186 	return (0);
1187 }
1188 
1189 static int
1190 nge_m_promisc(void *arg, boolean_t on)
1191 {
1192 	nge_t *ngep = arg;
1193 
1194 	NGE_TRACE(("nge_m_promisc($%p)", arg));
1195 
1196 	/*
1197 	 * Store specified mode and pass to chip layer to update h/w
1198 	 */
1199 	mutex_enter(ngep->genlock);
1200 	/*
1201 	 * If suspended, there is no need to do anything, even
1202 	 * recording the promiscuious mode is not neccessary, as
1203 	 * it won't be properly set on resume.  Just return failing.
1204 	 */
1205 	if (ngep->suspended) {
1206 		mutex_exit(ngep->genlock);
1207 		return (DDI_FAILURE);
1208 	}
1209 	if (ngep->promisc == on) {
1210 		mutex_exit(ngep->genlock);
1211 		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1212 		return (0);
1213 	}
1214 	ngep->promisc = on;
1215 	nge_chip_sync(ngep);
1216 	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1217 	mutex_exit(ngep->genlock);
1218 
1219 	return (0);
1220 }
1221 
1222 static void nge_mulparam(nge_t *ngep)
1223 {
1224 	uint8_t number;
1225 	ether_addr_t pand;
1226 	ether_addr_t por;
1227 	mul_item *plist;
1228 
1229 	for (number = 0; number < ETHERADDRL; number++) {
1230 		pand[number] = 0x00;
1231 		por[number] = 0x00;
1232 	}
1233 	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1234 		for (number = 0; number < ETHERADDRL; number++) {
1235 			pand[number] &= plist->mul_addr[number];
1236 			por[number] |= plist->mul_addr[number];
1237 		}
1238 	}
1239 	for (number = 0; number < ETHERADDRL; number++) {
1240 		ngep->cur_mul_addr.addr[number]
1241 		    = pand[number] & por[number];
1242 		ngep->cur_mul_mask.addr[number]
1243 		    = pand [number] | (~por[number]);
1244 	}
1245 }
/*
 * nge_m_multicst() -- enable/disable reception of a multicast address
 *
 * Maintains the driver's software list of active multicast addresses
 * (ngep->pcur_mulist).  When list membership actually changes and the
 * device is not suspended, the combined hardware filter is recomputed
 * by nge_mulparam() and written to the chip by nge_chip_sync().
 * Always returns 0.
 *
 * NOTE(review): a duplicate add bumps the entry's ref_cnt, but the
 * remove path unlinks and frees the entry without consulting ref_cnt,
 * so one remove undoes any number of adds -- confirm this is intended.
 */
static int
nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	boolean_t update;	/* B_TRUE when list membership changed */
	boolean_t b_eq;
	nge_t *ngep = arg;
	mul_item *plist;
	mul_item *plist_prev;
	mul_item *pitem;

	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	update = B_FALSE;
	plist = plist_prev = NULL;
	mutex_enter(ngep->genlock);
	if (add) {
		/* Look for an existing entry for this address */
		if (ngep->pcur_mulist != NULL) {
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					plist->ref_cnt++;
					break;
				}
				plist_prev = plist;
			}
		}

		/* Not found: append a new entry to the list */
		if (plist == NULL) {
			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
			ether_copy(mca, pitem->mul_addr);
			pitem ->ref_cnt++;
			pitem ->next = NULL;
			if (plist_prev == NULL)
				ngep->pcur_mulist = pitem;
			else
				plist_prev->next = pitem;
			update = B_TRUE;
		}
	} else {
		if (ngep->pcur_mulist != NULL) {
			/* Locate the entry to remove, if present */
			for (plist = ngep->pcur_mulist; plist != NULL;
			    plist = plist->next) {
				b_eq = ether_eq(plist->mul_addr, mca);
				if (b_eq) {
					update = B_TRUE;
					break;
				}
				plist_prev = plist;
			}

			/*
			 * Unlink <plist>: sole entry, head with a
			 * successor, or interior/tail entry.
			 */
			if (update) {
				if ((plist_prev == NULL) &&
				    (plist->next == NULL))
					ngep->pcur_mulist = NULL;
				else if ((plist_prev == NULL) &&
				    (plist->next != NULL))
					ngep->pcur_mulist = plist->next;
				else
					plist_prev->next = plist->next;
				kmem_free(plist, sizeof (mul_item));
			}
		}
	}

	/* Push the recomputed filter to the chip unless suspended */
	if (update && !ngep->suspended) {
		nge_mulparam(ngep);
		nge_chip_sync(ngep);
	}
	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}
1321 
/*
 * nge_m_ioctl() -- mac_callbacks(9S) ioctl entry point
 *
 * Dispatches driver-private ioctls (MII/EEPROM/diagnostic access),
 * loopback control (LB_*) and NDD parameter get/set (ND_*).  The
 * command is validated and, where required, checked against the
 * net_config privilege before any driver state is touched.  The
 * ioc_reply code from the handler determines both whether the
 * PHY/MAC must be reprogrammed and how the message is finally
 * acknowledged.
 */
static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesireable
	 * state.  So just NAK it.
	 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl:  cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	/* read-only loopback queries need no special privilege */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		/* unknown commands were already NAKed above */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
	break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
	break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
	break;

	case ND_GET:
	case ND_SET:
		status = nge_nd_ioctl(ngep, wq, mp, iocp);
	break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
	break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/* nothing further to send from here */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/* ACK or NAK the prepared reply, per ioc_error */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
1488 
1489 /* ARGSUSED */
1490 static boolean_t
1491 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1492 {
1493 	nge_t	*ngep = arg;
1494 	nge_dev_spec_param_t *dev_param_p;
1495 
1496 	dev_param_p = &ngep->dev_spec_param;
1497 
1498 	switch (cap) {
1499 	case MAC_CAPAB_HCKSUM: {
1500 		uint32_t *hcksum_txflags = cap_data;
1501 
1502 		if (dev_param_p->tx_hw_checksum) {
1503 			*hcksum_txflags = dev_param_p->tx_hw_checksum;
1504 		} else
1505 			return (B_FALSE);
1506 		break;
1507 	}
1508 	case MAC_CAPAB_POLL:
1509 		/*
1510 		 * There's nothing for us to fill in, simply returning
1511 		 * B_TRUE, stating that we support polling is sufficient.
1512 		 */
1513 		break;
1514 	default:
1515 		return (B_FALSE);
1516 	}
1517 	return (B_TRUE);
1518 }
1519 
1520 #undef	NGE_DBG
1521 #define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
1522 int
1523 nge_restart(nge_t *ngep)
1524 {
1525 	int err = 0;
1526 	err = nge_reset(ngep);
1527 	if (!err)
1528 		err = nge_chip_start(ngep);
1529 
1530 	if (err) {
1531 		ngep->nge_mac_state = NGE_MAC_STOPPED;
1532 		return (DDI_FAILURE);
1533 	} else {
1534 		ngep->nge_mac_state = NGE_MAC_STARTED;
1535 		return (DDI_SUCCESS);
1536 	}
1537 }
1538 
1539 void
1540 nge_wake_factotum(nge_t *ngep)
1541 {
1542 	mutex_enter(ngep->softlock);
1543 	if (ngep->factotum_flag == 0) {
1544 		ngep->factotum_flag = 1;
1545 		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
1546 	}
1547 	mutex_exit(ngep->softlock);
1548 }
1549 
1550 /*
1551  * High-level cyclic handler
1552  *
1553  * This routine schedules a (low-level) softint callback to the
1554  * factotum.
1555  */
1556 
1557 static void
1558 nge_chip_cyclic(void *arg)
1559 {
1560 	nge_t *ngep;
1561 
1562 	ngep = (nge_t *)arg;
1563 
1564 	switch (ngep->nge_chip_state) {
1565 	default:
1566 		return;
1567 
1568 	case NGE_CHIP_RUNNING:
1569 		break;
1570 
1571 	case NGE_CHIP_FAULT:
1572 	case NGE_CHIP_ERROR:
1573 		break;
1574 	}
1575 
1576 	nge_wake_factotum(ngep);
1577 }
1578 
/*
 * nge_unattach() -- undo everything nge_attach() did, in reverse order
 *
 * Each teardown step is guarded by the corresponding PROGRESS_* bit in
 * ngep->progress, so this is safe to call from a partially completed
 * attach as well as from a full detach.  The soft state itself is
 * freed last.
 */
static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_NDD)
		nge_nd_cleanup(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	/* interrupts are gone, so the locks can be destroyed safely */
	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}
1647 
/*
 * nge_resume() -- DDI_RESUME processing
 *
 * Re-reads the PCI config space and, only when the port was plumbed
 * and running at suspend time (ngep->suspended set), rebuilds the
 * rings and restarts the chip.  Always returns DDI_SUCCESS: a
 * controller that fails to restart is logged but must not block
 * system resume.
 */
static int
nge_resume(dev_info_t *devinfo)
{
	nge_t		*ngep;
	chip_info_t	*infop;
	int 		err;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	err = 0;

	/*
	 * If there are state inconsistancies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved devinfo");

	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/*
	 * Fetch the config space.  Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * Only in one case, this conditional branch can be executed: the port
	 * hasn't been plumbed.
	 */
	if (ngep->suspended == B_FALSE) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}

	/* rebuild the descriptor rings and bring the chip back up */
	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (!err) {
		err = nge_chip_reset(ngep);
		if (!err)
			err = nge_chip_start(ngep);
	}

	if (err) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
	}
	ngep->suspended = B_FALSE;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (DDI_SUCCESS);
}
1718 
1719 /*
1720  * attach(9E) -- Attach a device to the system
1721  *
1722  * Called once for each board successfully probed.
1723  */
static int
nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int		err;
	int		i;
	int		instance;
	caddr_t		regs;
	nge_t		*ngep;
	chip_info_t	*infop;
	mac_register_t	*macp;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (nge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate and link up the per-instance soft state.  Each
	 * successful step below sets a PROGRESS_* bit so that
	 * nge_unattach() can unwind a partially completed attach.
	 */
	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
	instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, ngep);
	ngep->devinfo = devinfo;

	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
	    NGE_DRIVER_NAME, instance);
	err = pci_config_setup(devinfo, &ngep->cfg_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
		goto attach_fail;
	}
	infop = (chip_info_t *)&ngep->chipinfo;
	nge_chip_cfg_init(ngep, infop, B_FALSE);
	nge_init_dev_spec_param(ngep);
	nge_get_props(ngep);
	ngep->progress |= PROGRESS_CFG;

	/* Map the chip's operating registers */
	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	ngep->io_regs = regs;
	ngep->progress |= PROGRESS_REGS;

	err = nge_register_intrs_and_init_locks(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach:"
		    " register intrs and init locks failed");
		goto attach_fail;
	}
	nge_init_ring_param_lock(ngep);
	ngep->progress |= PROGRESS_INTR;

	mutex_enter(ngep->genlock);

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	nge_phys_init(ngep);
	err = nge_chip_reset(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	nge_chip_sync(ngep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_enable(ngep->htable[i]);
		}
	}

	ngep->link_state = LINK_STATE_UNKNOWN;
	ngep->progress |= PROGRESS_HWINT;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (nge_nd_init(ngep)) {
		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	ngep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	nge_init_kstats(ngep, instance);
	ngep->progress |= PROGRESS_KSTATS;

	mutex_exit(ngep->genlock);

	/* Describe this instance to the GLDv3 framework */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ngep;
	macp->m_dip = devinfo;
	macp->m_src_addr = infop->vendor_addr.addr;
	macp->m_callbacks = &nge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ngep->default_mtu;
	macp->m_margin = VTAG_SIZE;
	/*
	 * Finally, we're ready to register ourselves with the mac
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &ngep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodical handler.
	 * nge_chip_cyclic() is invoked in kernel context.
	 */
	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
	    NGE_CYCLIC_PERIOD, DDI_IPL_0);

	ngep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	nge_unattach(ngep);
	return (DDI_FAILURE);
}
1866 
1867 static int
1868 nge_suspend(nge_t *ngep)
1869 {
1870 	mutex_enter(ngep->genlock);
1871 	rw_enter(ngep->rwlock, RW_WRITER);
1872 
1873 	/* if the port hasn't been plumbed, just return */
1874 	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
1875 		rw_exit(ngep->rwlock);
1876 		mutex_exit(ngep->genlock);
1877 		return (DDI_SUCCESS);
1878 	}
1879 	ngep->suspended = B_TRUE;
1880 	(void) nge_chip_stop(ngep, B_FALSE);
1881 	ngep->nge_mac_state = NGE_MAC_STOPPED;
1882 
1883 	rw_exit(ngep->rwlock);
1884 	mutex_exit(ngep->genlock);
1885 	return (DDI_SUCCESS);
1886 }
1887 
1888 /*
1889  * detach(9E) -- Detach a device from the system
1890  */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be recieved.
		 */
		return (nge_suspend(ngep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Try to wait all the buffer post to upper layer be released:
	 * poll brp->rx_hold for up to 1000 x 1ms.
	 */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If there is any posted buffer, reject to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/* Recycle the multicast table */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}
1955 
1956 
1957 /*
1958  * ========== Module Loading Data & Entry Points ==========
1959  */
1960 
/*
 * Device operations: probe/identify are nulldev and there is no
 * power entry point; only attach/detach carry real work.
 */
DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    nodev, NULL, D_MP, NULL);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

/* Single-linkage module: just the driver above */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};
1974 
1975 
/*
 * _info(9E) -- return information about this loadable module
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1981 
1982 int
1983 _init(void)
1984 {
1985 	int status;
1986 
1987 	mac_init_ops(&nge_dev_ops, "nge");
1988 	status = mod_install(&modlinkage);
1989 	if (status != DDI_SUCCESS)
1990 		mac_fini_ops(&nge_dev_ops);
1991 	else
1992 		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
1993 
1994 	return (status);
1995 }
1996 
1997 int
1998 _fini(void)
1999 {
2000 	int status;
2001 
2002 	status = mod_remove(&modlinkage);
2003 	if (status == DDI_SUCCESS) {
2004 		mac_fini_ops(&nge_dev_ops);
2005 		mutex_destroy(nge_log_mutex);
2006 	}
2007 
2008 	return (status);
2009 }
2010 
2011 /*
2012  * ============ Init MSI/Fixed/SoftInterrupt routines ==============
2013  */
2014 
2015 /*
2016  * Register interrupts and initialize each mutex and condition variables
2017  */
2018 
static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int		err;
	int		intr_types;
	uint_t		soft_prip;
	nge_msi_mask	msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.  Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	/* the factotum's softint priority is used for softlock below */
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remapping the MSI MAP0 and MAP1. MCP55
			 * is default mapping all the interrupt to 0 vector.
			 * Software needs to remapping this.
			 * This mapping is same as CK804.
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	/* fall back to FIXED if MSI was unavailable or failed to register */
	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}


	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	/*
	 * Initialise the locks at the interrupt priorities obtained
	 * above, so they may be taken safely from interrupt context.
	 */
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}
2182 
2183 /*
2184  * nge_add_intrs:
2185  *
2186  * Register FIXED or MSI interrupts.
2187  */
2188 static int
2189 nge_add_intrs(nge_t *ngep, int	intr_type)
2190 {
2191 	dev_info_t	*dip = ngep->devinfo;
2192 	int		avail, actual, intr_size, count = 0;
2193 	int		i, flag, ret;
2194 
2195 	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2196 
2197 	/* Get number of interrupts */
2198 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2199 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2200 		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2201 		    "count: %d", ret, count);
2202 
2203 		return (DDI_FAILURE);
2204 	}
2205 
2206 	/* Get number of available interrupts */
2207 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2208 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2209 		nge_error(ngep, "ddi_intr_get_navail() failure, "
2210 		    "ret: %d, avail: %d\n", ret, avail);
2211 
2212 		return (DDI_FAILURE);
2213 	}
2214 
2215 	if (avail < count) {
2216 		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
2217 		    count, avail));
2218 	}
2219 	flag = DDI_INTR_ALLOC_NORMAL;
2220 
2221 	/* Allocate an array of interrupt handles */
2222 	intr_size = count * sizeof (ddi_intr_handle_t);
2223 	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2224 
2225 	/* Call ddi_intr_alloc() */
2226 	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2227 	    count, &actual, flag);
2228 
2229 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
2230 		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
2231 
2232 		kmem_free(ngep->htable, intr_size);
2233 		return (DDI_FAILURE);
2234 	}
2235 
2236 	if (actual < count) {
2237 		NGE_DEBUG(("Requested: %d, Received: %d\n",
2238 		    count, actual));
2239 	}
2240 
2241 	ngep->intr_actual_cnt = actual;
2242 	ngep->intr_req_cnt = count;
2243 
2244 	/*
2245 	 * Get priority for first msi, assume remaining are all the same
2246 	 */
2247 	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
2248 	    DDI_SUCCESS) {
2249 		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
2250 
2251 		/* Free already allocated intr */
2252 		for (i = 0; i < actual; i++) {
2253 			(void) ddi_intr_free(ngep->htable[i]);
2254 		}
2255 
2256 		kmem_free(ngep->htable, intr_size);
2257 
2258 		return (DDI_FAILURE);
2259 	}
2260 	/* Test for high level mutex */
2261 	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
2262 		nge_error(ngep, "nge_add_intrs:"
2263 		    "Hi level interrupt not supported");
2264 
2265 		for (i = 0; i < actual; i++)
2266 			(void) ddi_intr_free(ngep->htable[i]);
2267 
2268 		kmem_free(ngep->htable, intr_size);
2269 
2270 		return (DDI_FAILURE);
2271 	}
2272 
2273 
2274 	/* Call ddi_intr_add_handler() */
2275 	for (i = 0; i < actual; i++) {
2276 		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
2277 		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
2278 			nge_error(ngep, "ddi_intr_add_handler() "
2279 			    "failed %d\n", ret);
2280 
2281 			/* Free already allocated intr */
2282 			for (i = 0; i < actual; i++) {
2283 				(void) ddi_intr_free(ngep->htable[i]);
2284 			}
2285 
2286 			kmem_free(ngep->htable, intr_size);
2287 
2288 			return (DDI_FAILURE);
2289 		}
2290 	}
2291 
2292 	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
2293 	    != DDI_SUCCESS) {
2294 		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
2295 
2296 		for (i = 0; i < actual; i++) {
2297 			(void) ddi_intr_remove_handler(ngep->htable[i]);
2298 			(void) ddi_intr_free(ngep->htable[i]);
2299 		}
2300 
2301 		kmem_free(ngep->htable, intr_size);
2302 
2303 		return (DDI_FAILURE);
2304 	}
2305 
2306 	return (DDI_SUCCESS);
2307 }
2308 
2309 /*
2310  * nge_rem_intrs:
2311  *
2312  * Unregister FIXED or MSI interrupts
2313  */
2314 static void
2315 nge_rem_intrs(nge_t *ngep)
2316 {
2317 	int	i;
2318 
2319 	NGE_DEBUG(("nge_rem_intrs\n"));
2320 
2321 	/* Disable all interrupts */
2322 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2323 		/* Call ddi_intr_block_disable() */
2324 		(void) ddi_intr_block_disable(ngep->htable,
2325 		    ngep->intr_actual_cnt);
2326 	} else {
2327 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
2328 			(void) ddi_intr_disable(ngep->htable[i]);
2329 		}
2330 	}
2331 
2332 	/* Call ddi_intr_remove_handler() */
2333 	for (i = 0; i < ngep->intr_actual_cnt; i++) {
2334 		(void) ddi_intr_remove_handler(ngep->htable[i]);
2335 		(void) ddi_intr_free(ngep->htable[i]);
2336 	}
2337 
2338 	kmem_free(ngep->htable,
2339 	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
2340 }
2341