xref: /titanic_50/usr/src/uts/common/io/nge/nge_main.c (revision 074bb90d80fdbeb2d04a8450a55ecbc96de28785)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include "nge.h"
29 
30 /*
31  * Describes the chip's DMA engine
32  */
33 
/*
 * DMA attributes for descriptor/buffer areas in "hot" (mode-3)
 * descriptor mode: 40-bit address range, 16-byte alignment, and a
 * single cookie per binding (sgllen == 1).
 */
static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
48 
/*
 * DMA attributes for binding TX payload mblks in "hot" (mode-3)
 * descriptor mode: 40-bit addresses, up to NGE_MAX_COOKIES segments.
 */
static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
63 
/*
 * DMA attributes for descriptor/buffer areas in "sum" (mode-2,
 * checksum-offload) descriptor mode: 32-bit address range only,
 * single cookie per binding.
 */
static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
78 
/*
 * DMA attributes for binding TX payload mblks in "sum" (mode-2)
 * descriptor mode: 32-bit addresses, up to NGE_MAX_COOKIES segments.
 */
static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
93 
94 /*
95  * DMA access attributes for data.
96  */
/* Little-endian, strictly-ordered access for packet data buffers. */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
103 
104 /*
105  * DMA access attributes for descriptors.
106  */
/* Little-endian, strictly-ordered access for descriptor rings. */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
113 
114 /*
115  * PIO access attributes for registers
116  */
/* Little-endian, strictly-ordered PIO access for device registers. */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
123 
124 /*
125  * NIC DESC MODE 2
126  */
127 
/*
 * Descriptor-mode dispatch table for mode 2 ("sum"): descriptor sizes,
 * DMA attributes, and the fill/check operations for RX and TX BDs.
 */
static const nge_desc_attr_t nge_sum_desc = {

	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};
139 
140 /*
141  * NIC DESC MODE 3
142  */
143 
/*
 * Descriptor-mode dispatch table for mode 3 ("hot"): descriptor sizes,
 * DMA attributes, and the fill/check operations for RX and TX BDs.
 */
static const nge_desc_attr_t nge_hot_desc = {

	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};
155 
/*
 * Driver identity string and the .conf property names looked up in
 * nge_get_props() below.
 */
static char nge_ident[] = "nVidia 1Gb Ethernet";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[]	= "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";
extern kmutex_t nge_log_mutex[1];	/* shared logging mutex, defined elsewhere */
169 
/*
 * Forward declarations of the GLDv3 (mac) entry points and the
 * private-property helpers implemented later in this file.
 */
static int		nge_m_start(void *);
static void		nge_m_stop(void *);
static int		nge_m_promisc(void *, boolean_t);
static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
static int		nge_m_unicst(void *, const uint8_t *);
static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);
static int		nge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int		nge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *, uint_t *);
static int		nge_set_priv_prop(nge_t *, const char *, uint_t,
	const void *);
static int		nge_get_priv_prop(nge_t *, const char *, uint_t,
	uint_t, void *);

/* Optional callbacks this driver provides beyond the mandatory set */
#define		NGE_M_CALLBACK_FLAGS\
		(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
188 
/* GLDv3 callback vector registered with the mac layer. */
static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	nge_m_ioctl,
	nge_m_getcapab,
	NULL,		/* optional entry point, not provided */
	NULL,		/* optional entry point, not provided */
	nge_m_setprop,
	nge_m_getprop
};
205 
/*
 * Driver-private properties exposed through the mac property
 * interface; all are read-write (see nge_set_priv_prop/nge_get_priv_prop).
 */
mac_priv_prop_t nge_priv_props[] = {
	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
	{"_recv_max_packet", MAC_PROP_PERM_RW},
	{"_poll_quiet_time", MAC_PROP_PERM_RW},
	{"_poll_busy_time", MAC_PROP_PERM_RW},
	{"_rx_intr_hwater", MAC_PROP_PERM_RW},
	{"_rx_intr_lwater", MAC_PROP_PERM_RW},
};

/* Number of entries in nge_priv_props[] */
#define	NGE_MAX_PRIV_PROPS \
	(sizeof (nge_priv_props)/sizeof (mac_priv_prop_t))
218 
/* Interrupt registration helpers implemented later in this file */
static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable: when B_TRUE, try MSI before fixed interrupts.
 */
boolean_t nge_enable_msi = B_FALSE;
227 
228 static enum ioc_reply
229 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
230 {
231 	/*
232 	 * If the mode isn't being changed, there's nothing to do ...
233 	 */
234 	if (mode == ngep->param_loop_mode)
235 		return (IOC_ACK);
236 
237 	/*
238 	 * Validate the requested mode and prepare a suitable message
239 	 * to explain the link down/up cycle that the change will
240 	 * probably induce ...
241 	 */
242 	switch (mode) {
243 	default:
244 		return (IOC_INVAL);
245 
246 	case NGE_LOOP_NONE:
247 	case NGE_LOOP_EXTERNAL_100:
248 	case NGE_LOOP_EXTERNAL_10:
249 	case NGE_LOOP_INTERNAL_PHY:
250 		break;
251 	}
252 
253 	/*
254 	 * All OK; tell the caller to reprogram
255 	 * the PHY and/or MAC for the new mode ...
256 	 */
257 	ngep->param_loop_mode = mode;
258 	return (IOC_RESTART_ACK);
259 }
260 
/* Select the debug-flag class for the NGE_DEBUG/NGE_TRACE macros below */
#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT
263 
264 /*
265  * Utility routine to carve a slice off a chunk of allocated memory,
266  * updating the chunk descriptor accordingly.  The size of the slice
267  * is given by the product of the <qty> and <size> parameters.
268  */
269 void
270 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
271     uint32_t qty, uint32_t size)
272 {
273 	size_t totsize;
274 
275 	totsize = qty*size;
276 	ASSERT(size > 0);
277 	ASSERT(totsize <= chunk->alength);
278 
279 	*slice = *chunk;
280 	slice->nslots = qty;
281 	slice->size = size;
282 	slice->alength = totsize;
283 
284 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
285 	chunk->alength -= totsize;
286 	chunk->offset += totsize;
287 	chunk->cookie.dmac_laddress += totsize;
288 	chunk->cookie.dmac_size -= totsize;
289 }
290 
291 /*
292  * Allocate an area of memory and a DMA handle for accessing it
293  */
/*
 * Allocate an area of memory and a DMA handle for accessing it.
 *
 * On success, fills in <dma_p> (handle, access handle, kernel VA,
 * single cookie) and returns DDI_SUCCESS.  On any failure the partial
 * state is released via nge_free_dma_mem() and DDI_FAILURE is
 * returned.  A binding that yields more than one cookie is treated as
 * a failure: callers rely on a single contiguous cookie.
 *
 * NOTE(review): the handle is always allocated with the current
 * descriptor-mode DMA attributes (ngep->desc_attr.dma_attr), even for
 * data buffers -- confirm this is intended for all callers.
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	/* Sentinel slot/size values; real values set by nge_slice_chunk() */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}
343 
344 /*
345  * Free one allocated area of DMAable memory
346  */
347 void
348 nge_free_dma_mem(dma_area_t *dma_p)
349 {
350 	if (dma_p->dma_hdl != NULL) {
351 		if (dma_p->ncookies) {
352 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
353 			dma_p->ncookies = 0;
354 		}
355 	}
356 	if (dma_p->acc_hdl != NULL) {
357 		ddi_dma_mem_free(&dma_p->acc_hdl);
358 		dma_p->acc_hdl = NULL;
359 	}
360 	if (dma_p->dma_hdl != NULL) {
361 		ddi_dma_free_handle(&dma_p->dma_hdl);
362 		dma_p->dma_hdl = NULL;
363 	}
364 }
365 
/* Progress flags used by nge_alloc_bufs() to unwind partial allocation */
#define	ALLOC_TX_BUF	0x1
#define	ALLOC_TX_DESC	0x2	/* not set in the visible code */
#define	ALLOC_RX_DESC	0x4
369 
370 int
371 nge_alloc_bufs(nge_t *ngep)
372 {
373 	int err;
374 	int split;
375 	int progress;
376 	size_t txbuffsize;
377 	size_t rxdescsize;
378 	size_t txdescsize;
379 
380 	txbuffsize = ngep->tx_desc * ngep->buf_size;
381 	rxdescsize = ngep->rx_desc;
382 	txdescsize = ngep->tx_desc;
383 	rxdescsize *= ngep->desc_attr.rxd_size;
384 	txdescsize *= ngep->desc_attr.txd_size;
385 	progress = 0;
386 
387 	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
388 	/*
389 	 * Allocate memory & handles for TX buffers
390 	 */
391 	ASSERT((txbuffsize % ngep->nge_split) == 0);
392 	for (split = 0; split < ngep->nge_split; ++split) {
393 		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
394 		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
395 		    &ngep->send->buf[split]);
396 		if (err != DDI_SUCCESS)
397 			goto fail;
398 	}
399 
400 	progress |= ALLOC_TX_BUF;
401 
402 	/*
403 	 * Allocate memory & handles for receive return rings and
404 	 * buffer (producer) descriptor rings
405 	 */
406 	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
407 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
408 	if (err != DDI_SUCCESS)
409 		goto fail;
410 	progress |= ALLOC_RX_DESC;
411 
412 	/*
413 	 * Allocate memory & handles for TX descriptor rings,
414 	 */
415 	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
416 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
417 	if (err != DDI_SUCCESS)
418 		goto fail;
419 	return (DDI_SUCCESS);
420 
421 fail:
422 	if (progress & ALLOC_RX_DESC)
423 		nge_free_dma_mem(&ngep->recv->desc);
424 	if (progress & ALLOC_TX_BUF) {
425 		for (split = 0; split < ngep->nge_split; ++split)
426 			nge_free_dma_mem(&ngep->send->buf[split]);
427 	}
428 
429 	return (DDI_FAILURE);
430 }
431 
432 /*
433  * This routine frees the transmit and receive buffers and descriptors.
434  * Make sure the chip is stopped before calling it!
435  */
436 void
437 nge_free_bufs(nge_t *ngep)
438 {
439 	int split;
440 
441 	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
442 
443 	nge_free_dma_mem(&ngep->recv->desc);
444 	nge_free_dma_mem(&ngep->send->desc);
445 
446 	for (split = 0; split < ngep->nge_split; ++split)
447 		nge_free_dma_mem(&ngep->send->buf[split]);
448 }
449 
450 /*
451  * Clean up initialisation done above before the memory is freed
452  */
453 static void
454 nge_fini_send_ring(nge_t *ngep)
455 {
456 	uint32_t slot;
457 	size_t dmah_num;
458 	send_ring_t *srp;
459 	sw_tx_sbd_t *ssbdp;
460 
461 	srp = ngep->send;
462 	ssbdp = srp->sw_sbds;
463 
464 	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
465 
466 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
467 
468 	for (slot = 0; slot < dmah_num; ++slot) {
469 		if (srp->dmahndl[slot].hndl) {
470 			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
471 			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
472 			srp->dmahndl[slot].hndl = NULL;
473 			srp->dmahndl[slot].next = NULL;
474 		}
475 	}
476 
477 	srp->dmah_free.head = NULL;
478 	srp->dmah_free.tail = NULL;
479 
480 	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
481 
482 }
483 
484 /*
485  * Initialise the specified Send Ring, using the information in the
486  * <dma_area> descriptors that it contains to set up all the other
487  * fields. This routine should be called only once for each ring.
488  */
/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 *
 * Carves the TX descriptor area and each TX buffer split into
 * per-slot dma_areas, then preallocates the pool of TX DMA handles.
 * On handle-allocation failure, nge_fini_send_ring() releases
 * everything set up so far and DDI_FAILURE is returned.
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;	/* local copy consumed by nge_slice_chunk */
	dma_area_t pbuf;
	
	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all:
	 * each slot gets one h/w descriptor slice and one bcopy
	 * buffer slice, with the buffers spread across the splits.
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	/* Thread the handles into the free list; tail terminates it */
	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}
558 
559 /*
560  * Intialize the tx recycle pointer and tx sending pointer of tx ring
561  * and set the type of tx's data descriptor by default.
562  */
563 static void
564 nge_reinit_send_ring(nge_t *ngep)
565 {
566 	size_t dmah_num;
567 	uint32_t slot;
568 	send_ring_t *srp;
569 	sw_tx_sbd_t *ssbdp;
570 
571 	srp = ngep->send;
572 
573 	/*
574 	 * Reinitialise control variables ...
575 	 */
576 
577 	srp->tx_hwmark = NGE_DESC_MIN;
578 	srp->tx_lwmark = NGE_DESC_MIN;
579 
580 	srp->tx_next = 0;
581 	srp->tx_free = srp->desc.nslots;
582 	srp->tc_next = 0;
583 
584 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
585 
586 	for (slot = 0; slot - dmah_num != 0; ++slot)
587 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
588 
589 	srp->dmah_free.head = srp->dmahndl;
590 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
591 	srp->dmah_free.tail->next = NULL;
592 
593 	/*
594 	 * Zero and sync all the h/w Send Buffer Descriptors
595 	 */
596 	for (slot = 0; slot < srp->desc.nslots; ++slot) {
597 		ssbdp = &srp->sw_sbds[slot];
598 		ssbdp->flags = HOST_OWN;
599 	}
600 
601 	DMA_ZERO(srp->desc);
602 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
603 }
604 
605 /*
606  * Initialize the slot number of rx's ring
607  */
608 static void
609 nge_init_recv_ring(nge_t *ngep)
610 {
611 	recv_ring_t *rrp;
612 
613 	rrp = ngep->recv;
614 	rrp->desc.nslots = ngep->rx_desc;
615 	rrp->ngep = ngep;
616 }
617 
618 /*
619  * Intialize the rx recycle pointer and rx sending pointer of rx ring
620  */
621 static void
622 nge_reinit_recv_ring(nge_t *ngep)
623 {
624 	recv_ring_t *rrp;
625 
626 	rrp = ngep->recv;
627 
628 	/*
629 	 * Reinitialise control variables ...
630 	 */
631 	rrp->prod_index = 0;
632 	/*
633 	 * Zero and sync all the h/w Send Buffer Descriptors
634 	 */
635 	DMA_ZERO(rrp->desc);
636 	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
637 }
638 
639 /*
640  * Clean up initialisation done above before the memory is freed
641  */
642 static void
643 nge_fini_buff_ring(nge_t *ngep)
644 {
645 	uint32_t i;
646 	buff_ring_t *brp;
647 	dma_area_t *bufp;
648 	sw_rx_sbd_t *bsbdp;
649 
650 	brp = ngep->buff;
651 	bsbdp = brp->sw_rbds;
652 
653 	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
654 
655 	mutex_enter(brp->recycle_lock);
656 	brp->buf_sign++;
657 	mutex_exit(brp->recycle_lock);
658 	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
659 		if (bsbdp->bufp) {
660 			if (bsbdp->bufp->mp)
661 				freemsg(bsbdp->bufp->mp);
662 			nge_free_dma_mem(bsbdp->bufp);
663 			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
664 			bsbdp->bufp = NULL;
665 		}
666 	}
667 	while (brp->free_list != NULL) {
668 		bufp = brp->free_list;
669 		brp->free_list = bufp->next;
670 		bufp->next = NULL;
671 		if (bufp->mp)
672 			freemsg(bufp->mp);
673 		nge_free_dma_mem(bufp);
674 		kmem_free(bufp, sizeof (dma_area_t));
675 	}
676 	while (brp->recycle_list != NULL) {
677 		bufp = brp->recycle_list;
678 		brp->recycle_list = bufp->next;
679 		bufp->next = NULL;
680 		if (bufp->mp)
681 			freemsg(bufp->mp);
682 		nge_free_dma_mem(bufp);
683 		kmem_free(bufp, sizeof (dma_area_t));
684 	}
685 
686 
687 	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
688 	brp->sw_rbds = NULL;
689 }
690 
691 /*
692  * Intialize the Rx's data ring and free ring
693  */
694 static int
695 nge_init_buff_ring(nge_t *ngep)
696 {
697 	uint32_t err;
698 	uint32_t slot;
699 	uint32_t nslots_buff;
700 	uint32_t nslots_recv;
701 	buff_ring_t *brp;
702 	recv_ring_t *rrp;
703 	dma_area_t desc;
704 	dma_area_t *bufp;
705 	sw_rx_sbd_t *bsbdp;
706 
707 	rrp = ngep->recv;
708 	brp = ngep->buff;
709 	brp->nslots = ngep->rx_buf;
710 	brp->rx_bcopy = B_FALSE;
711 	nslots_recv = rrp->desc.nslots;
712 	nslots_buff = brp->nslots;
713 	brp->ngep = ngep;
714 
715 	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
716 
717 	/*
718 	 * Allocate the array of s/w Recv Buffer Descriptors
719 	 */
720 	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
721 	brp->sw_rbds = bsbdp;
722 	brp->free_list = NULL;
723 	brp->recycle_list = NULL;
724 	for (slot = 0; slot < nslots_buff; ++slot) {
725 		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
726 		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
727 		    + NGE_HEADROOM),
728 		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
729 		if (err != DDI_SUCCESS) {
730 			kmem_free(bufp, sizeof (dma_area_t));
731 			return (DDI_FAILURE);
732 		}
733 
734 		bufp->alength -= NGE_HEADROOM;
735 		bufp->offset += NGE_HEADROOM;
736 		bufp->private = (caddr_t)ngep;
737 		bufp->rx_recycle.free_func = nge_recv_recycle;
738 		bufp->rx_recycle.free_arg = (caddr_t)bufp;
739 		bufp->signature = brp->buf_sign;
740 		bufp->rx_delivered = B_FALSE;
741 		bufp->mp = desballoc(DMA_VPTR(*bufp),
742 		    ngep->buf_size + NGE_HEADROOM,
743 		    0, &bufp->rx_recycle);
744 
745 		if (bufp->mp == NULL) {
746 			return (DDI_FAILURE);
747 		}
748 		bufp->next = brp->free_list;
749 		brp->free_list = bufp;
750 	}
751 
752 	/*
753 	 * Now initialise each array element once and for all
754 	 */
755 	desc = rrp->desc;
756 	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
757 		nge_slice_chunk(&bsbdp->desc, &desc, 1,
758 		    ngep->desc_attr.rxd_size);
759 		bufp = brp->free_list;
760 		brp->free_list = bufp->next;
761 		bsbdp->bufp = bufp;
762 		bsbdp->flags = CONTROLER_OWN;
763 		bufp->next = NULL;
764 	}
765 
766 	ASSERT(desc.alength == 0);
767 	return (DDI_SUCCESS);
768 }
769 
770 /*
771  * Fill the host address of data in rx' descriptor
772  * and initialize free pointers of rx free ring
773  */
774 static int
775 nge_reinit_buff_ring(nge_t *ngep)
776 {
777 	uint32_t slot;
778 	uint32_t nslots_recv;
779 	buff_ring_t *brp;
780 	recv_ring_t *rrp;
781 	sw_rx_sbd_t *bsbdp;
782 	void *hw_bd_p;
783 
784 	brp = ngep->buff;
785 	rrp = ngep->recv;
786 	bsbdp = brp->sw_rbds;
787 	nslots_recv = rrp->desc.nslots;
788 	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
789 		hw_bd_p = DMA_VPTR(bsbdp->desc);
790 	/*
791 	 * There is a scenario: When the traffic of small tcp
792 	 * packet is heavy, suspending the tcp traffic will
793 	 * cause the preallocated buffers for rx not to be
794 	 * released in time by tcp taffic and cause rx's buffer
795 	 * pointers not to be refilled in time.
796 	 *
797 	 * At this point, if we reinitialize the driver, the bufp
798 	 * pointer for rx's traffic will be NULL.
799 	 * So the result of the reinitializion fails.
800 	 */
801 		if (bsbdp->bufp == NULL)
802 			return (DDI_FAILURE);
803 
804 		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
805 		    bsbdp->bufp->alength);
806 	}
807 	return (DDI_SUCCESS);
808 }
809 
810 static void
811 nge_init_ring_param_lock(nge_t *ngep)
812 {
813 	buff_ring_t *brp;
814 	send_ring_t *srp;
815 
816 	srp = ngep->send;
817 	brp = ngep->buff;
818 
819 	/* Init the locks for send ring */
820 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
821 	    DDI_INTR_PRI(ngep->intr_pri));
822 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
823 	    DDI_INTR_PRI(ngep->intr_pri));
824 	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
825 	    DDI_INTR_PRI(ngep->intr_pri));
826 
827 	/* Init parameters of buffer ring */
828 	brp->free_list = NULL;
829 	brp->recycle_list = NULL;
830 	brp->rx_hold = 0;
831 	brp->buf_sign = 0;
832 
833 	/* Init recycle list lock */
834 	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
835 	    DDI_INTR_PRI(ngep->intr_pri));
836 }
837 
838 int
839 nge_init_rings(nge_t *ngep)
840 {
841 	uint32_t err;
842 
843 	err = nge_init_send_ring(ngep);
844 	if (err != DDI_SUCCESS) {
845 		return (err);
846 	}
847 	nge_init_recv_ring(ngep);
848 
849 	err = nge_init_buff_ring(ngep);
850 	if (err != DDI_SUCCESS) {
851 		nge_fini_send_ring(ngep);
852 		return (DDI_FAILURE);
853 	}
854 
855 	return (err);
856 }
857 
858 static int
859 nge_reinit_ring(nge_t *ngep)
860 {
861 	int err;
862 
863 	nge_reinit_recv_ring(ngep);
864 	nge_reinit_send_ring(ngep);
865 	err = nge_reinit_buff_ring(ngep);
866 	return (err);
867 }
868 
869 
/* Tear down the rings built by nge_init_rings(). */
void
nge_fini_rings(nge_t *ngep)
{
	/*
	 * For receive ring, nothing need to be finished.
	 * So only finish buffer ring and send ring here.
	 */
	nge_fini_buff_ring(ngep);
	nge_fini_send_ring(ngep);
}
880 
881 /*
882  * Loopback ioctl code
883  */
884 
/*
 * Loopback modes advertised via LB_GET_INFO; the third field is the
 * NGE_LOOP_* value accepted back through LB_SET_MODE.
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};
891 
892 enum ioc_reply
893 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
894 {
895 	int cmd;
896 	uint32_t *lbmp;
897 	lb_info_sz_t *lbsp;
898 	lb_property_t *lbpp;
899 
900 	/*
901 	 * Validate format of ioctl
902 	 */
903 	if (mp->b_cont == NULL)
904 		return (IOC_INVAL);
905 
906 	cmd = iocp->ioc_cmd;
907 
908 	switch (cmd) {
909 	default:
910 		return (IOC_INVAL);
911 
912 	case LB_GET_INFO_SIZE:
913 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
914 			return (IOC_INVAL);
915 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
916 		*lbsp = sizeof (loopmodes);
917 		return (IOC_REPLY);
918 
919 	case LB_GET_INFO:
920 		if (iocp->ioc_count != sizeof (loopmodes))
921 			return (IOC_INVAL);
922 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
923 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
924 		return (IOC_REPLY);
925 
926 	case LB_GET_MODE:
927 		if (iocp->ioc_count != sizeof (uint32_t))
928 			return (IOC_INVAL);
929 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
930 		*lbmp = ngep->param_loop_mode;
931 		return (IOC_REPLY);
932 
933 	case LB_SET_MODE:
934 		if (iocp->ioc_count != sizeof (uint32_t))
935 			return (IOC_INVAL);
936 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
937 		return (nge_set_loop_mode(ngep, *lbmp));
938 	}
939 }
940 
/* Switch the debug-flag class for the following (nemo/GLDv3) section */
#undef	NGE_DBG
#define	NGE_DBG	NGE_DBG_NEMO
943 
944 
945 static void
946 nge_check_desc_prop(nge_t *ngep)
947 {
948 	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
949 		ngep->desc_mode = DESC_HOT;
950 
951 	if (ngep->desc_mode == DESC_OFFLOAD)	{
952 
953 		ngep->desc_attr = nge_sum_desc;
954 
955 	}	else if (ngep->desc_mode == DESC_HOT)	{
956 
957 		ngep->desc_attr = nge_hot_desc;
958 	}
959 }
960 
961 /*
962  * nge_get_props -- get the parameters to tune the driver
963  */
/*
 * nge_get_props -- get the parameters to tune the driver
 *
 * Reads all tunable .conf properties (cache line size, latency timer,
 * interrupt moderation, RX watermarks, descriptor mode, MTU, low
 * memory mode), then derives buffer size, descriptor counts and the
 * TX buffer split factor from the resulting MTU / memory settings.
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	/* PCI tuning: defaults of 32-byte cache line, latency 64 */
	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);
	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);

	/* The default_mtu property is honoured only on jumbo-capable chips */
	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;
	if (dev_param_p->tx_pause_frame)
			ngep->param_link_tx_pause = B_TRUE;
	else
			ngep->param_link_tx_pause = B_FALSE;

	if (dev_param_p->rx_pause_frame)
			ngep->param_link_rx_pause = B_TRUE;
	else
			ngep->param_link_rx_pause = B_FALSE;

	/*
	 * Pick buffer sizes, ring sizes and split factor for the MTU
	 * band; MTUs above NGE_MAX_MTU are clamped, and the low-memory
	 * tunable only applies at standard MTU.
	 */
	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}
1059 
1060 
1061 static int
1062 nge_reset_dev(nge_t *ngep)
1063 {
1064 	int err;
1065 	nge_mul_addr1 maddr1;
1066 	nge_sw_statistics_t *sw_stp;
1067 	sw_stp = &ngep->statistics.sw_statistics;
1068 	send_ring_t *srp = ngep->send;
1069 
1070 	ASSERT(mutex_owned(ngep->genlock));
1071 	mutex_enter(srp->tc_lock);
1072 	mutex_enter(srp->tx_lock);
1073 
1074 	nge_tx_recycle_all(ngep);
1075 	err = nge_reinit_ring(ngep);
1076 	if (err == DDI_FAILURE) {
1077 		mutex_exit(srp->tx_lock);
1078 		mutex_exit(srp->tc_lock);
1079 		return (err);
1080 	}
1081 	err = nge_chip_reset(ngep);
1082 	/*
1083 	 * Clear the Multicast mac address table
1084 	 */
1085 	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
1086 	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
1087 	maddr1.addr_bits.addr = 0;
1088 	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);
1089 
1090 	mutex_exit(srp->tx_lock);
1091 	mutex_exit(srp->tc_lock);
1092 	if (err == DDI_FAILURE)
1093 		return (err);
1094 	ngep->watchdog = 0;
1095 	ngep->resched_needed = B_FALSE;
1096 	ngep->promisc = B_FALSE;
1097 	ngep->param_loop_mode = NGE_LOOP_NONE;
1098 	ngep->factotum_flag = 0;
1099 	ngep->resched_needed = 0;
1100 	ngep->nge_mac_state = NGE_MAC_RESET;
1101 	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
1102 	ngep->max_sdu += VTAG_SIZE;
1103 	ngep->rx_def = 0x16;
1104 
1105 	/* Clear the software statistics */
1106 	sw_stp->recv_count = 0;
1107 	sw_stp->xmit_count = 0;
1108 	sw_stp->rbytes = 0;
1109 	sw_stp->obytes = 0;
1110 
1111 	return (DDI_SUCCESS);
1112 }
1113 
/*
 * nge_m_stop() -- GLDv3 mc_stop(9E) entry point.
 *
 * Stops packet processing, recycles all outstanding TX descriptors,
 * tears down the rings and frees the DMA buffers.  nge_m_start()
 * reallocates them.  Takes genlock then rwlock (writer), in that order.
 */
static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/
	int err;

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/* If suspended, the adapter is already stopped, just return. */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		mutex_exit(ngep->genlock);
		return;
	}
	rw_enter(ngep->rwlock, RW_WRITER);

	/* If a clean stop fails, fall back to a full chip reset */
	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		err = nge_chip_reset(ngep);
	if (err == DDI_FAILURE)
		nge_problem(ngep, "nge_m_stop: stop chip failed");
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}
1151 
/*
 * nge_m_start() -- GLDv3 mc_start(9E) entry point.
 *
 * Allocates DMA buffers, initialises the rings and (re)starts the
 * chip via nge_restart().  Returns 0 on success, EIO on any failure
 * (including while suspended, since resume re-invokes this path).
 */
static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (EIO);
	}
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		/* rings failed; release the buffers allocated above */
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
finish:
	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (err == DDI_SUCCESS ? 0 : EIO);
}
1193 
/*
 * nge_m_unicst() -- GLDv3 mc_unicst(9E) entry point.
 *
 * Records the new unicast MAC address in the soft state and pushes it
 * to the hardware via nge_chip_sync() (skipped while suspended; the
 * resume path will install it).  Always returns 0/DDI_SUCCESS.
 */
static int
nge_m_unicst(void *arg, const uint8_t *macaddr)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_unicst($%p)", arg));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(ngep->genlock);

	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
	ngep->cur_uni_addr.set = 1;

	/*
	 * If we are suspended, we want to quit now, and not update
	 * the chip.  Doing so might put it in a bad state, but the
	 * resume will get the unicast address installed.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		/* NOTE(review): DDI_SUCCESS (== 0) for consistency with the 0 below */
		return (DDI_SUCCESS);
	}
	nge_chip_sync(ngep);

	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}
1225 
/*
 * nge_m_promisc() -- GLDv3 mc_setpromisc(9E) entry point.
 *
 * Records the requested promiscuous mode and programs the hardware
 * through nge_chip_sync().  A no-op (returns 0) when the mode is
 * already in effect.
 */
static int
nge_m_promisc(void *arg, boolean_t on)
{
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_promisc($%p)", arg));

	/*
	 * Store specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(ngep->genlock);
	/*
	 * If suspended, there is no need to do anything, even
	 * recording the promiscuious mode is not neccessary, as
	 * it won't be properly set on resume.  Just return failing.
	 */
	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		/* NOTE(review): DDI_FAILURE (-1) rather than an errno — confirm callers */
		return (DDI_FAILURE);
	}
	if (ngep->promisc == on) {
		mutex_exit(ngep->genlock);
		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
		return (0);
	}
	ngep->promisc = on;
	/* record_promisc is replayed by nge_restart() after a reset */
	ngep->record_promisc = ngep->promisc;
	nge_chip_sync(ngep);
	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
	mutex_exit(ngep->genlock);

	return (0);
}
1259 
1260 static void nge_mulparam(nge_t *ngep)
1261 {
1262 	uint8_t number;
1263 	ether_addr_t pand;
1264 	ether_addr_t por;
1265 	mul_item *plist;
1266 
1267 	for (number = 0; number < ETHERADDRL; number++) {
1268 		pand[number] = 0x00;
1269 		por[number] = 0x00;
1270 	}
1271 	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1272 		for (number = 0; number < ETHERADDRL; number++) {
1273 			pand[number] &= plist->mul_addr[number];
1274 			por[number] |= plist->mul_addr[number];
1275 		}
1276 	}
1277 	for (number = 0; number < ETHERADDRL; number++) {
1278 		ngep->cur_mul_addr.addr[number]
1279 		    = pand[number] & por[number];
1280 		ngep->cur_mul_mask.addr[number]
1281 		    = pand [number] | (~por[number]);
1282 	}
1283 }
1284 static int
1285 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1286 {
1287 	boolean_t update;
1288 	boolean_t b_eq;
1289 	nge_t *ngep = arg;
1290 	mul_item *plist;
1291 	mul_item *plist_prev;
1292 	mul_item *pitem;
1293 
1294 	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
1295 	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1296 
1297 	update = B_FALSE;
1298 	plist = plist_prev = NULL;
1299 	mutex_enter(ngep->genlock);
1300 	if (add) {
1301 		if (ngep->pcur_mulist != NULL) {
1302 			for (plist = ngep->pcur_mulist; plist != NULL;
1303 			    plist = plist->next) {
1304 				b_eq = ether_eq(plist->mul_addr, mca);
1305 				if (b_eq) {
1306 					plist->ref_cnt++;
1307 					break;
1308 				}
1309 				plist_prev = plist;
1310 			}
1311 		}
1312 
1313 		if (plist == NULL) {
1314 			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
1315 			ether_copy(mca, pitem->mul_addr);
1316 			pitem ->ref_cnt++;
1317 			pitem ->next = NULL;
1318 			if (plist_prev == NULL)
1319 				ngep->pcur_mulist = pitem;
1320 			else
1321 				plist_prev->next = pitem;
1322 			update = B_TRUE;
1323 		}
1324 	} else {
1325 		if (ngep->pcur_mulist != NULL) {
1326 			for (plist = ngep->pcur_mulist; plist != NULL;
1327 			    plist = plist->next) {
1328 				b_eq = ether_eq(plist->mul_addr, mca);
1329 				if (b_eq) {
1330 					update = B_TRUE;
1331 					break;
1332 				}
1333 				plist_prev = plist;
1334 			}
1335 
1336 			if (update) {
1337 				if ((plist_prev == NULL) &&
1338 				    (plist->next == NULL))
1339 					ngep->pcur_mulist = NULL;
1340 				else if ((plist_prev == NULL) &&
1341 				    (plist->next != NULL))
1342 					ngep->pcur_mulist = plist->next;
1343 				else
1344 					plist_prev->next = plist->next;
1345 				kmem_free(plist, sizeof (mul_item));
1346 			}
1347 		}
1348 	}
1349 
1350 	if (update && !ngep->suspended) {
1351 		nge_mulparam(ngep);
1352 		nge_chip_sync(ngep);
1353 	}
1354 	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
1355 	mutex_exit(ngep->genlock);
1356 
1357 	return (0);
1358 }
1359 
/*
 * nge_m_ioctl() -- GLDv3 mc_ioctl(9E) entry point.
 *
 * Validates the ioctl command, enforces the net_config privilege for
 * the hardware-access commands, dispatches to the chip or loopback
 * ioctl handlers under genlock, optionally reprograms PHY/MAC, and
 * finally acks/naks/replies on the write queue.
 */
static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesireable
	 * state.  So just NAK it.
	 */
	mutex_enter(ngep->genlock);
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(ngep->genlock);
		return;
	}
	mutex_exit(ngep->genlock);

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl:  cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	/* loopback queries are read-only; no privilege needed */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	switch (cmd) {
	default:
		/* unknown cmds were already NAKed above */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
	break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
	break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
	break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
	break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/* the handler already replied; nothing more to do */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
1515 
1516 static boolean_t
1517 nge_param_locked(mac_prop_id_t pr_num)
1518 {
1519 	/*
1520 	 * All adv_* parameters are locked (read-only) while
1521 	 * the device is in any sort of loopback mode ...
1522 	 */
1523 	switch (pr_num) {
1524 		case MAC_PROP_ADV_1000FDX_CAP:
1525 		case MAC_PROP_EN_1000FDX_CAP:
1526 		case MAC_PROP_ADV_1000HDX_CAP:
1527 		case MAC_PROP_EN_1000HDX_CAP:
1528 		case MAC_PROP_ADV_100FDX_CAP:
1529 		case MAC_PROP_EN_100FDX_CAP:
1530 		case MAC_PROP_ADV_100HDX_CAP:
1531 		case MAC_PROP_EN_100HDX_CAP:
1532 		case MAC_PROP_ADV_10FDX_CAP:
1533 		case MAC_PROP_EN_10FDX_CAP:
1534 		case MAC_PROP_ADV_10HDX_CAP:
1535 		case MAC_PROP_EN_10HDX_CAP:
1536 		case MAC_PROP_AUTONEG:
1537 		case MAC_PROP_FLOWCTRL:
1538 			return (B_TRUE);
1539 	}
1540 	return (B_FALSE);
1541 }
1542 
1543 /*
1544  * callback functions for set/get of properties
1545  */
/*
 * nge_m_setprop() -- GLDv3 mc_setprop(9E) entry point.
 *
 * Sets a link property under genlock.  The en_* capability cases fall
 * through the shared "reprogram" label to push the change to PHY+MAC.
 * MTU changes are refused while the MAC is started and re-derive the
 * buffer/descriptor geometry from the new size.  Returns 0 or errno.
 */
static int
nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nge_t *ngep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(ngep->genlock);
	if (ngep->param_loop_mode != NGE_LOOP_NONE &&
	    nge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(ngep->genlock);
		return (EBUSY);
	}
	switch (pr_num) {
		/*
		 * Enabling an en_* capability also advertises it; both
		 * soft-state copies are updated, then PHY/MAC reprogrammed
		 * via the shared label below.
		 */
		case MAC_PROP_EN_1000FDX_CAP:
			ngep->param_en_1000fdx = *(uint8_t *)pr_val;
			ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_100FDX_CAP:
			ngep->param_en_100fdx = *(uint8_t *)pr_val;
			ngep->param_adv_100fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_100HDX_CAP:
			ngep->param_en_100hdx = *(uint8_t *)pr_val;
			ngep->param_adv_100hdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_10FDX_CAP:
			ngep->param_en_10fdx = *(uint8_t *)pr_val;
			ngep->param_adv_10fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_10HDX_CAP:
			ngep->param_en_10hdx = *(uint8_t *)pr_val;
			ngep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_ADV_100FDX_CAP:
		case MAC_PROP_ADV_100HDX_CAP:
		case MAC_PROP_ADV_10FDX_CAP:
		case MAC_PROP_ADV_10HDX_CAP:
		case MAC_PROP_STATUS:
		case MAC_PROP_SPEED:
		case MAC_PROP_DUPLEX:
		case MAC_PROP_EN_1000HDX_CAP:
			err = ENOTSUP; /* read-only prop. Can't set this */
			break;
		case MAC_PROP_AUTONEG:
			ngep->param_adv_autoneg = *(uint8_t *)pr_val;
			(*ngep->physops->phys_update)(ngep);
			nge_chip_sync(ngep);
			break;
		case MAC_PROP_MTU:
			cur_mtu = ngep->default_mtu;
			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
			if (new_mtu == cur_mtu) {
				err = 0;
				break;
			}
			if (new_mtu < ETHERMTU ||
			    new_mtu > NGE_MAX_MTU) {
				err = EINVAL;
				break;
			}
			/* jumbo frames only on chips that support them */
			if ((new_mtu > ETHERMTU) &&
			    (!ngep->dev_spec_param.jumbo)) {
				err = EINVAL;
				break;
			}
			/* can't resize rings while the MAC is running */
			if (ngep->nge_mac_state == NGE_MAC_STARTED) {
				err = EBUSY;
				break;
			}

			/*
			 * Pick buffer size, descriptor counts and DMA
			 * split factor appropriate to the new MTU.
			 */
			ngep->default_mtu = new_mtu;
			if (ngep->default_mtu > ETHERMTU &&
			    ngep->default_mtu <= NGE_MTU_2500) {
				ngep->buf_size = NGE_JB2500_BUFSZ;
				ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
				ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
				ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
				ngep->nge_split = NGE_SPLIT_256;
			} else if (ngep->default_mtu > NGE_MTU_2500 &&
			    ngep->default_mtu <= NGE_MTU_4500) {
				ngep->buf_size = NGE_JB4500_BUFSZ;
				ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
				ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
				ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
				ngep->nge_split = NGE_SPLIT_256;
			} else if (ngep->default_mtu > NGE_MTU_4500 &&
			    ngep->default_mtu <= NGE_MAX_MTU) {
				ngep->buf_size = NGE_JB9000_BUFSZ;
				ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
				ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
				ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
				ngep->nge_split = NGE_SPLIT_256;
			} else if (ngep->default_mtu > NGE_MAX_MTU) {
				ngep->default_mtu = NGE_MAX_MTU;
				ngep->buf_size = NGE_JB9000_BUFSZ;
				ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
				ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
				ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
				ngep->nge_split = NGE_SPLIT_256;
			} else if (ngep->lowmem_mode != 0) {
				ngep->default_mtu = ETHERMTU;
				ngep->buf_size = NGE_STD_BUFSZ;
				ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
				ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
				ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
				ngep->nge_split = NGE_SPLIT_32;
			} else {
				ngep->default_mtu = ETHERMTU;
				ngep->buf_size = NGE_STD_BUFSZ;
				ngep->tx_desc =
				    ngep->dev_spec_param.tx_desc_num;
				ngep->rx_desc =
				    ngep->dev_spec_param.rx_desc_num;
				ngep->rx_buf =
				    ngep->dev_spec_param.rx_desc_num * 2;
				ngep->nge_split =
				    ngep->dev_spec_param.nge_split;
			}

			/* tell the MAC layer about the new max SDU */
			err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);

			break;
		case MAC_PROP_FLOWCTRL:
			bcopy(pr_val, &fl, sizeof (fl));
			switch (fl) {
			default:
				err = ENOTSUP;
				break;
			case LINK_FLOWCTRL_NONE:
				ngep->param_adv_pause = 0;
				ngep->param_adv_asym_pause = 0;

				ngep->param_link_rx_pause = B_FALSE;
				ngep->param_link_tx_pause = B_FALSE;
				break;
			case LINK_FLOWCTRL_RX:
				/* requires the link partner's cooperation */
				if (!((ngep->param_lp_pause == 0) &&
				    (ngep->param_lp_asym_pause == 1))) {
					err = EINVAL;
					break;
				}
				ngep->param_adv_pause = 1;
				ngep->param_adv_asym_pause = 1;

				ngep->param_link_rx_pause = B_TRUE;
				ngep->param_link_tx_pause = B_FALSE;
				break;
			case LINK_FLOWCTRL_TX:
				if (!((ngep->param_lp_pause == 1) &&
				    (ngep->param_lp_asym_pause == 1))) {
					err = EINVAL;
					break;
				}
				ngep->param_adv_pause = 0;
				ngep->param_adv_asym_pause = 1;

				ngep->param_link_rx_pause = B_FALSE;
				ngep->param_link_tx_pause = B_TRUE;
				break;
			case LINK_FLOWCTRL_BI:
				if (ngep->param_lp_pause != 1) {
					err = EINVAL;
					break;
				}
				ngep->param_adv_pause = 1;

				ngep->param_link_rx_pause = B_TRUE;
				ngep->param_link_tx_pause = B_TRUE;
				break;
			}

			if (err == 0) {
				(*ngep->physops->phys_update)(ngep);
				nge_chip_sync(ngep);
			}

			break;
		case MAC_PROP_PRIVATE:
			err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
			    pr_val);
			if (err == 0) {
				(*ngep->physops->phys_update)(ngep);
				nge_chip_sync(ngep);
			}
			break;
		default:
			err = ENOTSUP;
	}
	mutex_exit(ngep->genlock);
	return (err);
}
1750 
/*
 * nge_m_getprop() -- GLDv3 mc_getprop(9E) entry point.
 *
 * Copies the current (or default, when MAC_PROP_DEFAULT is set) value
 * of a link property into pr_val.  *perm starts as RW and is lowered
 * to READ for the read-only properties.  Returns 0 or errno.
 */
static int
nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nge_t *ngep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	bzero(pr_val, pr_valsize);

	switch (pr_num) {
		case MAC_PROP_DUPLEX:
			*perm = MAC_PROP_PERM_READ;
			if (pr_valsize >= sizeof (link_duplex_t)) {
				bcopy(&ngep->param_link_duplex, pr_val,
				    sizeof (link_duplex_t));
			} else
				err = EINVAL;
			break;
		case MAC_PROP_SPEED:
			*perm = MAC_PROP_PERM_READ;
			if (pr_valsize >= sizeof (uint64_t)) {
				/* Mbit/s -> bit/s for the MAC layer */
				speed = ngep->param_link_speed * 1000000ull;
				bcopy(&speed, pr_val, sizeof (speed));
			} else
				err = EINVAL;
			break;
		case MAC_PROP_AUTONEG:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_autoneg;
			}
			break;
		case MAC_PROP_FLOWCTRL:
			if (pr_valsize >= sizeof (link_flowctrl_t)) {
				if (pr_flags & MAC_PROP_DEFAULT) {
					fl = LINK_FLOWCTRL_BI;
					bcopy(&fl, pr_val, sizeof (fl));
					break;
				}
				/* map the rx/tx pause pair to a mode */
				if (ngep->param_link_rx_pause &&
				    !ngep->param_link_tx_pause)
					fl = LINK_FLOWCTRL_RX;

				if (!ngep->param_link_rx_pause &&
				    !ngep->param_link_tx_pause)
					fl = LINK_FLOWCTRL_NONE;

				if (!ngep->param_link_rx_pause &&
				    ngep->param_link_tx_pause)
					fl = LINK_FLOWCTRL_TX;

				if (ngep->param_link_rx_pause &&
				    ngep->param_link_tx_pause)
					fl = LINK_FLOWCTRL_BI;
				bcopy(&fl, pr_val, sizeof (fl));
			} else
				err = EINVAL;
			break;
		case MAC_PROP_ADV_1000FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_1000fdx;
			}
			break;
		case MAC_PROP_EN_1000FDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_1000fdx;
			}
			break;
		case MAC_PROP_ADV_1000HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 0;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_1000hdx;
			}
			break;
		case MAC_PROP_EN_1000HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 0;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_1000hdx;
			}
			break;
		case MAC_PROP_ADV_100FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_100fdx;
			}
			break;
		case MAC_PROP_EN_100FDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_100fdx;
			}
			break;
		case MAC_PROP_ADV_100HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_100hdx;
			}
			break;
		case MAC_PROP_EN_100HDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_100hdx;
			}
			break;
		case MAC_PROP_ADV_10FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_10fdx;
			}
			break;
		case MAC_PROP_EN_10FDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_10fdx;
			}
			break;
		case MAC_PROP_ADV_10HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_adv_10hdx;
			}
			break;
		case MAC_PROP_EN_10HDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val = 1;
			} else {
				*(uint8_t *)pr_val = ngep->param_en_10hdx;
			}
			break;
		case MAC_PROP_ADV_100T4_CAP:
		case MAC_PROP_EN_100T4_CAP:
			/* 100BASE-T4 is never supported */
			*perm = MAC_PROP_PERM_READ;
			*(uint8_t *)pr_val = 0;
			break;
		case MAC_PROP_PRIVATE:
			err = nge_get_priv_prop(ngep, pr_name, pr_flags,
			    pr_valsize, pr_val);
			break;
		case MAC_PROP_MTU: {
			mac_propval_range_t range;

			/* only the possible-range query is supported */
			if (!(pr_flags & MAC_PROP_POSSIBLE))
				return (ENOTSUP);
			if (pr_valsize < sizeof (mac_propval_range_t))
				return (EINVAL);
			range.mpr_count = 1;
			range.mpr_type = MAC_PROPVAL_UINT32;
			range.range_uint32[0].mpur_min =
			    range.range_uint32[0].mpur_max = ETHERMTU;
			if (ngep->dev_spec_param.jumbo)
				range.range_uint32[0].mpur_max = NGE_MAX_MTU;
			bcopy(&range, pr_val, sizeof (range));
			break;
		}
		default:
			err = ENOTSUP;
	}
	return (err);
}
1939 
1940 /* ARGSUSED */
1941 static int
1942 nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
1943     const void *pr_val)
1944 {
1945 	int err = 0;
1946 	long result;
1947 
1948 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
1949 		if (pr_val == NULL) {
1950 			err = EINVAL;
1951 			return (err);
1952 		}
1953 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1954 		if (result < 0 || result > NGE_MAX_SDU) {
1955 			err = EINVAL;
1956 		} else {
1957 			ngep->param_txbcopy_threshold = (uint32_t)result;
1958 			goto reprogram;
1959 		}
1960 		return (err);
1961 	}
1962 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
1963 		if (pr_val == NULL) {
1964 			err = EINVAL;
1965 			return (err);
1966 		}
1967 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1968 		if (result < 0 || result > NGE_MAX_SDU) {
1969 			err = EINVAL;
1970 		} else {
1971 			ngep->param_rxbcopy_threshold = (uint32_t)result;
1972 			goto reprogram;
1973 		}
1974 		return (err);
1975 	}
1976 	if (strcmp(pr_name, "_recv_max_packet") == 0) {
1977 		if (pr_val == NULL) {
1978 			err = EINVAL;
1979 			return (err);
1980 		}
1981 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1982 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
1983 			err = EINVAL;
1984 		} else {
1985 			ngep->param_recv_max_packet = (uint32_t)result;
1986 			goto reprogram;
1987 		}
1988 		return (err);
1989 	}
1990 	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
1991 		if (pr_val == NULL) {
1992 			err = EINVAL;
1993 			return (err);
1994 		}
1995 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1996 		if (result < 0 || result > 10000) {
1997 			err = EINVAL;
1998 		} else {
1999 			ngep->param_poll_quiet_time = (uint32_t)result;
2000 			goto reprogram;
2001 		}
2002 		return (err);
2003 	}
2004 	if (strcmp(pr_name, "_poll_busy_time") == 0) {
2005 		if (pr_val == NULL) {
2006 			err = EINVAL;
2007 			return (err);
2008 		}
2009 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2010 		if (result < 0 || result > 10000) {
2011 			err = EINVAL;
2012 		} else {
2013 			ngep->param_poll_busy_time = (uint32_t)result;
2014 			goto reprogram;
2015 		}
2016 		return (err);
2017 	}
2018 	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
2019 		if (pr_val == NULL) {
2020 			err = EINVAL;
2021 			return (err);
2022 		}
2023 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2024 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2025 			err = EINVAL;
2026 		} else {
2027 			ngep->param_rx_intr_hwater = (uint32_t)result;
2028 			goto reprogram;
2029 		}
2030 		return (err);
2031 	}
2032 	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2033 		if (pr_val == NULL) {
2034 			err = EINVAL;
2035 			return (err);
2036 		}
2037 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2038 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2039 			err = EINVAL;
2040 		} else {
2041 			ngep->param_rx_intr_lwater = (uint32_t)result;
2042 			goto reprogram;
2043 		}
2044 		return (err);
2045 	}
2046 	err = ENOTSUP;
2047 	return (err);
2048 
2049 reprogram:
2050 	if (err == 0) {
2051 		(*ngep->physops->phys_update)(ngep);
2052 		nge_chip_sync(ngep);
2053 	}
2054 
2055 	return (err);
2056 }
2057 
2058 static int
2059 nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_flags,
2060     uint_t pr_valsize, void *pr_val)
2061 {
2062 	int err = ENOTSUP;
2063 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
2064 	int value;
2065 
2066 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
2067 		value = (is_default ? NGE_TX_COPY_SIZE :
2068 		    ngep->param_txbcopy_threshold);
2069 		err = 0;
2070 		goto done;
2071 	}
2072 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
2073 		value = (is_default ? NGE_RX_COPY_SIZE :
2074 		    ngep->param_rxbcopy_threshold);
2075 		err = 0;
2076 		goto done;
2077 	}
2078 	if (strcmp(pr_name, "_recv_max_packet") == 0) {
2079 		value = (is_default ? 128 : ngep->param_recv_max_packet);
2080 		err = 0;
2081 		goto done;
2082 	}
2083 	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
2084 		value = (is_default ? NGE_POLL_QUIET_TIME :
2085 		    ngep->param_poll_quiet_time);
2086 		err = 0;
2087 		goto done;
2088 	}
2089 	if (strcmp(pr_name, "_poll_busy_time") == 0) {
2090 		value = (is_default ? NGE_POLL_BUSY_TIME :
2091 		    ngep->param_poll_busy_time);
2092 		err = 0;
2093 		goto done;
2094 	}
2095 	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
2096 		value = (is_default ? 1 : ngep->param_rx_intr_hwater);
2097 		err = 0;
2098 		goto done;
2099 	}
2100 	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2101 		value = (is_default ? 8 : ngep->param_rx_intr_lwater);
2102 		err = 0;
2103 		goto done;
2104 	}
2105 
2106 done:
2107 	if (err == 0) {
2108 		(void) snprintf(pr_val, pr_valsize, "%d", value);
2109 	}
2110 	return (err);
2111 }
2112 
2113 /* ARGSUSED */
2114 static boolean_t
2115 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2116 {
2117 	nge_t	*ngep = arg;
2118 	nge_dev_spec_param_t *dev_param_p;
2119 
2120 	dev_param_p = &ngep->dev_spec_param;
2121 
2122 	switch (cap) {
2123 	case MAC_CAPAB_HCKSUM: {
2124 		uint32_t *hcksum_txflags = cap_data;
2125 
2126 		if (dev_param_p->tx_hw_checksum) {
2127 			*hcksum_txflags = dev_param_p->tx_hw_checksum;
2128 		} else
2129 			return (B_FALSE);
2130 		break;
2131 	}
2132 	default:
2133 		return (B_FALSE);
2134 	}
2135 	return (B_TRUE);
2136 }
2137 
2138 #undef	NGE_DBG
2139 #define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
2140 int
2141 nge_restart(nge_t *ngep)
2142 {
2143 	int err = 0;
2144 	err = nge_reset_dev(ngep);
2145 	/* write back the promisc setting */
2146 	ngep->promisc = ngep->record_promisc;
2147 	nge_chip_sync(ngep);
2148 	if (!err)
2149 		err = nge_chip_start(ngep);
2150 
2151 	if (err) {
2152 		ngep->nge_mac_state = NGE_MAC_STOPPED;
2153 		return (DDI_FAILURE);
2154 	} else {
2155 		ngep->nge_mac_state = NGE_MAC_STARTED;
2156 		return (DDI_SUCCESS);
2157 	}
2158 }
2159 
/*
 * nge_wake_factotum() -- schedule the factotum soft interrupt.
 *
 * factotum_flag (protected by softlock) ensures at most one trigger
 * is outstanding at a time; the handler clears it when it runs.
 */
void
nge_wake_factotum(nge_t *ngep)
{
	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		ngep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
	}
	mutex_exit(ngep->softlock);
}
2170 
2171 void
2172 nge_interrupt_optimize(nge_t *ngep)
2173 {
2174 	uint32_t tx_pkts;
2175 	tx_pkts = ngep->statistics.sw_statistics.xmit_count - ngep->tpkts_last;
2176 	ngep->tpkts_last = ngep->statistics.sw_statistics.xmit_count;
2177 	if ((tx_pkts > NGE_POLL_TUNE) &&
2178 	    (tx_pkts <= NGE_POLL_MAX))
2179 		ngep->tfint_threshold = (tx_pkts / NGE_POLL_ENTER);
2180 	else
2181 		ngep->tfint_threshold = NGE_TFINT_DEFAULT;
2182 }
2183 
2184 /*
2185  * High-level cyclic handler
2186  *
2187  * This routine schedules a (low-level) softint callback to the
2188  * factotum.
2189  */
2190 
2191 static void
2192 nge_chip_cyclic(void *arg)
2193 {
2194 	nge_t *ngep;
2195 
2196 	ngep = (nge_t *)arg;
2197 
2198 	switch (ngep->nge_chip_state) {
2199 	default:
2200 		return;
2201 
2202 	case NGE_CHIP_RUNNING:
2203 		nge_interrupt_optimize(ngep);
2204 		break;
2205 
2206 	case NGE_CHIP_FAULT:
2207 	case NGE_CHIP_ERROR:
2208 		break;
2209 	}
2210 
2211 	nge_wake_factotum(ngep);
2212 }
2213 
2214 /*
2215  * Get/Release semaphore of SMU
2216  * For SMU enabled chipset
2217  * When nge driver is attached, driver should acquire
2218  * semaphore before PHY init and accessing MAC registers.
2219  * When nge driver is unattached, driver should release
2220  * semaphore.
2221  */
2222 
/*
 * nge_smu_sema() -- acquire (acquire == B_TRUE) or release the SMU/MAC
 * hardware semaphore via the NGE_TX_EN register.
 *
 * Acquire: wait (up to 5 x 1s) for the SMU side to be free, then try
 * (up to 5 times) to claim the MAC side and re-read to confirm the
 * claim stuck while the SMU side stayed free.  Release: clear the
 * register.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
nge_smu_sema(nge_t *ngep, boolean_t acquire)
{
	nge_tx_en tx_en;
	uint32_t tries;

	if (acquire) {
		/* wait for the SMU to drop its half of the semaphore */
		for (tries = 0; tries < 5; tries++) {
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
			if (tx_en.bits.smu2mac == NGE_SMU_FREE)
				break;
			delay(drv_usectohz(1000000));
		}
		if (tx_en.bits.smu2mac != NGE_SMU_FREE)
			return (DDI_FAILURE);
		/* claim the MAC half, then read back to confirm it held */
		for (tries = 0; tries < 5; tries++) {
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
			tx_en.bits.mac2smu = NGE_SMU_GET;
			nge_reg_put32(ngep, NGE_TX_EN, tx_en.val);
			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);

			if (tx_en.bits.mac2smu == NGE_SMU_GET &&
			    tx_en.bits.smu2mac == NGE_SMU_FREE)
				return (DDI_SUCCESS);
			drv_usecwait(10);
		}
		return (DDI_FAILURE);
	} else
		/* release: clear both halves of the semaphore register */
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);

	return (DDI_SUCCESS);

}
/*
 * Undo everything nge_attach() set up, in reverse order, guided by
 * the ngep->progress bits: each PROGRESS_* flag records a resource
 * that was successfully acquired and therefore must be released.
 * Called from detach(9E) and from the attach-failure path; frees
 * the nge_t itself on the way out.
 *
 * NOTE(review): PROGRESS_NDD is set in nge_attach() after
 * nge_nd_init(), but no corresponding cleanup is done here —
 * verify whether nge_nd_cleanup() should be called, else the NDD
 * parameter storage may leak.
 */
static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		/* Stop the cyclic first so it cannot race the teardown */
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		/* MCP55 chipsets hold the SMU semaphore; give it back */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
			(void) nge_smu_sema(ngep, B_FALSE);
		}
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	/* Mutexes may only be destroyed after all interrupts are gone */
	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}
2325 
/*
 * DDI_RESUME handler: re-read config space, and — only if the port
 * was plumbed and running when it was suspended — rebuild the rings
 * and restart the chip.  Always returns DDI_SUCCESS; a restart
 * failure is logged but does not fail the resume, so the rest of
 * the system can keep running without this controller.
 */
static int
nge_resume(dev_info_t *devinfo)
{
	nge_t		*ngep;
	chip_info_t	*infop;
	int 		err;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	err = 0;

	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved devinfo");

	/* genlock before rwlock: same order used throughout the driver */
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	/*
	 * Fetch the config space.  Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * Only in one case, this conditional branch can be executed: the port
	 * hasn't been plumbed.
	 */
	if (ngep->suspended == B_FALSE) {
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);
		return (DDI_SUCCESS);
	}

	/* Reclaim all TX buffers, then reset and restart the chip */
	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (!err) {
		err = nge_chip_reset(ngep);
		if (!err)
			err = nge_chip_start(ngep);
	}

	if (err) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	} else {
		ngep->nge_mac_state = NGE_MAC_STARTED;
	}
	ngep->suspended = B_FALSE;

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);

	return (DDI_SUCCESS);
}
2396 
2397 /*
2398  * attach(9E) -- Attach a device to the system
2399  *
2400  * Called once for each board successfully probed.
2401  */
2402 static int
2403 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2404 {
2405 	int		err;
2406 	int		i;
2407 	int		instance;
2408 	caddr_t		regs;
2409 	nge_t		*ngep;
2410 	chip_info_t	*infop;
2411 	mac_register_t	*macp;
2412 
2413 	switch (cmd) {
2414 	default:
2415 		return (DDI_FAILURE);
2416 
2417 	case DDI_RESUME:
2418 		return (nge_resume(devinfo));
2419 
2420 	case DDI_ATTACH:
2421 		break;
2422 	}
2423 
2424 	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
2425 	instance = ddi_get_instance(devinfo);
2426 	ddi_set_driver_private(devinfo, ngep);
2427 	ngep->devinfo = devinfo;
2428 
2429 	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
2430 	    NGE_DRIVER_NAME, instance);
2431 	err = pci_config_setup(devinfo, &ngep->cfg_handle);
2432 	if (err != DDI_SUCCESS) {
2433 		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
2434 		goto attach_fail;
2435 	}
2436 	/*
2437 	 * param_txbcopy_threshold and param_rxbcopy_threshold are tx/rx bcopy
2438 	 * thresholds. Bounds: min 0, max NGE_MAX_SDU
2439 	 */
2440 	ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
2441 	ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;
2442 
2443 	/*
2444 	 * param_recv_max_packet is max packet received per interupt.
2445 	 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024
2446 	 */
2447 	ngep->param_recv_max_packet = 128;
2448 
2449 	/*
2450 	 * param_poll_quiet_time and param_poll_busy_time are quiet/busy time
2451 	 * switch from per packet interrupt to polling interrupt.
2452 	 * Bounds: min 0, max 10000
2453 	 */
2454 	ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
2455 	ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;
2456 	ngep->tfint_threshold = NGE_TFINT_DEFAULT;
2457 	ngep->poll = B_FALSE;
2458 	ngep->ch_intr_mode = B_FALSE;
2459 
2460 	/*
2461 	 * param_rx_intr_hwater/param_rx_intr_lwater: ackets received
2462 	 * to trigger the poll_quiet_time/poll_busy_time counter.
2463 	 * Bounds: min 0, max  NGE_RECV_SLOTS_DESC_1024.
2464 	 */
2465 	ngep->param_rx_intr_hwater = 1;
2466 	ngep->param_rx_intr_lwater = 8;
2467 
2468 
2469 	infop = (chip_info_t *)&ngep->chipinfo;
2470 	nge_chip_cfg_init(ngep, infop, B_FALSE);
2471 	nge_init_dev_spec_param(ngep);
2472 	nge_get_props(ngep);
2473 	ngep->progress |= PROGRESS_CFG;
2474 
2475 	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
2476 	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
2477 	if (err != DDI_SUCCESS) {
2478 		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
2479 		goto attach_fail;
2480 	}
2481 	ngep->io_regs = regs;
2482 	ngep->progress |= PROGRESS_REGS;
2483 
2484 	err = nge_register_intrs_and_init_locks(ngep);
2485 	if (err != DDI_SUCCESS) {
2486 		nge_problem(ngep, "nge_attach:"
2487 		    " register intrs and init locks failed");
2488 		goto attach_fail;
2489 	}
2490 	nge_init_ring_param_lock(ngep);
2491 	ngep->progress |= PROGRESS_INTR;
2492 
2493 	mutex_enter(ngep->genlock);
2494 
2495 	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2496 	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2497 		err = nge_smu_sema(ngep, B_TRUE);
2498 		if (err != DDI_SUCCESS) {
2499 			nge_problem(ngep, "nge_attach: nge_smu_sema() failed");
2500 			goto attach_fail;
2501 		}
2502 	}
2503 	/*
2504 	 * Initialise link state variables
2505 	 * Stop, reset & reinitialise the chip.
2506 	 * Initialise the (internal) PHY.
2507 	 */
2508 	nge_phys_init(ngep);
2509 	ngep->nge_chip_state = NGE_CHIP_INITIAL;
2510 	err = nge_chip_reset(ngep);
2511 	if (err != DDI_SUCCESS) {
2512 		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
2513 		mutex_exit(ngep->genlock);
2514 		goto attach_fail;
2515 	}
2516 	nge_chip_sync(ngep);
2517 
2518 	/*
2519 	 * Now that mutex locks are initialized, enable interrupts.
2520 	 */
2521 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2522 		/* Call ddi_intr_block_enable() for MSI interrupts */
2523 		(void) ddi_intr_block_enable(ngep->htable,
2524 		    ngep->intr_actual_cnt);
2525 	} else {
2526 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
2527 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
2528 			(void) ddi_intr_enable(ngep->htable[i]);
2529 		}
2530 	}
2531 
2532 	ngep->link_state = LINK_STATE_UNKNOWN;
2533 	ngep->progress |= PROGRESS_HWINT;
2534 
2535 	/*
2536 	 * Register NDD-tweakable parameters
2537 	 */
2538 	if (nge_nd_init(ngep)) {
2539 		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
2540 		mutex_exit(ngep->genlock);
2541 		goto attach_fail;
2542 	}
2543 	ngep->progress |= PROGRESS_NDD;
2544 
2545 	/*
2546 	 * Create & initialise named kstats
2547 	 */
2548 	nge_init_kstats(ngep, instance);
2549 	ngep->progress |= PROGRESS_KSTATS;
2550 
2551 	mutex_exit(ngep->genlock);
2552 
2553 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2554 		goto attach_fail;
2555 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2556 	macp->m_driver = ngep;
2557 	macp->m_dip = devinfo;
2558 	macp->m_src_addr = infop->vendor_addr.addr;
2559 	macp->m_callbacks = &nge_m_callbacks;
2560 	macp->m_min_sdu = 0;
2561 	macp->m_max_sdu = ngep->default_mtu;
2562 	macp->m_margin = VTAG_SIZE;
2563 	macp->m_priv_props = nge_priv_props;
2564 	macp->m_priv_prop_count = NGE_MAX_PRIV_PROPS;
2565 	/*
2566 	 * Finally, we're ready to register ourselves with the mac
2567 	 * interface; if this succeeds, we're all ready to start()
2568 	 */
2569 	err = mac_register(macp, &ngep->mh);
2570 	mac_free(macp);
2571 	if (err != 0)
2572 		goto attach_fail;
2573 
2574 	/*
2575 	 * Register a periodical handler.
2576 	 * nge_chip_cyclic() is invoked in kernel context.
2577 	 */
2578 	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
2579 	    NGE_CYCLIC_PERIOD, DDI_IPL_0);
2580 
2581 	ngep->progress |= PROGRESS_READY;
2582 	return (DDI_SUCCESS);
2583 
2584 attach_fail:
2585 	nge_unattach(ngep);
2586 	return (DDI_FAILURE);
2587 }
2588 
2589 static int
2590 nge_suspend(nge_t *ngep)
2591 {
2592 	mutex_enter(ngep->genlock);
2593 	rw_enter(ngep->rwlock, RW_WRITER);
2594 
2595 	/* if the port hasn't been plumbed, just return */
2596 	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
2597 		rw_exit(ngep->rwlock);
2598 		mutex_exit(ngep->genlock);
2599 		return (DDI_SUCCESS);
2600 	}
2601 	ngep->suspended = B_TRUE;
2602 	(void) nge_chip_stop(ngep, B_FALSE);
2603 	ngep->nge_mac_state = NGE_MAC_STOPPED;
2604 
2605 	rw_exit(ngep->rwlock);
2606 	mutex_exit(ngep->genlock);
2607 	return (DDI_SUCCESS);
2608 }
2609 
2610 /*
2611  * detach(9E) -- Detach a device from the system
2612  */
/*
 * detach(9E) handler.  DDI_SUSPEND is handled by nge_suspend();
 * DDI_DETACH waits (up to ~1s) for loaned-up receive buffers to be
 * returned, unregisters from the mac(9E) framework, frees the
 * multicast list and finally tears everything down via
 * nge_unattach().  Returns DDI_FAILURE if buffers are still out or
 * mac_unregister() refuses.
 */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be received.
		 */
		return (nge_suspend(ngep));

	case DDI_DETACH:
		break;
	}

	/* Try to wait all the buffer post to upper layer be released */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If there is any posted buffer, reject to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Recycle the multicast table. mac_unregister() should be called
	 * before it to ensure the multicast table can be used even if
	 * mac_unregister() fails.
	 */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}
2681 
2682 /*
2683  * quiesce(9E) entry point.
2684  *
2685  * This function is called when the system is single-threaded at high
2686  * PIL with preemption disabled. Therefore, this function must not be
2687  * blocked.
2688  *
2689  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2690  * DDI_FAILURE indicates an error condition and should almost never happen.
2691  */
2692 static int
2693 nge_quiesce(dev_info_t *devinfo)
2694 {
2695 	nge_t *ngep;
2696 
2697 	ngep = ddi_get_driver_private(devinfo);
2698 
2699 	if (ngep == NULL)
2700 		return (DDI_FAILURE);
2701 
2702 	/*
2703 	 * Turn off debug tracing
2704 	 */
2705 	nge_debug = 0;
2706 	ngep->debug = 0;
2707 
2708 	nge_restore_mac_addr(ngep);
2709 	(void) nge_chip_stop(ngep, B_FALSE);
2710 
2711 	return (DDI_SUCCESS);
2712 }
2713 
2714 
2715 
2716 /*
2717  * ========== Module Loading Data & Entry Points ==========
2718  */
2719 
/*
 * Device operations: no probe/identify (nulldev), standard
 * attach/detach entry points, MT-safe (D_MP), quiesce supported.
 */
DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    NULL, NULL, D_MP, NULL, nge_quiesce);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

/* Single-linkage module: just the driver above */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};
2733 
2734 
/*
 * _info(9E): report module information (for modinfo(1M)).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2740 
2741 int
2742 _init(void)
2743 {
2744 	int status;
2745 
2746 	mac_init_ops(&nge_dev_ops, "nge");
2747 	status = mod_install(&modlinkage);
2748 	if (status != DDI_SUCCESS)
2749 		mac_fini_ops(&nge_dev_ops);
2750 	else
2751 		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
2752 
2753 	return (status);
2754 }
2755 
2756 int
2757 _fini(void)
2758 {
2759 	int status;
2760 
2761 	status = mod_remove(&modlinkage);
2762 	if (status == DDI_SUCCESS) {
2763 		mac_fini_ops(&nge_dev_ops);
2764 		mutex_destroy(nge_log_mutex);
2765 	}
2766 
2767 	return (status);
2768 }
2769 
2770 /*
2771  * ============ Init MSI/Fixed/SoftInterrupt routines ==============
2772  */
2773 
2774 /*
2775  * Register interrupts and initialize each mutex and condition variables
2776  */
2777 
static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int		err;
	int		intr_types;
	uint_t		soft_prip;
	nge_msi_mask	msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.  Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 * NOTE(review): the gld_sched() reference looks stale — the
	 * driver registers through mac(9E); verify the actual callback.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	/* Remember the softint priority for initializing softlock below */
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remapping the MSI MAP0 and MAP1. MCP55
			 * by default maps all the interrupts to vector 0.
			 * Software needs to remap this.
			 * This mapping is the same as CK804.
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		/* Prefer MSI; fall back to FIXED below if it fails */
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}


	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	/*
	 * Mutexes must be initialized with the interrupt priority
	 * determined by nge_add_intrs() above.
	 */
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}
2941 
2942 /*
2943  * nge_add_intrs:
2944  *
2945  * Register FIXED or MSI interrupts.
2946  */
2947 static int
2948 nge_add_intrs(nge_t *ngep, int	intr_type)
2949 {
2950 	dev_info_t	*dip = ngep->devinfo;
2951 	int		avail, actual, intr_size, count = 0;
2952 	int		i, flag, ret;
2953 
2954 	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2955 
2956 	/* Get number of interrupts */
2957 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2958 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2959 		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2960 		    "count: %d", ret, count);
2961 
2962 		return (DDI_FAILURE);
2963 	}
2964 
2965 	/* Get number of available interrupts */
2966 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2967 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2968 		nge_error(ngep, "ddi_intr_get_navail() failure, "
2969 		    "ret: %d, avail: %d\n", ret, avail);
2970 
2971 		return (DDI_FAILURE);
2972 	}
2973 
2974 	if (avail < count) {
2975 		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
2976 		    count, avail));
2977 	}
2978 	flag = DDI_INTR_ALLOC_NORMAL;
2979 
2980 	/* Allocate an array of interrupt handles */
2981 	intr_size = count * sizeof (ddi_intr_handle_t);
2982 	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2983 
2984 	/* Call ddi_intr_alloc() */
2985 	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2986 	    count, &actual, flag);
2987 
2988 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
2989 		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
2990 
2991 		kmem_free(ngep->htable, intr_size);
2992 		return (DDI_FAILURE);
2993 	}
2994 
2995 	if (actual < count) {
2996 		NGE_DEBUG(("Requested: %d, Received: %d\n",
2997 		    count, actual));
2998 	}
2999 
3000 	ngep->intr_actual_cnt = actual;
3001 	ngep->intr_req_cnt = count;
3002 
3003 	/*
3004 	 * Get priority for first msi, assume remaining are all the same
3005 	 */
3006 	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
3007 	    DDI_SUCCESS) {
3008 		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
3009 
3010 		/* Free already allocated intr */
3011 		for (i = 0; i < actual; i++) {
3012 			(void) ddi_intr_free(ngep->htable[i]);
3013 		}
3014 
3015 		kmem_free(ngep->htable, intr_size);
3016 
3017 		return (DDI_FAILURE);
3018 	}
3019 	/* Test for high level mutex */
3020 	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
3021 		nge_error(ngep, "nge_add_intrs:"
3022 		    "Hi level interrupt not supported");
3023 
3024 		for (i = 0; i < actual; i++)
3025 			(void) ddi_intr_free(ngep->htable[i]);
3026 
3027 		kmem_free(ngep->htable, intr_size);
3028 
3029 		return (DDI_FAILURE);
3030 	}
3031 
3032 
3033 	/* Call ddi_intr_add_handler() */
3034 	for (i = 0; i < actual; i++) {
3035 		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
3036 		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
3037 			nge_error(ngep, "ddi_intr_add_handler() "
3038 			    "failed %d\n", ret);
3039 
3040 			/* Free already allocated intr */
3041 			for (i = 0; i < actual; i++) {
3042 				(void) ddi_intr_free(ngep->htable[i]);
3043 			}
3044 
3045 			kmem_free(ngep->htable, intr_size);
3046 
3047 			return (DDI_FAILURE);
3048 		}
3049 	}
3050 
3051 	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
3052 	    != DDI_SUCCESS) {
3053 		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
3054 
3055 		for (i = 0; i < actual; i++) {
3056 			(void) ddi_intr_remove_handler(ngep->htable[i]);
3057 			(void) ddi_intr_free(ngep->htable[i]);
3058 		}
3059 
3060 		kmem_free(ngep->htable, intr_size);
3061 
3062 		return (DDI_FAILURE);
3063 	}
3064 
3065 	return (DDI_SUCCESS);
3066 }
3067 
3068 /*
3069  * nge_rem_intrs:
3070  *
3071  * Unregister FIXED or MSI interrupts
3072  */
3073 static void
3074 nge_rem_intrs(nge_t *ngep)
3075 {
3076 	int	i;
3077 
3078 	NGE_DEBUG(("nge_rem_intrs\n"));
3079 
3080 	/* Disable all interrupts */
3081 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
3082 		/* Call ddi_intr_block_disable() */
3083 		(void) ddi_intr_block_disable(ngep->htable,
3084 		    ngep->intr_actual_cnt);
3085 	} else {
3086 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
3087 			(void) ddi_intr_disable(ngep->htable[i]);
3088 		}
3089 	}
3090 
3091 	/* Call ddi_intr_remove_handler() */
3092 	for (i = 0; i < ngep->intr_actual_cnt; i++) {
3093 		(void) ddi_intr_remove_handler(ngep->htable[i]);
3094 		(void) ddi_intr_free(ngep->htable[i]);
3095 	}
3096 
3097 	kmem_free(ngep->htable,
3098 	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
3099 }
3100