xref: /titanic_41/usr/src/uts/common/io/nge/nge_main.c (revision c6d6228cbba828ab5b2b6db6c280a44b2d841653)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include "nge.h"
29 
30 /*
31  * Describes the chip's DMA engine
32  */
33 
34 static ddi_dma_attr_t hot_dma_attr = {
35 	DMA_ATTR_V0,			/* dma_attr version	*/
36 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
37 	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
38 	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
39 	0x0000000000000010ull,		/* dma_attr_align	*/
40 	0x00000FFF,			/* dma_attr_burstsizes	*/
41 	0x00000001,			/* dma_attr_minxfer	*/
42 	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
43 	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
44 	1,				/* dma_attr_sgllen 	*/
45 	0x00000001,			/* dma_attr_granular 	*/
46 	0
47 };
48 
49 static ddi_dma_attr_t hot_tx_dma_attr = {
50 	DMA_ATTR_V0,			/* dma_attr version	*/
51 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
52 	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
53 	0x0000000000003FFFull,		/* dma_attr_count_max	*/
54 	0x0000000000000010ull,		/* dma_attr_align	*/
55 	0x00000FFF,			/* dma_attr_burstsizes	*/
56 	0x00000001,			/* dma_attr_minxfer	*/
57 	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
58 	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
59 	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
60 	1,				/* dma_attr_granular 	*/
61 	0
62 };
63 
64 static ddi_dma_attr_t sum_dma_attr = {
65 	DMA_ATTR_V0,			/* dma_attr version	*/
66 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
67 	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
68 	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
69 	0x0000000000000010ull,		/* dma_attr_align	*/
70 	0x00000FFF,			/* dma_attr_burstsizes	*/
71 	0x00000001,			/* dma_attr_minxfer	*/
72 	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
73 	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
74 	1,				/* dma_attr_sgllen 	*/
75 	0x00000001,			/* dma_attr_granular 	*/
76 	0
77 };
78 
79 static ddi_dma_attr_t sum_tx_dma_attr = {
80 	DMA_ATTR_V0,			/* dma_attr version	*/
81 	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
82 	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
83 	0x0000000000003FFFull,		/* dma_attr_count_max	*/
84 	0x0000000000000010ull,		/* dma_attr_align	*/
85 	0x00000FFF,			/* dma_attr_burstsizes	*/
86 	0x00000001,			/* dma_attr_minxfer	*/
87 	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
88 	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
89 	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
90 	1,				/* dma_attr_granular 	*/
91 	0
92 };
93 
94 /*
95  * DMA access attributes for data.
96  */
97 ddi_device_acc_attr_t nge_data_accattr = {
98 	DDI_DEVICE_ATTR_V0,
99 	DDI_STRUCTURE_LE_ACC,
100 	DDI_STRICTORDER_ACC,
101 	DDI_DEFAULT_ACC
102 };
103 
104 /*
105  * DMA access attributes for descriptors.
106  */
107 static ddi_device_acc_attr_t nge_desc_accattr = {
108 	DDI_DEVICE_ATTR_V0,
109 	DDI_STRUCTURE_LE_ACC,
110 	DDI_STRICTORDER_ACC,
111 	DDI_DEFAULT_ACC
112 };
113 
114 /*
115  * PIO access attributes for registers
116  */
117 static ddi_device_acc_attr_t nge_reg_accattr = {
118 	DDI_DEVICE_ATTR_V0,
119 	DDI_STRUCTURE_LE_ACC,
120 	DDI_STRICTORDER_ACC,
121 	DDI_DEFAULT_ACC
122 };
123 
124 /*
125  * NIC DESC MODE 2
126  */
127 
128 static const nge_desc_attr_t nge_sum_desc = {
129 
130 	sizeof (sum_rx_bd),
131 	sizeof (sum_tx_bd),
132 	&sum_dma_attr,
133 	&sum_tx_dma_attr,
134 	nge_sum_rxd_fill,
135 	nge_sum_rxd_check,
136 	nge_sum_txd_fill,
137 	nge_sum_txd_check,
138 };
139 
140 /*
141  * NIC DESC MODE 3
142  */
143 
144 static const nge_desc_attr_t nge_hot_desc = {
145 
146 	sizeof (hot_rx_bd),
147 	sizeof (hot_tx_bd),
148 	&hot_dma_attr,
149 	&hot_tx_dma_attr,
150 	nge_hot_rxd_fill,
151 	nge_hot_rxd_check,
152 	nge_hot_txd_fill,
153 	nge_hot_txd_check,
154 };
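/*
 * The two nge_desc_attr_t tables above act as per-mode descriptor
 * "vtables": nge_sum_desc (NIC desc mode 2) pairs the sum_* descriptor
 * sizes and fill/check routines with the 32-bit sum_*_dma_attr limits,
 * while nge_hot_desc (NIC desc mode 3) pairs the hot_* routines with the
 * 40-bit hot_*_dma_attr limits.  nge_check_desc_prop() below copies one
 * of them into ngep->desc_attr according to the configured desc-mode.
 */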
155 
156 static char nge_ident[] = "nVidia 1Gb Ethernet";
157 static char clsize_propname[] = "cache-line-size";
158 static char latency_propname[] = "latency-timer";
159 static char debug_propname[]	= "nge-debug-flags";
160 static char intr_moderation[] = "intr-moderation";
161 static char rx_data_hw[] = "rx-data-hw";
162 static char rx_prd_lw[] = "rx-prd-lw";
163 static char rx_prd_hw[] = "rx-prd-hw";
164 static char sw_intr_intv[] = "sw-intr-intvl";
165 static char nge_desc_mode[] = "desc-mode";
166 static char default_mtu[] = "default_mtu";
167 static char low_memory_mode[] = "minimal-memory-usage";
168 extern kmutex_t nge_log_mutex[1];
169 
170 static int		nge_m_start(void *);
171 static void		nge_m_stop(void *);
172 static int		nge_m_promisc(void *, boolean_t);
173 static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
174 static int		nge_m_unicst(void *, const uint8_t *);
175 static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
176 static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);
177 static int		nge_m_setprop(void *, const char *, mac_prop_id_t,
178 	uint_t, const void *);
179 static int		nge_m_getprop(void *, const char *, mac_prop_id_t,
180 	uint_t, uint_t, void *, uint_t *);
181 static int		nge_set_priv_prop(nge_t *, const char *, uint_t,
182 	const void *);
183 static int		nge_get_priv_prop(nge_t *, const char *, uint_t,
184 	uint_t, void *);
185 
186 #define		NGE_M_CALLBACK_FLAGS\
187 		(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
188 
189 static mac_callbacks_t nge_m_callbacks = {
190 	NGE_M_CALLBACK_FLAGS,
191 	nge_m_stat,
192 	nge_m_start,
193 	nge_m_stop,
194 	nge_m_promisc,
195 	nge_m_multicst,
196 	nge_m_unicst,
197 	nge_m_tx,
198 	nge_m_ioctl,
199 	nge_m_getcapab,
200 	NULL,
201 	NULL,
202 	nge_m_setprop,
203 	nge_m_getprop
204 };
205 
206 mac_priv_prop_t nge_priv_props[] = {
207 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
208 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
209 	{"_recv_max_packet", MAC_PROP_PERM_RW},
210 	{"_poll_quiet_time", MAC_PROP_PERM_RW},
211 	{"_poll_busy_time", MAC_PROP_PERM_RW},
212 	{"_rx_intr_hwater", MAC_PROP_PERM_RW},
213 	{"_rx_intr_lwater", MAC_PROP_PERM_RW},
214 	{"_adv_pause_cap", MAC_PROP_PERM_RW},
215 	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
216 	{"_tx_n_intr", MAC_PROP_PERM_RW}
217 };
218 
219 #define	NGE_MAX_PRIV_PROPS \
220 	(sizeof (nge_priv_props)/sizeof (mac_priv_prop_t))
221 
222 static int nge_add_intrs(nge_t *, int);
223 static void nge_rem_intrs(nge_t *);
224 static int nge_register_intrs_and_init_locks(nge_t *);
225 
226 /*
227  * NGE MSI tunable:
228  */
229 boolean_t nge_enable_msi = B_FALSE;
230 
231 static enum ioc_reply
232 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
233 {
234 	/*
235 	 * If the mode isn't being changed, there's nothing to do ...
236 	 */
237 	if (mode == ngep->param_loop_mode)
238 		return (IOC_ACK);
239 
240 	/*
241 	 * Validate the requested mode and prepare a suitable message
242 	 * to explain the link down/up cycle that the change will
243 	 * probably induce ...
244 	 */
245 	switch (mode) {
246 	default:
247 		return (IOC_INVAL);
248 
249 	case NGE_LOOP_NONE:
250 	case NGE_LOOP_EXTERNAL_100:
251 	case NGE_LOOP_EXTERNAL_10:
252 	case NGE_LOOP_INTERNAL_PHY:
253 		break;
254 	}
255 
256 	/*
257 	 * All OK; tell the caller to reprogram
258 	 * the PHY and/or MAC for the new mode ...
259 	 */
260 	ngep->param_loop_mode = mode;
261 	return (IOC_RESTART_ACK);
262 }
263 
264 #undef	NGE_DBG
265 #define	NGE_DBG		NGE_DBG_INIT
266 
267 /*
268  * Utility routine to carve a slice off a chunk of allocated memory,
269  * updating the chunk descriptor accordingly.  The size of the slice
270  * is given by the product of the <qty> and <size> parameters.
271  */
272 void
273 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
274     uint32_t qty, uint32_t size)
275 {
276 	size_t totsize;
277 
278 	totsize = qty*size;
279 	ASSERT(size > 0);
280 	ASSERT(totsize <= chunk->alength);
281 
282 	*slice = *chunk;
283 	slice->nslots = qty;
284 	slice->size = size;
285 	slice->alength = totsize;
286 
287 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
288 	chunk->alength -= totsize;
289 	chunk->offset += totsize;
290 	chunk->cookie.dmac_laddress += totsize;
291 	chunk->cookie.dmac_size -= totsize;
292 }
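/*
 * As an illustration only (not driver code): callers normally use
 * nge_slice_chunk() to carve one dma_area_t per ring slot out of a chunk
 * that nge_alloc_dma_mem() has already bound, as nge_init_send_ring()
 * does below.  The names ring_area, slot_area, NSLOTS and DESC_SZ here
 * are hypothetical:
 *
 *	dma_area_t ring_area;
 *	dma_area_t slot_area[NSLOTS];
 *	uint32_t i;
 *
 *	for (i = 0; i < NSLOTS; ++i)
 *		nge_slice_chunk(&slot_area[i], &ring_area, 1, DESC_SZ);
 *
 * After the loop, ring_area's mem_va, offset and cookie have advanced by
 * NSLOTS * DESC_SZ bytes and its alength has shrunk by the same amount.
 */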
293 
294 /*
295  * Allocate an area of memory and a DMA handle for accessing it
296  */
297 int
298 nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
299     uint_t dma_flags, dma_area_t *dma_p)
300 {
301 	int err;
302 	caddr_t va;
303 
304 	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
305 	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
306 	/*
307 	 * Allocate handle
308 	 */
309 	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
310 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
311 	if (err != DDI_SUCCESS)
312 		goto fail;
313 
314 	/*
315 	 * Allocate memory
316 	 */
317 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
318 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
319 	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
320 	if (err != DDI_SUCCESS)
321 		goto fail;
322 
323 	/*
324 	 * Bind the two together
325 	 */
326 	dma_p->mem_va = va;
327 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
328 	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
329 	    &dma_p->cookie, &dma_p->ncookies);
330 
331 	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
332 		goto fail;
333 
334 	dma_p->nslots = ~0U;
335 	dma_p->size = ~0U;
336 	dma_p->offset = 0;
337 
338 	return (DDI_SUCCESS);
339 
340 fail:
341 	nge_free_dma_mem(dma_p);
342 	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));
343 
344 	return (DDI_FAILURE);
345 }
346 
347 /*
348  * Free one allocated area of DMAable memory
349  */
350 void
351 nge_free_dma_mem(dma_area_t *dma_p)
352 {
353 	if (dma_p->dma_hdl != NULL) {
354 		if (dma_p->ncookies) {
355 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
356 			dma_p->ncookies = 0;
357 		}
358 	}
359 	if (dma_p->acc_hdl != NULL) {
360 		ddi_dma_mem_free(&dma_p->acc_hdl);
361 		dma_p->acc_hdl = NULL;
362 	}
363 	if (dma_p->dma_hdl != NULL) {
364 		ddi_dma_free_handle(&dma_p->dma_hdl);
365 		dma_p->dma_hdl = NULL;
366 	}
367 }
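/*
 * nge_alloc_dma_mem() and nge_free_dma_mem() follow the standard DDI
 * three-step DMA setup: ddi_dma_alloc_handle(9F), ddi_dma_mem_alloc(9F)
 * and ddi_dma_addr_bind_handle(9F), torn down in the reverse order.  A
 * minimal, driver-independent sketch of the same sequence, assuming a
 * hypothetical dev_info_t *dip, size_t size, ddi_dma_attr_t attr and
 * ddi_device_acc_attr_t acc (error handling omitted):
 *
 *	ddi_dma_handle_t hdl;
 *	ddi_acc_handle_t acc_hdl;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ncookies;
 *	caddr_t va;
 *	size_t real_len;
 *
 *	(void) ddi_dma_alloc_handle(dip, &attr, DDI_DMA_DONTWAIT, NULL, &hdl);
 *	(void) ddi_dma_mem_alloc(hdl, size, &acc, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_DONTWAIT, NULL, &va, &real_len, &acc_hdl);
 *	(void) ddi_dma_addr_bind_handle(hdl, NULL, va, real_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 *	    &cookie, &ncookies);
 *
 *	(void) ddi_dma_unbind_handle(hdl);
 *	ddi_dma_mem_free(&acc_hdl);
 *	ddi_dma_free_handle(&hdl);
 *
 * Note that nge_alloc_dma_mem() additionally insists on ncookies == 1,
 * i.e. a physically contiguous binding.
 */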
368 
369 #define	ALLOC_TX_BUF	0x1
370 #define	ALLOC_TX_DESC	0x2
371 #define	ALLOC_RX_DESC	0x4
372 
373 int
374 nge_alloc_bufs(nge_t *ngep)
375 {
376 	int err;
377 	int split;
378 	int progress;
379 	size_t txbuffsize;
380 	size_t rxdescsize;
381 	size_t txdescsize;
382 
383 	txbuffsize = ngep->tx_desc * ngep->buf_size;
384 	rxdescsize = ngep->rx_desc;
385 	txdescsize = ngep->tx_desc;
386 	rxdescsize *= ngep->desc_attr.rxd_size;
387 	txdescsize *= ngep->desc_attr.txd_size;
388 	progress = 0;
389 
390 	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
391 	/*
392 	 * Allocate memory & handles for TX buffers
393 	 */
394 	ASSERT((txbuffsize % ngep->nge_split) == 0);
395 	for (split = 0; split < ngep->nge_split; ++split) {
396 		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
397 		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
398 		    &ngep->send->buf[split]);
399 		if (err != DDI_SUCCESS)
400 			goto fail;
401 	}
402 
403 	progress |= ALLOC_TX_BUF;
404 
405 	/*
406 	 * Allocate memory & handles for receive return rings and
407 	 * buffer (producer) descriptor rings
408 	 */
409 	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
410 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
411 	if (err != DDI_SUCCESS)
412 		goto fail;
413 	progress |= ALLOC_RX_DESC;
414 
415 	/*
416 	 * Allocate memory & handles for TX descriptor rings,
417 	 */
418 	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
419 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
420 	if (err != DDI_SUCCESS)
421 		goto fail;
422 	return (DDI_SUCCESS);
423 
424 fail:
425 	if (progress & ALLOC_RX_DESC)
426 		nge_free_dma_mem(&ngep->recv->desc);
427 	if (progress & ALLOC_TX_BUF) {
428 		for (split = 0; split < ngep->nge_split; ++split)
429 			nge_free_dma_mem(&ngep->send->buf[split]);
430 	}
431 
432 	return (DDI_FAILURE);
433 }
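/*
 * The 'progress' bitmask above is the usual staged-allocation idiom: each
 * successful step sets a flag so that the common failure path frees only
 * what was actually allocated.  A minimal sketch of the pattern with
 * hypothetical steps GOT_A/GOT_B:
 *
 *	progress = 0;
 *	if (alloc_a() != DDI_SUCCESS)
 *		goto fail;
 *	progress |= GOT_A;
 *	if (alloc_b() != DDI_SUCCESS)
 *		goto fail;
 *	progress |= GOT_B;
 *	return (DDI_SUCCESS);
 *  fail:
 *	if (progress & GOT_B)
 *		free_b();
 *	if (progress & GOT_A)
 *		free_a();
 *	return (DDI_FAILURE);
 */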
434 
435 /*
436  * This routine frees the transmit and receive buffers and descriptors.
437  * Make sure the chip is stopped before calling it!
438  */
439 void
440 nge_free_bufs(nge_t *ngep)
441 {
442 	int split;
443 
444 	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
445 
446 	nge_free_dma_mem(&ngep->recv->desc);
447 	nge_free_dma_mem(&ngep->send->desc);
448 
449 	for (split = 0; split < ngep->nge_split; ++split)
450 		nge_free_dma_mem(&ngep->send->buf[split]);
451 }
452 
453 /*
454  * Clean up initialisation done above before the memory is freed
455  */
456 static void
457 nge_fini_send_ring(nge_t *ngep)
458 {
459 	uint32_t slot;
460 	size_t dmah_num;
461 	send_ring_t *srp;
462 	sw_tx_sbd_t *ssbdp;
463 
464 	srp = ngep->send;
465 	ssbdp = srp->sw_sbds;
466 
467 	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
468 
469 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
470 
471 	for (slot = 0; slot < dmah_num; ++slot) {
472 		if (srp->dmahndl[slot].hndl) {
473 			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
474 			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
475 			srp->dmahndl[slot].hndl = NULL;
476 			srp->dmahndl[slot].next = NULL;
477 		}
478 	}
479 
480 	srp->dmah_free.head = NULL;
481 	srp->dmah_free.tail = NULL;
482 
483 	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
484 
485 }
486 
487 /*
488  * Initialise the specified Send Ring, using the information in the
489  * <dma_area> descriptors that it contains to set up all the other
490  * fields. This routine should be called only once for each ring.
491  */
492 static int
493 nge_init_send_ring(nge_t *ngep)
494 {
495 	size_t dmah_num;
496 	uint32_t nslots;
497 	uint32_t err;
498 	uint32_t slot;
499 	uint32_t split;
500 	send_ring_t *srp;
501 	sw_tx_sbd_t *ssbdp;
502 	dma_area_t desc;
503 	dma_area_t pbuf;
504 
505 	srp = ngep->send;
506 	srp->desc.nslots = ngep->tx_desc;
507 	nslots = srp->desc.nslots;
508 
509 	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
510 	/*
511 	 * Other one-off initialisation of per-ring data
512 	 */
513 	srp->ngep = ngep;
514 
515 	/*
516 	 * Allocate the array of s/w Send Buffer Descriptors
517 	 */
518 	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
519 	srp->sw_sbds = ssbdp;
520 
521 	/*
522 	 * Now initialise each array element once and for all
523 	 */
524 	desc = srp->desc;
525 	for (split = 0; split < ngep->nge_split; ++split) {
526 		pbuf = srp->buf[split];
527 		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
528 			nge_slice_chunk(&ssbdp->desc, &desc, 1,
529 			    ngep->desc_attr.txd_size);
530 			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
531 			    ngep->buf_size);
532 		}
533 		ASSERT(pbuf.alength == 0);
534 	}
535 	ASSERT(desc.alength == 0);
536 
537 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
538 
539 	/* preallocate dma handles for tx buffer */
540 	for (slot = 0; slot < dmah_num; ++slot) {
541 
542 		err = ddi_dma_alloc_handle(ngep->devinfo,
543 		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
544 		    NULL, &srp->dmahndl[slot].hndl);
545 
546 		if (err != DDI_SUCCESS) {
547 			nge_fini_send_ring(ngep);
548 			nge_error(ngep,
549 			    "nge_init_send_ring: alloc dma handle fails");
550 			return (DDI_FAILURE);
551 		}
552 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
553 	}
554 
555 	srp->dmah_free.head = srp->dmahndl;
556 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
557 	srp->dmah_free.tail->next = NULL;
558 
559 	return (DDI_SUCCESS);
560 }
561 
562 /*
563  * Initialize the TX recycle pointer and TX sending pointer of the TX ring
564  * and set each TX data descriptor to its default (host-owned) state.
565  */
566 static void
567 nge_reinit_send_ring(nge_t *ngep)
568 {
569 	size_t dmah_num;
570 	uint32_t slot;
571 	send_ring_t *srp;
572 	sw_tx_sbd_t *ssbdp;
573 
574 	srp = ngep->send;
575 
576 	/*
577 	 * Reinitialise control variables ...
578 	 */
579 
580 	srp->tx_hwmark = NGE_DESC_MIN;
581 	srp->tx_lwmark = NGE_DESC_MIN;
582 
583 	srp->tx_next = 0;
584 	srp->tx_free = srp->desc.nslots;
585 	srp->tc_next = 0;
586 
587 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
588 
589 	for (slot = 0; slot < dmah_num; ++slot)
590 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
591 
592 	srp->dmah_free.head = srp->dmahndl;
593 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
594 	srp->dmah_free.tail->next = NULL;
595 
596 	/*
597 	 * Zero and sync all the h/w Send Buffer Descriptors
598 	 */
599 	for (slot = 0; slot < srp->desc.nslots; ++slot) {
600 		ssbdp = &srp->sw_sbds[slot];
601 		ssbdp->flags = HOST_OWN;
602 	}
603 
604 	DMA_ZERO(srp->desc);
605 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
606 }
607 
608 /*
609  * Initialize the slot count of the RX ring
610  */
611 static void
612 nge_init_recv_ring(nge_t *ngep)
613 {
614 	recv_ring_t *rrp;
615 
616 	rrp = ngep->recv;
617 	rrp->desc.nslots = ngep->rx_desc;
618 	rrp->ngep = ngep;
619 }
620 
621 /*
622  * Initialize the RX recycle pointer and RX sending pointer of the RX ring
623  */
624 static void
625 nge_reinit_recv_ring(nge_t *ngep)
626 {
627 	recv_ring_t *rrp;
628 
629 	rrp = ngep->recv;
630 
631 	/*
632 	 * Reinitialise control variables ...
633 	 */
634 	rrp->prod_index = 0;
635 	/*
636 	 * Zero and sync all the h/w Receive Buffer Descriptors
637 	 */
638 	DMA_ZERO(rrp->desc);
639 	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
640 }
641 
642 /*
643  * Clean up initialisation done above before the memory is freed
644  */
645 static void
646 nge_fini_buff_ring(nge_t *ngep)
647 {
648 	uint32_t i;
649 	buff_ring_t *brp;
650 	dma_area_t *bufp;
651 	sw_rx_sbd_t *bsbdp;
652 
653 	brp = ngep->buff;
654 	bsbdp = brp->sw_rbds;
655 
656 	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
657 
658 	mutex_enter(brp->recycle_lock);
659 	brp->buf_sign++;
660 	mutex_exit(brp->recycle_lock);
661 	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
662 		if (bsbdp->bufp) {
663 			if (bsbdp->bufp->mp)
664 				freemsg(bsbdp->bufp->mp);
665 			nge_free_dma_mem(bsbdp->bufp);
666 			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
667 			bsbdp->bufp = NULL;
668 		}
669 	}
670 	while (brp->free_list != NULL) {
671 		bufp = brp->free_list;
672 		brp->free_list = bufp->next;
673 		bufp->next = NULL;
674 		if (bufp->mp)
675 			freemsg(bufp->mp);
676 		nge_free_dma_mem(bufp);
677 		kmem_free(bufp, sizeof (dma_area_t));
678 	}
679 	while (brp->recycle_list != NULL) {
680 		bufp = brp->recycle_list;
681 		brp->recycle_list = bufp->next;
682 		bufp->next = NULL;
683 		if (bufp->mp)
684 			freemsg(bufp->mp);
685 		nge_free_dma_mem(bufp);
686 		kmem_free(bufp, sizeof (dma_area_t));
687 	}
688 
689 
690 	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
691 	brp->sw_rbds = NULL;
692 }
693 
694 /*
695  * Initialize the RX data ring and free ring
696  */
697 static int
698 nge_init_buff_ring(nge_t *ngep)
699 {
700 	uint32_t err;
701 	uint32_t slot;
702 	uint32_t nslots_buff;
703 	uint32_t nslots_recv;
704 	buff_ring_t *brp;
705 	recv_ring_t *rrp;
706 	dma_area_t desc;
707 	dma_area_t *bufp;
708 	sw_rx_sbd_t *bsbdp;
709 
710 	rrp = ngep->recv;
711 	brp = ngep->buff;
712 	brp->nslots = ngep->rx_buf;
713 	brp->rx_bcopy = B_FALSE;
714 	nslots_recv = rrp->desc.nslots;
715 	nslots_buff = brp->nslots;
716 	brp->ngep = ngep;
717 
718 	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
719 
720 	/*
721 	 * Allocate the array of s/w Recv Buffer Descriptors
722 	 */
723 	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
724 	brp->sw_rbds = bsbdp;
725 	brp->free_list = NULL;
726 	brp->recycle_list = NULL;
727 	for (slot = 0; slot < nslots_buff; ++slot) {
728 		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
729 		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
730 		    + NGE_HEADROOM),
731 		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
732 		if (err != DDI_SUCCESS) {
733 			kmem_free(bufp, sizeof (dma_area_t));
734 			return (DDI_FAILURE);
735 		}
736 
737 		bufp->alength -= NGE_HEADROOM;
738 		bufp->offset += NGE_HEADROOM;
739 		bufp->private = (caddr_t)ngep;
740 		bufp->rx_recycle.free_func = nge_recv_recycle;
741 		bufp->rx_recycle.free_arg = (caddr_t)bufp;
742 		bufp->signature = brp->buf_sign;
743 		bufp->rx_delivered = B_FALSE;
744 		bufp->mp = desballoc(DMA_VPTR(*bufp),
745 		    ngep->buf_size + NGE_HEADROOM,
746 		    0, &bufp->rx_recycle);
747 
748 		if (bufp->mp == NULL) {
749 			return (DDI_FAILURE);
750 		}
751 		bufp->next = brp->free_list;
752 		brp->free_list = bufp;
753 	}
754 
755 	/*
756 	 * Now initialise each array element once and for all
757 	 */
758 	desc = rrp->desc;
759 	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
760 		nge_slice_chunk(&bsbdp->desc, &desc, 1,
761 		    ngep->desc_attr.rxd_size);
762 		bufp = brp->free_list;
763 		brp->free_list = bufp->next;
764 		bsbdp->bufp = bufp;
765 		bsbdp->flags = CONTROLER_OWN;
766 		bufp->next = NULL;
767 	}
768 
769 	ASSERT(desc.alength == 0);
770 	return (DDI_SUCCESS);
771 }
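/*
 * Each receive buffer above is wrapped in an mblk by desballoc(9F); the
 * frtn_t free routine (nge_recv_recycle) runs when the upper layers
 * finally freemsg() a loaned-up packet, so the buffer is recycled rather
 * than destroyed.  A minimal sketch of that contract, with hypothetical
 * names my_buf_t/my_recycle; the frtn_t must remain valid for the life
 * of the mblk, which is why it is embedded in the buffer itself:
 *
 *	typedef struct my_buf {
 *		caddr_t	kaddr;
 *		frtn_t	frtn;
 *	} my_buf_t;
 *
 *	void my_recycle(caddr_t arg);
 *
 *	buf->frtn.free_func = my_recycle;
 *	buf->frtn.free_arg = (caddr_t)buf;
 *	mp = desballoc((uchar_t *)buf->kaddr, bufsize, 0, &buf->frtn);
 *
 * A later freemsg(mp) then calls my_recycle((caddr_t)buf) instead of
 * freeing the data area.
 */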
772 
773 /*
774  * Fill the host addresses of the data buffers into the RX descriptors
775  * and initialize the free pointers of the RX free ring
776  */
777 static int
778 nge_reinit_buff_ring(nge_t *ngep)
779 {
780 	uint32_t slot;
781 	uint32_t nslots_recv;
782 	buff_ring_t *brp;
783 	recv_ring_t *rrp;
784 	sw_rx_sbd_t *bsbdp;
785 	void *hw_bd_p;
786 
787 	brp = ngep->buff;
788 	rrp = ngep->recv;
789 	bsbdp = brp->sw_rbds;
790 	nslots_recv = rrp->desc.nslots;
791 	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
792 		hw_bd_p = DMA_VPTR(bsbdp->desc);
793 	/*
794 	 * There is a scenario: when the traffic of small TCP
795 	 * packets is heavy, suspending the TCP traffic can leave
796 	 * the preallocated RX buffers unreleased by TCP for a
797 	 * while, so the RX buffer pointers are not refilled
798 	 * in time.
799 	 *
800 	 * If the driver is reinitialized at that point, the bufp
801 	 * pointer for such an RX slot is still NULL, so the
802 	 * reinitialization must fail.
803 	 */
804 		if (bsbdp->bufp == NULL)
805 			return (DDI_FAILURE);
806 
807 		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
808 		    bsbdp->bufp->alength);
809 	}
810 	return (DDI_SUCCESS);
811 }
812 
813 static void
814 nge_init_ring_param_lock(nge_t *ngep)
815 {
816 	buff_ring_t *brp;
817 	send_ring_t *srp;
818 
819 	srp = ngep->send;
820 	brp = ngep->buff;
821 
822 	/* Init the locks for send ring */
823 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
824 	    DDI_INTR_PRI(ngep->intr_pri));
825 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
826 	    DDI_INTR_PRI(ngep->intr_pri));
827 	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
828 	    DDI_INTR_PRI(ngep->intr_pri));
829 
830 	/* Init parameters of buffer ring */
831 	brp->free_list = NULL;
832 	brp->recycle_list = NULL;
833 	brp->rx_hold = 0;
834 	brp->buf_sign = 0;
835 
836 	/* Init recycle list lock */
837 	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
838 	    DDI_INTR_PRI(ngep->intr_pri));
839 }
840 
841 int
842 nge_init_rings(nge_t *ngep)
843 {
844 	uint32_t err;
845 
846 	err = nge_init_send_ring(ngep);
847 	if (err != DDI_SUCCESS) {
848 		return (err);
849 	}
850 	nge_init_recv_ring(ngep);
851 
852 	err = nge_init_buff_ring(ngep);
853 	if (err != DDI_SUCCESS) {
854 		nge_fini_send_ring(ngep);
855 		return (DDI_FAILURE);
856 	}
857 
858 	return (err);
859 }
860 
861 static int
862 nge_reinit_ring(nge_t *ngep)
863 {
864 	int err;
865 
866 	nge_reinit_recv_ring(ngep);
867 	nge_reinit_send_ring(ngep);
868 	err = nge_reinit_buff_ring(ngep);
869 	return (err);
870 }
871 
872 
873 void
874 nge_fini_rings(nge_t *ngep)
875 {
876 	/*
877 	 * For the receive ring, nothing needs to be finished,
878 	 * so only the buffer ring and send ring are finished here.
879 	 */
880 	nge_fini_buff_ring(ngep);
881 	nge_fini_send_ring(ngep);
882 }
883 
884 /*
885  * Loopback ioctl code
886  */
887 
888 static lb_property_t loopmodes[] = {
889 	{ normal,	"normal",	NGE_LOOP_NONE		},
890 	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
891 	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
892 	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
893 };
894 
895 enum ioc_reply
896 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
897 {
898 	int cmd;
899 	uint32_t *lbmp;
900 	lb_info_sz_t *lbsp;
901 	lb_property_t *lbpp;
902 
903 	/*
904 	 * Validate format of ioctl
905 	 */
906 	if (mp->b_cont == NULL)
907 		return (IOC_INVAL);
908 
909 	cmd = iocp->ioc_cmd;
910 
911 	switch (cmd) {
912 	default:
913 		return (IOC_INVAL);
914 
915 	case LB_GET_INFO_SIZE:
916 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
917 			return (IOC_INVAL);
918 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
919 		*lbsp = sizeof (loopmodes);
920 		return (IOC_REPLY);
921 
922 	case LB_GET_INFO:
923 		if (iocp->ioc_count != sizeof (loopmodes))
924 			return (IOC_INVAL);
925 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
926 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
927 		return (IOC_REPLY);
928 
929 	case LB_GET_MODE:
930 		if (iocp->ioc_count != sizeof (uint32_t))
931 			return (IOC_INVAL);
932 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
933 		*lbmp = ngep->param_loop_mode;
934 		return (IOC_REPLY);
935 
936 	case LB_SET_MODE:
937 		if (iocp->ioc_count != sizeof (uint32_t))
938 			return (IOC_INVAL);
939 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
940 		return (nge_set_loop_mode(ngep, *lbmp));
941 	}
942 }
943 
944 #undef	NGE_DBG
945 #define	NGE_DBG	NGE_DBG_NEMO
946 
947 
948 static void
949 nge_check_desc_prop(nge_t *ngep)
950 {
951 	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
952 		ngep->desc_mode = DESC_HOT;
953 
954 	if (ngep->desc_mode == DESC_OFFLOAD) {
955 
956 		ngep->desc_attr = nge_sum_desc;
957 
958 	} else if (ngep->desc_mode == DESC_HOT) {
959 
960 		ngep->desc_attr = nge_hot_desc;
961 	}
962 }
963 
964 /*
965  * nge_get_props -- get the parameters to tune the driver
966  */
967 static void
968 nge_get_props(nge_t *ngep)
969 {
970 	chip_info_t *infop;
971 	dev_info_t *devinfo;
972 	nge_dev_spec_param_t *dev_param_p;
973 
974 	devinfo = ngep->devinfo;
975 	infop = (chip_info_t *)&ngep->chipinfo;
976 	dev_param_p = &ngep->dev_spec_param;
977 
978 	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
979 	    DDI_PROP_DONTPASS, clsize_propname, 32);
980 
981 	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
982 	    DDI_PROP_DONTPASS, latency_propname, 64);
983 	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
984 	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
985 	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
986 	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
987 	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
988 	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
989 	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
990 	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);
991 
992 	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
993 	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
994 	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
995 	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
996 	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
997 	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
998 	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
999 	    DDI_PROP_DONTPASS, low_memory_mode, 0);
1000 
1001 	if (dev_param_p->jumbo) {
1002 		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
1003 		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
1004 	} else
1005 		ngep->default_mtu = ETHERMTU;
1006 
1007 	if (ngep->default_mtu > ETHERMTU &&
1008 	    ngep->default_mtu <= NGE_MTU_2500) {
1009 		ngep->buf_size = NGE_JB2500_BUFSZ;
1010 		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
1011 		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
1012 		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
1013 		ngep->nge_split = NGE_SPLIT_256;
1014 	} else if (ngep->default_mtu > NGE_MTU_2500 &&
1015 	    ngep->default_mtu <= NGE_MTU_4500) {
1016 		ngep->buf_size = NGE_JB4500_BUFSZ;
1017 		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
1018 		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
1019 		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
1020 		ngep->nge_split = NGE_SPLIT_256;
1021 	} else if (ngep->default_mtu > NGE_MTU_4500 &&
1022 	    ngep->default_mtu <= NGE_MAX_MTU) {
1023 		ngep->buf_size = NGE_JB9000_BUFSZ;
1024 		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1025 		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1026 		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1027 		ngep->nge_split = NGE_SPLIT_256;
1028 	} else if (ngep->default_mtu > NGE_MAX_MTU) {
1029 		ngep->default_mtu = NGE_MAX_MTU;
1030 		ngep->buf_size = NGE_JB9000_BUFSZ;
1031 		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1032 		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1033 		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1034 		ngep->nge_split = NGE_SPLIT_256;
1035 	} else if (ngep->lowmem_mode != 0) {
1036 		ngep->default_mtu = ETHERMTU;
1037 		ngep->buf_size = NGE_STD_BUFSZ;
1038 		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
1039 		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
1040 		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
1041 		ngep->nge_split = NGE_SPLIT_32;
1042 	} else {
1043 		ngep->default_mtu = ETHERMTU;
1044 		ngep->buf_size = NGE_STD_BUFSZ;
1045 		ngep->tx_desc = dev_param_p->tx_desc_num;
1046 		ngep->rx_desc = dev_param_p->rx_desc_num;
1047 		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
1048 		ngep->nge_split = dev_param_p->nge_split;
1049 	}
1050 
1051 	nge_check_desc_prop(ngep);
1052 }
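/*
 * All of the tunables read above come either from the device node or from
 * an nge.conf driver configuration file.  An illustrative (not shipped)
 * nge.conf fragment overriding a few of them might look like:
 *
 *	default_mtu = 9000;
 *	desc-mode = 0;
 *	nge-debug-flags = 0;
 *	minimal-memory-usage = 1;
 *
 * Properties that are not set fall back to the defaults passed to
 * ddi_prop_get_int(), e.g. ETHERMTU for default_mtu and
 * dev_param_p->desc_type for desc-mode.
 */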
1053 
1054 
1055 static int
1056 nge_reset_dev(nge_t *ngep)
1057 {
1058 	int err;
1059 	nge_mul_addr1 maddr1;
1060 	nge_sw_statistics_t *sw_stp;
1061 	send_ring_t *srp = ngep->send;
1062 	sw_stp = &ngep->statistics.sw_statistics;
1063 
1064 	ASSERT(mutex_owned(ngep->genlock));
1065 	mutex_enter(srp->tc_lock);
1066 	mutex_enter(srp->tx_lock);
1067 
1068 	nge_tx_recycle_all(ngep);
1069 	err = nge_reinit_ring(ngep);
1070 	if (err == DDI_FAILURE) {
1071 		mutex_exit(srp->tx_lock);
1072 		mutex_exit(srp->tc_lock);
1073 		return (err);
1074 	}
1075 	err = nge_chip_reset(ngep);
1076 	/*
1077 	 * Clear the Multicast mac address table
1078 	 */
1079 	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
1080 	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
1081 	maddr1.addr_bits.addr = 0;
1082 	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);
1083 
1084 	mutex_exit(srp->tx_lock);
1085 	mutex_exit(srp->tc_lock);
1086 	if (err == DDI_FAILURE)
1087 		return (err);
1088 	ngep->watchdog = 0;
1089 	ngep->resched_needed = B_FALSE;
1090 	ngep->promisc = B_FALSE;
1091 	ngep->param_loop_mode = NGE_LOOP_NONE;
1092 	ngep->factotum_flag = 0;
1093 	ngep->resched_needed = 0;
1094 	ngep->nge_mac_state = NGE_MAC_RESET;
1095 	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
1096 	ngep->max_sdu += VTAG_SIZE;
1097 	ngep->rx_def = 0x16;
1098 
1099 	/* Clear the software statistics */
1100 	sw_stp->recv_count = 0;
1101 	sw_stp->xmit_count = 0;
1102 	sw_stp->rbytes = 0;
1103 	sw_stp->obytes = 0;
1104 
1105 	return (DDI_SUCCESS);
1106 }
1107 
1108 static void
1109 nge_m_stop(void *arg)
1110 {
1111 	nge_t *ngep = arg;		/* private device info	*/
1112 	int err;
1113 
1114 	NGE_TRACE(("nge_m_stop($%p)", arg));
1115 
1116 	/*
1117 	 * Just stop processing, then record new MAC state
1118 	 */
1119 	mutex_enter(ngep->genlock);
1120 	/* If suspended, the adapter is already stopped, just return. */
1121 	if (ngep->suspended) {
1122 		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
1123 		mutex_exit(ngep->genlock);
1124 		return;
1125 	}
1126 	rw_enter(ngep->rwlock, RW_WRITER);
1127 
1128 	err = nge_chip_stop(ngep, B_FALSE);
1129 	if (err == DDI_FAILURE)
1130 		err = nge_chip_reset(ngep);
1131 	if (err == DDI_FAILURE)
1132 		nge_problem(ngep, "nge_m_stop: stop chip failed");
1133 	ngep->nge_mac_state = NGE_MAC_STOPPED;
1134 
1135 	/* Recycle all the TX BD */
1136 	nge_tx_recycle_all(ngep);
1137 	nge_fini_rings(ngep);
1138 	nge_free_bufs(ngep);
1139 
1140 	NGE_DEBUG(("nge_m_stop($%p) done", arg));
1141 
1142 	rw_exit(ngep->rwlock);
1143 	mutex_exit(ngep->genlock);
1144 }
1145 
1146 static int
1147 nge_m_start(void *arg)
1148 {
1149 	int err;
1150 	nge_t *ngep = arg;
1151 
1152 	NGE_TRACE(("nge_m_start($%p)", arg));
1153 
1154 	/*
1155 	 * Start processing and record new MAC state
1156 	 */
1157 	mutex_enter(ngep->genlock);
1158 	/*
1159 	 * If suspended, don't start, as the resume processing
1160 	 * will recall this function with the suspended flag off.
1161 	 */
1162 	if (ngep->suspended) {
1163 		mutex_exit(ngep->genlock);
1164 		return (EIO);
1165 	}
1166 	rw_enter(ngep->rwlock, RW_WRITER);
1167 	err = nge_alloc_bufs(ngep);
1168 	if (err != DDI_SUCCESS) {
1169 		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
1170 		goto finish;
1171 	}
1172 	err = nge_init_rings(ngep);
1173 	if (err != DDI_SUCCESS) {
1174 		nge_free_bufs(ngep);
1175 		nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
1176 		goto finish;
1177 	}
1178 	err = nge_restart(ngep);
1179 
1180 	NGE_DEBUG(("nge_m_start($%p) done", arg));
1181 finish:
1182 	rw_exit(ngep->rwlock);
1183 	mutex_exit(ngep->genlock);
1184 
1185 	return (err == DDI_SUCCESS ? 0 : EIO);
1186 }
1187 
1188 static int
1189 nge_m_unicst(void *arg, const uint8_t *macaddr)
1190 {
1191 	nge_t *ngep = arg;
1192 
1193 	NGE_TRACE(("nge_m_unicst($%p)", arg));
1194 	/*
1195 	 * Remember the new current address in the driver state
1196 	 * Sync the chip's idea of the address too ...
1197 	 */
1198 	mutex_enter(ngep->genlock);
1199 
1200 	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
1201 	ngep->cur_uni_addr.set = 1;
1202 
1203 	/*
1204 	 * If we are suspended, we want to quit now, and not update
1205 	 * the chip.  Doing so might put it in a bad state, but the
1206 	 * resume will get the unicast address installed.
1207 	 */
1208 	if (ngep->suspended) {
1209 		mutex_exit(ngep->genlock);
1210 		return (DDI_SUCCESS);
1211 	}
1212 	nge_chip_sync(ngep);
1213 
1214 	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
1215 	mutex_exit(ngep->genlock);
1216 
1217 	return (0);
1218 }
1219 
1220 static int
1221 nge_m_promisc(void *arg, boolean_t on)
1222 {
1223 	nge_t *ngep = arg;
1224 
1225 	NGE_TRACE(("nge_m_promisc($%p)", arg));
1226 
1227 	/*
1228 	 * Store specified mode and pass to chip layer to update h/w
1229 	 */
1230 	mutex_enter(ngep->genlock);
1231 	/*
1232 	 * If suspended, there is no need to do anything; even
1233 	 * recording the promiscuous mode is not necessary, as
1234 	 * it won't be properly set on resume.  Just return failure.
1235 	 */
1236 	if (ngep->suspended) {
1237 		mutex_exit(ngep->genlock);
1238 		return (DDI_FAILURE);
1239 	}
1240 	if (ngep->promisc == on) {
1241 		mutex_exit(ngep->genlock);
1242 		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1243 		return (0);
1244 	}
1245 	ngep->promisc = on;
1246 	ngep->record_promisc = ngep->promisc;
1247 	nge_chip_sync(ngep);
1248 	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1249 	mutex_exit(ngep->genlock);
1250 
1251 	return (0);
1252 }
1253 
1254 static void nge_mulparam(nge_t *ngep)
1255 {
1256 	uint8_t number;
1257 	ether_addr_t pand;
1258 	ether_addr_t por;
1259 	mul_item *plist;
1260 
1261 	for (number = 0; number < ETHERADDRL; number++) {
1262 		pand[number] = 0x00;
1263 		por[number] = 0x00;
1264 	}
1265 	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1266 		for (number = 0; number < ETHERADDRL; number++) {
1267 			pand[number] &= plist->mul_addr[number];
1268 			por[number] |= plist->mul_addr[number];
1269 		}
1270 	}
1271 	for (number = 0; number < ETHERADDRL; number++) {
1272 		ngep->cur_mul_addr.addr[number]
1273 		    = pand[number] & por[number];
1274 		ngep->cur_mul_mask.addr[number]
1275 		    = pand[number] | (~por[number]);
1276 	}
1277 }
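/*
 * nge_mulparam() collapses the whole multicast list into one
 * (address, mask) pair for the hardware: pand is the bitwise AND of all
 * addresses (bits set in every address), por the bitwise OR (bits set in
 * any address).  The programmed address keeps the always-set bits
 * (pand & por == pand) and the mask keeps the bits that agree across the
 * list (pand | ~por).  A worked single-byte example with two addresses
 * 0x33 and 0x3B:
 *
 *	pand = 0x33 & 0x3B = 0x33
 *	por  = 0x33 | 0x3B = 0x3B
 *	addr = pand & por  = 0x33
 *	mask = pand | ~por = 0xF7	(bit 3 differs, so it is masked out)
 */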
1278 static int
1279 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1280 {
1281 	boolean_t update;
1282 	boolean_t b_eq;
1283 	nge_t *ngep = arg;
1284 	mul_item *plist;
1285 	mul_item *plist_prev;
1286 	mul_item *pitem;
1287 
1288 	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
1289 	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1290 
1291 	update = B_FALSE;
1292 	plist = plist_prev = NULL;
1293 	mutex_enter(ngep->genlock);
1294 	if (add) {
1295 		if (ngep->pcur_mulist != NULL) {
1296 			for (plist = ngep->pcur_mulist; plist != NULL;
1297 			    plist = plist->next) {
1298 				b_eq = ether_eq(plist->mul_addr, mca);
1299 				if (b_eq) {
1300 					plist->ref_cnt++;
1301 					break;
1302 				}
1303 				plist_prev = plist;
1304 			}
1305 		}
1306 
1307 		if (plist == NULL) {
1308 			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
1309 			ether_copy(mca, pitem->mul_addr);
1310 			pitem->ref_cnt++;
1311 			pitem->next = NULL;
1312 			if (plist_prev == NULL)
1313 				ngep->pcur_mulist = pitem;
1314 			else
1315 				plist_prev->next = pitem;
1316 			update = B_TRUE;
1317 		}
1318 	} else {
1319 		if (ngep->pcur_mulist != NULL) {
1320 			for (plist = ngep->pcur_mulist; plist != NULL;
1321 			    plist = plist->next) {
1322 				b_eq = ether_eq(plist->mul_addr, mca);
1323 				if (b_eq) {
1324 					update = B_TRUE;
1325 					break;
1326 				}
1327 				plist_prev = plist;
1328 			}
1329 
1330 			if (update) {
1331 				if ((plist_prev == NULL) &&
1332 				    (plist->next == NULL))
1333 					ngep->pcur_mulist = NULL;
1334 				else if ((plist_prev == NULL) &&
1335 				    (plist->next != NULL))
1336 					ngep->pcur_mulist = plist->next;
1337 				else
1338 					plist_prev->next = plist->next;
1339 				kmem_free(plist, sizeof (mul_item));
1340 			}
1341 		}
1342 	}
1343 
1344 	if (update && !ngep->suspended) {
1345 		nge_mulparam(ngep);
1346 		nge_chip_sync(ngep);
1347 	}
1348 	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
1349 	mutex_exit(ngep->genlock);
1350 
1351 	return (0);
1352 }
1353 
1354 static void
1355 nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1356 {
1357 	int err;
1358 	int cmd;
1359 	nge_t *ngep = arg;
1360 	struct iocblk *iocp;
1361 	enum ioc_reply status;
1362 	boolean_t need_privilege;
1363 
1364 	/*
1365 	 * If suspended, we might actually be able to do some of
1366 	 * these ioctls, but it is harder to make sure they occur
1367 	 * without actually putting the hardware in an undesirable
1368 	 * state.  So just NAK it.
1369 	 */
1370 	mutex_enter(ngep->genlock);
1371 	if (ngep->suspended) {
1372 		miocnak(wq, mp, 0, EINVAL);
1373 		mutex_exit(ngep->genlock);
1374 		return;
1375 	}
1376 	mutex_exit(ngep->genlock);
1377 
1378 	/*
1379 	 * Validate the command before bothering with the mutex ...
1380 	 */
1381 	iocp = (struct iocblk *)mp->b_rptr;
1382 	iocp->ioc_error = 0;
1383 	need_privilege = B_TRUE;
1384 	cmd = iocp->ioc_cmd;
1385 
1386 	NGE_DEBUG(("nge_m_ioctl:  cmd 0x%x", cmd));
1387 	switch (cmd) {
1388 	default:
1389 		NGE_LDB(NGE_DBG_BADIOC,
1390 		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));
1391 
1392 		miocnak(wq, mp, 0, EINVAL);
1393 		return;
1394 
1395 	case NGE_MII_READ:
1396 	case NGE_MII_WRITE:
1397 	case NGE_SEE_READ:
1398 	case NGE_SEE_WRITE:
1399 	case NGE_DIAG:
1400 	case NGE_PEEK:
1401 	case NGE_POKE:
1402 	case NGE_PHY_RESET:
1403 	case NGE_SOFT_RESET:
1404 	case NGE_HARD_RESET:
1405 		break;
1406 
1407 	case LB_GET_INFO_SIZE:
1408 	case LB_GET_INFO:
1409 	case LB_GET_MODE:
1410 		need_privilege = B_FALSE;
1411 		break;
1412 	case LB_SET_MODE:
1413 		break;
1414 	}
1415 
1416 	if (need_privilege) {
1417 		/*
1418 		 * Check for specific net_config privilege.
1419 		 */
1420 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1421 		if (err != 0) {
1422 			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
1423 			    cmd, err));
1424 			miocnak(wq, mp, 0, err);
1425 			return;
1426 		}
1427 	}
1428 
1429 	mutex_enter(ngep->genlock);
1430 
1431 	switch (cmd) {
1432 	default:
1433 		_NOTE(NOTREACHED)
1434 		status = IOC_INVAL;
1435 	break;
1436 
1437 	case NGE_MII_READ:
1438 	case NGE_MII_WRITE:
1439 	case NGE_SEE_READ:
1440 	case NGE_SEE_WRITE:
1441 	case NGE_DIAG:
1442 	case NGE_PEEK:
1443 	case NGE_POKE:
1444 	case NGE_PHY_RESET:
1445 	case NGE_SOFT_RESET:
1446 	case NGE_HARD_RESET:
1447 		status = nge_chip_ioctl(ngep, mp, iocp);
1448 	break;
1449 
1450 	case LB_GET_INFO_SIZE:
1451 	case LB_GET_INFO:
1452 	case LB_GET_MODE:
1453 	case LB_SET_MODE:
1454 		status = nge_loop_ioctl(ngep, mp, iocp);
1455 	break;
1456 
1457 	}
1458 
1459 	/*
1460 	 * Do we need to reprogram the PHY and/or the MAC?
1461 	 * Do it now, while we still have the mutex.
1462 	 *
1463 	 * Note: update the PHY first, 'cos it controls the
1464 	 * speed/duplex parameters that the MAC code uses.
1465 	 */
1466 
1467 	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));
1468 
1469 	switch (status) {
1470 	case IOC_RESTART_REPLY:
1471 	case IOC_RESTART_ACK:
1472 		(*ngep->physops->phys_update)(ngep);
1473 		nge_chip_sync(ngep);
1474 		break;
1475 
1476 	default:
1477 	break;
1478 	}
1479 
1480 	mutex_exit(ngep->genlock);
1481 
1482 	/*
1483 	 * Finally, decide how to reply
1484 	 */
1485 	switch (status) {
1486 
1487 	default:
1488 	case IOC_INVAL:
1489 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
1490 		    EINVAL : iocp->ioc_error);
1491 		break;
1492 
1493 	case IOC_DONE:
1494 		break;
1495 
1496 	case IOC_RESTART_ACK:
1497 	case IOC_ACK:
1498 		miocack(wq, mp, 0, 0);
1499 		break;
1500 
1501 	case IOC_RESTART_REPLY:
1502 	case IOC_REPLY:
1503 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1504 		    M_IOCACK : M_IOCNAK;
1505 		qreply(wq, mp);
1506 		break;
1507 	}
1508 }
1509 
1510 static boolean_t
1511 nge_param_locked(mac_prop_id_t pr_num)
1512 {
1513 	/*
1514 	 * All adv_* parameters are locked (read-only) while
1515 	 * the device is in any sort of loopback mode ...
1516 	 */
1517 	switch (pr_num) {
1518 		case MAC_PROP_ADV_1000FDX_CAP:
1519 		case MAC_PROP_EN_1000FDX_CAP:
1520 		case MAC_PROP_ADV_1000HDX_CAP:
1521 		case MAC_PROP_EN_1000HDX_CAP:
1522 		case MAC_PROP_ADV_100FDX_CAP:
1523 		case MAC_PROP_EN_100FDX_CAP:
1524 		case MAC_PROP_ADV_100HDX_CAP:
1525 		case MAC_PROP_EN_100HDX_CAP:
1526 		case MAC_PROP_ADV_10FDX_CAP:
1527 		case MAC_PROP_EN_10FDX_CAP:
1528 		case MAC_PROP_ADV_10HDX_CAP:
1529 		case MAC_PROP_EN_10HDX_CAP:
1530 		case MAC_PROP_AUTONEG:
1531 		case MAC_PROP_FLOWCTRL:
1532 			return (B_TRUE);
1533 	}
1534 	return (B_FALSE);
1535 }
1536 
1537 /*
1538  * callback functions for set/get of properties
1539  */
1540 static int
1541 nge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1542     uint_t pr_valsize, const void *pr_val)
1543 {
1544 	nge_t *ngep = barg;
1545 	int err = 0;
1546 	uint32_t cur_mtu, new_mtu;
1547 	link_flowctrl_t fl;
1548 
1549 	mutex_enter(ngep->genlock);
1550 	if (ngep->param_loop_mode != NGE_LOOP_NONE &&
1551 	    nge_param_locked(pr_num)) {
1552 		/*
1553 		 * All adv_* parameters are locked (read-only)
1554 		 * while the device is in any sort of loopback mode.
1555 		 */
1556 		mutex_exit(ngep->genlock);
1557 		return (EBUSY);
1558 	}
1559 	switch (pr_num) {
1560 		case MAC_PROP_EN_1000FDX_CAP:
1561 			ngep->param_en_1000fdx = *(uint8_t *)pr_val;
1562 			ngep->param_adv_1000fdx = *(uint8_t *)pr_val;
1563 			goto reprogram;
1564 		case MAC_PROP_EN_100FDX_CAP:
1565 			ngep->param_en_100fdx = *(uint8_t *)pr_val;
1566 			ngep->param_adv_100fdx = *(uint8_t *)pr_val;
1567 			goto reprogram;
1568 		case MAC_PROP_EN_100HDX_CAP:
1569 			ngep->param_en_100hdx = *(uint8_t *)pr_val;
1570 			ngep->param_adv_100hdx = *(uint8_t *)pr_val;
1571 			goto reprogram;
1572 		case MAC_PROP_EN_10FDX_CAP:
1573 			ngep->param_en_10fdx = *(uint8_t *)pr_val;
1574 			ngep->param_adv_10fdx = *(uint8_t *)pr_val;
1575 			goto reprogram;
1576 		case MAC_PROP_EN_10HDX_CAP:
1577 			ngep->param_en_10hdx = *(uint8_t *)pr_val;
1578 			ngep->param_adv_10hdx = *(uint8_t *)pr_val;
1579 reprogram:
1580 		(*ngep->physops->phys_update)(ngep);
1581 		nge_chip_sync(ngep);
1582 		break;
1583 
1584 		case MAC_PROP_ADV_1000FDX_CAP:
1585 		case MAC_PROP_ADV_1000HDX_CAP:
1586 		case MAC_PROP_ADV_100FDX_CAP:
1587 		case MAC_PROP_ADV_100HDX_CAP:
1588 		case MAC_PROP_ADV_10FDX_CAP:
1589 		case MAC_PROP_ADV_10HDX_CAP:
1590 		case MAC_PROP_STATUS:
1591 		case MAC_PROP_SPEED:
1592 		case MAC_PROP_DUPLEX:
1593 		case MAC_PROP_EN_1000HDX_CAP:
1594 			err = ENOTSUP; /* read-only prop. Can't set this */
1595 			break;
1596 		case MAC_PROP_AUTONEG:
1597 			ngep->param_adv_autoneg = *(uint8_t *)pr_val;
1598 			(*ngep->physops->phys_update)(ngep);
1599 			nge_chip_sync(ngep);
1600 			break;
1601 		case MAC_PROP_MTU:
1602 			cur_mtu = ngep->default_mtu;
1603 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
1604 			if (new_mtu == cur_mtu) {
1605 				err = 0;
1606 				break;
1607 			}
1608 			if (new_mtu < ETHERMTU ||
1609 			    new_mtu > NGE_MAX_MTU) {
1610 				err = EINVAL;
1611 				break;
1612 			}
1613 			if ((new_mtu > ETHERMTU) &&
1614 			    (!ngep->dev_spec_param.jumbo)) {
1615 				err = EINVAL;
1616 				break;
1617 			}
1618 			if (ngep->nge_mac_state == NGE_MAC_STARTED) {
1619 				err = EBUSY;
1620 				break;
1621 			}
1622 
1623 			ngep->default_mtu = new_mtu;
1624 			if (ngep->default_mtu > ETHERMTU &&
1625 			    ngep->default_mtu <= NGE_MTU_2500) {
1626 				ngep->buf_size = NGE_JB2500_BUFSZ;
1627 				ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
1628 				ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
1629 				ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
1630 				ngep->nge_split = NGE_SPLIT_256;
1631 			} else if (ngep->default_mtu > NGE_MTU_2500 &&
1632 			    ngep->default_mtu <= NGE_MTU_4500) {
1633 				ngep->buf_size = NGE_JB4500_BUFSZ;
1634 				ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
1635 				ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
1636 				ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
1637 				ngep->nge_split = NGE_SPLIT_256;
1638 			} else if (ngep->default_mtu > NGE_MTU_4500 &&
1639 			    ngep->default_mtu <= NGE_MAX_MTU) {
1640 				ngep->buf_size = NGE_JB9000_BUFSZ;
1641 				ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1642 				ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1643 				ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1644 				ngep->nge_split = NGE_SPLIT_256;
1645 			} else if (ngep->default_mtu > NGE_MAX_MTU) {
1646 				ngep->default_mtu = NGE_MAX_MTU;
1647 				ngep->buf_size = NGE_JB9000_BUFSZ;
1648 				ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
1649 				ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
1650 				ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
1651 				ngep->nge_split = NGE_SPLIT_256;
1652 			} else if (ngep->lowmem_mode != 0) {
1653 				ngep->default_mtu = ETHERMTU;
1654 				ngep->buf_size = NGE_STD_BUFSZ;
1655 				ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
1656 				ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
1657 				ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
1658 				ngep->nge_split = NGE_SPLIT_32;
1659 			} else {
1660 				ngep->default_mtu = ETHERMTU;
1661 				ngep->buf_size = NGE_STD_BUFSZ;
1662 				ngep->tx_desc =
1663 				    ngep->dev_spec_param.tx_desc_num;
1664 				ngep->rx_desc =
1665 				    ngep->dev_spec_param.rx_desc_num;
1666 				ngep->rx_buf =
1667 				    ngep->dev_spec_param.rx_desc_num * 2;
1668 				ngep->nge_split =
1669 				    ngep->dev_spec_param.nge_split;
1670 			}
1671 
1672 			err = mac_maxsdu_update(ngep->mh, ngep->default_mtu);
1673 
1674 			break;
1675 		case MAC_PROP_FLOWCTRL:
1676 			bcopy(pr_val, &fl, sizeof (fl));
1677 			switch (fl) {
1678 			default:
1679 				err = ENOTSUP;
1680 				break;
1681 			case LINK_FLOWCTRL_NONE:
1682 				ngep->param_adv_pause = 0;
1683 				ngep->param_adv_asym_pause = 0;
1684 
1685 				ngep->param_link_rx_pause = B_FALSE;
1686 				ngep->param_link_tx_pause = B_FALSE;
1687 				break;
1688 			case LINK_FLOWCTRL_RX:
1689 				if (!((ngep->param_lp_pause == 0) &&
1690 				    (ngep->param_lp_asym_pause == 1))) {
1691 					err = EINVAL;
1692 					break;
1693 				}
1694 				ngep->param_adv_pause = 1;
1695 				ngep->param_adv_asym_pause = 1;
1696 
1697 				ngep->param_link_rx_pause = B_TRUE;
1698 				ngep->param_link_tx_pause = B_FALSE;
1699 				break;
1700 			case LINK_FLOWCTRL_TX:
1701 				if (!((ngep->param_lp_pause == 1) &&
1702 				    (ngep->param_lp_asym_pause == 1))) {
1703 					err = EINVAL;
1704 					break;
1705 				}
1706 				ngep->param_adv_pause = 0;
1707 				ngep->param_adv_asym_pause = 1;
1708 
1709 				ngep->param_link_rx_pause = B_FALSE;
1710 				ngep->param_link_tx_pause = B_TRUE;
1711 				break;
1712 			case LINK_FLOWCTRL_BI:
1713 				if (ngep->param_lp_pause != 1) {
1714 					err = EINVAL;
1715 					break;
1716 				}
1717 				ngep->param_adv_pause = 1;
1718 
1719 				ngep->param_link_rx_pause = B_TRUE;
1720 				ngep->param_link_tx_pause = B_TRUE;
1721 				break;
1722 			}
1723 
1724 			if (err == 0) {
1725 				(*ngep->physops->phys_update)(ngep);
1726 				nge_chip_sync(ngep);
1727 			}
1728 
1729 			break;
1730 		case MAC_PROP_PRIVATE:
1731 			err = nge_set_priv_prop(ngep, pr_name, pr_valsize,
1732 			    pr_val);
1733 			if (err == 0) {
1734 				(*ngep->physops->phys_update)(ngep);
1735 				nge_chip_sync(ngep);
1736 			}
1737 			break;
1738 		default:
1739 			err = ENOTSUP;
1740 	}
1741 	mutex_exit(ngep->genlock);
1742 	return (err);
1743 }
1744 
1745 static int
1746 nge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
1747     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
1748 {
1749 	nge_t *ngep = barg;
1750 	int err = 0;
1751 	link_flowctrl_t fl;
1752 	uint64_t speed;
1753 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
1754 
1755 	if (pr_valsize == 0)
1756 		return (EINVAL);
1757 
1758 	*perm = MAC_PROP_PERM_RW;
1759 
1760 	bzero(pr_val, pr_valsize);
1761 
1762 	switch (pr_num) {
1763 		case MAC_PROP_DUPLEX:
1764 			*perm = MAC_PROP_PERM_READ;
1765 			if (pr_valsize >= sizeof (link_duplex_t)) {
1766 				bcopy(&ngep->param_link_duplex, pr_val,
1767 				    sizeof (link_duplex_t));
1768 			} else
1769 				err = EINVAL;
1770 			break;
1771 		case MAC_PROP_SPEED:
1772 			*perm = MAC_PROP_PERM_READ;
1773 			if (pr_valsize >= sizeof (uint64_t)) {
1774 				speed = ngep->param_link_speed * 1000000ull;
1775 				bcopy(&speed, pr_val, sizeof (speed));
1776 			} else
1777 				err = EINVAL;
1778 			break;
1779 		case MAC_PROP_AUTONEG:
1780 			if (is_default) {
1781 				*(uint8_t *)pr_val = 1;
1782 			} else {
1783 				*(uint8_t *)pr_val = ngep->param_adv_autoneg;
1784 			}
1785 			break;
1786 		case MAC_PROP_FLOWCTRL:
1787 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
1788 				if (pr_flags & MAC_PROP_DEFAULT) {
1789 					fl = LINK_FLOWCTRL_BI;
1790 					bcopy(&fl, pr_val, sizeof (fl));
1791 					break;
1792 				}
1793 				if (ngep->param_link_rx_pause &&
1794 				    !ngep->param_link_tx_pause)
1795 					fl = LINK_FLOWCTRL_RX;
1796 
1797 				if (!ngep->param_link_rx_pause &&
1798 				    !ngep->param_link_tx_pause)
1799 					fl = LINK_FLOWCTRL_NONE;
1800 
1801 				if (!ngep->param_link_rx_pause &&
1802 				    ngep->param_link_tx_pause)
1803 					fl = LINK_FLOWCTRL_TX;
1804 
1805 				if (ngep->param_link_rx_pause &&
1806 				    ngep->param_link_tx_pause)
1807 					fl = LINK_FLOWCTRL_BI;
1808 				bcopy(&fl, pr_val, sizeof (fl));
1809 			} else
1810 				err = EINVAL;
1811 			break;
1812 		case MAC_PROP_ADV_1000FDX_CAP:
1813 			*perm = MAC_PROP_PERM_READ;
1814 			if (is_default) {
1815 				*(uint8_t *)pr_val = 1;
1816 			} else {
1817 				*(uint8_t *)pr_val = ngep->param_adv_1000fdx;
1818 			}
1819 			break;
1820 		case MAC_PROP_EN_1000FDX_CAP:
1821 			if (is_default) {
1822 				*(uint8_t *)pr_val = 1;
1823 			} else {
1824 				*(uint8_t *)pr_val = ngep->param_en_1000fdx;
1825 			}
1826 			break;
1827 		case MAC_PROP_ADV_1000HDX_CAP:
1828 			*perm = MAC_PROP_PERM_READ;
1829 			if (is_default) {
1830 				*(uint8_t *)pr_val = 0;
1831 			} else {
1832 				*(uint8_t *)pr_val = ngep->param_adv_1000hdx;
1833 			}
1834 			break;
1835 		case MAC_PROP_EN_1000HDX_CAP:
1836 			*perm = MAC_PROP_PERM_READ;
1837 			if (is_default) {
1838 				*(uint8_t *)pr_val = 0;
1839 			} else {
1840 				*(uint8_t *)pr_val = ngep->param_en_1000hdx;
1841 			}
1842 			break;
1843 		case MAC_PROP_ADV_100FDX_CAP:
1844 			*perm = MAC_PROP_PERM_READ;
1845 			if (is_default) {
1846 				*(uint8_t *)pr_val = 1;
1847 			} else {
1848 				*(uint8_t *)pr_val = ngep->param_adv_100fdx;
1849 			}
1850 			break;
1851 		case MAC_PROP_EN_100FDX_CAP:
1852 			if (is_default) {
1853 				*(uint8_t *)pr_val = 1;
1854 			} else {
1855 				*(uint8_t *)pr_val = ngep->param_en_100fdx;
1856 			}
1857 			break;
1858 		case MAC_PROP_ADV_100HDX_CAP:
1859 			*perm = MAC_PROP_PERM_READ;
1860 			if (is_default) {
1861 				*(uint8_t *)pr_val = 1;
1862 			} else {
1863 				*(uint8_t *)pr_val = ngep->param_adv_100hdx;
1864 			}
1865 			break;
1866 		case MAC_PROP_EN_100HDX_CAP:
1867 			if (is_default) {
1868 				*(uint8_t *)pr_val = 1;
1869 			} else {
1870 				*(uint8_t *)pr_val = ngep->param_en_100hdx;
1871 			}
1872 			break;
1873 		case MAC_PROP_ADV_10FDX_CAP:
1874 			*perm = MAC_PROP_PERM_READ;
1875 			if (is_default) {
1876 				*(uint8_t *)pr_val = 1;
1877 			} else {
1878 				*(uint8_t *)pr_val = ngep->param_adv_10fdx;
1879 			}
1880 			break;
1881 		case MAC_PROP_EN_10FDX_CAP:
1882 			if (is_default) {
1883 				*(uint8_t *)pr_val = 1;
1884 			} else {
1885 				*(uint8_t *)pr_val = ngep->param_en_10fdx;
1886 			}
1887 			break;
1888 		case MAC_PROP_ADV_10HDX_CAP:
1889 			*perm = MAC_PROP_PERM_READ;
1890 			if (is_default) {
1891 				*(uint8_t *)pr_val = 1;
1892 			} else {
1893 				*(uint8_t *)pr_val = ngep->param_adv_10hdx;
1894 			}
1895 			break;
1896 		case MAC_PROP_EN_10HDX_CAP:
1897 			if (is_default) {
1898 				*(uint8_t *)pr_val = 1;
1899 			} else {
1900 				*(uint8_t *)pr_val = ngep->param_en_10hdx;
1901 			}
1902 			break;
1903 		case MAC_PROP_ADV_100T4_CAP:
1904 		case MAC_PROP_EN_100T4_CAP:
1905 			*perm = MAC_PROP_PERM_READ;
1906 			*(uint8_t *)pr_val = 0;
1907 			break;
1908 		case MAC_PROP_PRIVATE:
1909 			err = nge_get_priv_prop(ngep, pr_name, pr_flags,
1910 			    pr_valsize, pr_val);
1911 			break;
1912 		case MAC_PROP_MTU: {
1913 			mac_propval_range_t range;
1914 
1915 			if (!(pr_flags & MAC_PROP_POSSIBLE))
1916 				return (ENOTSUP);
1917 			if (pr_valsize < sizeof (mac_propval_range_t))
1918 				return (EINVAL);
1919 			range.mpr_count = 1;
1920 			range.mpr_type = MAC_PROPVAL_UINT32;
1921 			range.range_uint32[0].mpur_min =
1922 			    range.range_uint32[0].mpur_max = ETHERMTU;
1923 			if (ngep->dev_spec_param.jumbo)
1924 				range.range_uint32[0].mpur_max = NGE_MAX_MTU;
1925 			bcopy(&range, pr_val, sizeof (range));
1926 			break;
1927 		}
1928 		default:
1929 			err = ENOTSUP;
1930 	}
1931 	return (err);
1932 }
1933 
1934 /* ARGSUSED */
1935 static int
1936 nge_set_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_valsize,
1937     const void *pr_val)
1938 {
1939 	int err = 0;
1940 	long result;
1941 
1942 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
1943 		if (pr_val == NULL) {
1944 			err = EINVAL;
1945 			return (err);
1946 		}
1947 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1948 		if (result < 0 || result > NGE_MAX_SDU) {
1949 			err = EINVAL;
1950 		} else {
1951 			ngep->param_txbcopy_threshold = (uint32_t)result;
1952 			goto reprogram;
1953 		}
1954 		return (err);
1955 	}
1956 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
1957 		if (pr_val == NULL) {
1958 			err = EINVAL;
1959 			return (err);
1960 		}
1961 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1962 		if (result < 0 || result > NGE_MAX_SDU) {
1963 			err = EINVAL;
1964 		} else {
1965 			ngep->param_rxbcopy_threshold = (uint32_t)result;
1966 			goto reprogram;
1967 		}
1968 		return (err);
1969 	}
1970 	if (strcmp(pr_name, "_recv_max_packet") == 0) {
1971 		if (pr_val == NULL) {
1972 			err = EINVAL;
1973 			return (err);
1974 		}
1975 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1976 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
1977 			err = EINVAL;
1978 		} else {
1979 			ngep->param_recv_max_packet = (uint32_t)result;
1980 			goto reprogram;
1981 		}
1982 		return (err);
1983 	}
1984 	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
1985 		if (pr_val == NULL) {
1986 			err = EINVAL;
1987 			return (err);
1988 		}
1989 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1990 		if (result < 0 || result > 10000) {
1991 			err = EINVAL;
1992 		} else {
1993 			ngep->param_poll_quiet_time = (uint32_t)result;
1994 			goto reprogram;
1995 		}
1996 		return (err);
1997 	}
1998 	if (strcmp(pr_name, "_poll_busy_time") == 0) {
1999 		if (pr_val == NULL) {
2000 			err = EINVAL;
2001 			return (err);
2002 		}
2003 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2004 		if (result < 0 || result > 10000) {
2005 			err = EINVAL;
2006 		} else {
2007 			ngep->param_poll_busy_time = (uint32_t)result;
2008 			goto reprogram;
2009 		}
2010 		return (err);
2011 	}
2012 	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
2013 		if (pr_val == NULL) {
2014 			err = EINVAL;
2015 			return (err);
2016 		}
2017 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2018 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2019 			err = EINVAL;
2020 		} else {
2021 			ngep->param_rx_intr_hwater = (uint32_t)result;
2022 			goto reprogram;
2023 		}
2024 		return (err);
2025 	}
2026 	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2027 		if (pr_val == NULL) {
2028 			err = EINVAL;
2029 			return (err);
2030 		}
2031 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2032 		if (result < 0 || result > NGE_RECV_SLOTS_DESC_1024) {
2033 			err = EINVAL;
2034 		} else {
2035 			ngep->param_rx_intr_lwater = (uint32_t)result;
2036 			goto reprogram;
2037 		}
2038 		return (err);
2039 	}
2040 	if (strcmp(pr_name, "_tx_n_intr") == 0) {
2041 		if (pr_val == NULL) {
2042 			err = EINVAL;
2043 			return (err);
2044 		}
2045 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
2046 		if (result < 1 || result > 10000) {
2047 			err = EINVAL;
2048 		} else {
2049 			ngep->param_tx_n_intr = (uint32_t)result;
2050 			goto reprogram;
2051 		}
2052 		return (err);
2053 	}
2054 
2055 	err = ENOTSUP;
2056 	return (err);
2057 
2058 reprogram:
2059 	if (err == 0) {
2060 		(*ngep->physops->phys_update)(ngep);
2061 		nge_chip_sync(ngep);
2062 	}
2063 
2064 	return (err);
2065 }
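
/*
 * Illustrative note (not part of the original source): the private
 * properties handled above are normally tuned from userland with
 * dladm(1M), e.g. assuming an instance named nge0:
 *
 *	# dladm set-linkprop -p _tx_bcopy_threshold=512 nge0
 *	# dladm show-linkprop -p _rx_intr_hwater nge0
 *
 * Values outside the bounds checked above are rejected with EINVAL.
 */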
2066 
2067 static int
2068 nge_get_priv_prop(nge_t *ngep, const char *pr_name, uint_t pr_flags,
2069     uint_t pr_valsize, void *pr_val)
2070 {
2071 	int err = ENOTSUP;
2072 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
2073 	int value;
2074 
2075 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
2076 		value = (is_default ? 1 : ngep->param_adv_pause);
2077 		err = 0;
2078 		goto done;
2079 	}
2080 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
2081 		value = (is_default ? 1 : ngep->param_adv_asym_pause);
2082 		err = 0;
2083 		goto done;
2084 	}
2085 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
2086 		value = (is_default ? NGE_TX_COPY_SIZE :
2087 		    ngep->param_txbcopy_threshold);
2088 		err = 0;
2089 		goto done;
2090 	}
2091 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
2092 		value = (is_default ? NGE_RX_COPY_SIZE :
2093 		    ngep->param_rxbcopy_threshold);
2094 		err = 0;
2095 		goto done;
2096 	}
2097 	if (strcmp(pr_name, "_recv_max_packet") == 0) {
2098 		value = (is_default ? 128 : ngep->param_recv_max_packet);
2099 		err = 0;
2100 		goto done;
2101 	}
2102 	if (strcmp(pr_name, "_poll_quiet_time") == 0) {
2103 		value = (is_default ? NGE_POLL_QUIET_TIME :
2104 		    ngep->param_poll_quiet_time);
2105 		err = 0;
2106 		goto done;
2107 	}
2108 	if (strcmp(pr_name, "_poll_busy_time") == 0) {
2109 		value = (is_default ? NGE_POLL_BUSY_TIME :
2110 		    ngep->param_poll_busy_time);
2111 		err = 0;
2112 		goto done;
2113 	}
2114 	if (strcmp(pr_name, "_rx_intr_hwater") == 0) {
2115 		value = (is_default ? 1 : ngep->param_rx_intr_hwater);
2116 		err = 0;
2117 		goto done;
2118 	}
2119 	if (strcmp(pr_name, "_rx_intr_lwater") == 0) {
2120 		value = (is_default ? 8 : ngep->param_rx_intr_lwater);
2121 		err = 0;
2122 		goto done;
2123 	}
2124 	if (strcmp(pr_name, "_tx_n_intr") == 0) {
2125 		value = (is_default ? NGE_TX_N_INTR :
2126 		    ngep->param_tx_n_intr);
2127 		err = 0;
2128 		goto done;
2129 	}
2130 
2131 done:
2132 	if (err == 0) {
2133 		(void) snprintf(pr_val, pr_valsize, "%d", value);
2134 	}
2135 	return (err);
2136 }
2137 
2138 /* ARGSUSED */
2139 static boolean_t
2140 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2141 {
2142 	nge_t	*ngep = arg;
2143 	nge_dev_spec_param_t *dev_param_p;
2144 
2145 	dev_param_p = &ngep->dev_spec_param;
2146 
2147 	switch (cap) {
2148 	case MAC_CAPAB_HCKSUM: {
2149 		uint32_t *hcksum_txflags = cap_data;
2150 
2151 		if (dev_param_p->tx_hw_checksum) {
2152 			*hcksum_txflags = dev_param_p->tx_hw_checksum;
2153 		} else
2154 			return (B_FALSE);
2155 		break;
2156 	}
2157 	default:
2158 		return (B_FALSE);
2159 	}
2160 	return (B_TRUE);
2161 }
2162 
2163 #undef	NGE_DBG
2164 #define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
2165 int
2166 nge_restart(nge_t *ngep)
2167 {
2168 	int err = 0;
2169 	err = nge_reset_dev(ngep);
2170 	/* write back the promisc setting */
2171 	ngep->promisc = ngep->record_promisc;
2172 	nge_chip_sync(ngep);
2173 	if (!err)
2174 		err = nge_chip_start(ngep);
2175 
2176 	if (err) {
2177 		ngep->nge_mac_state = NGE_MAC_STOPPED;
2178 		return (DDI_FAILURE);
2179 	} else {
2180 		ngep->nge_mac_state = NGE_MAC_STARTED;
2181 		return (DDI_SUCCESS);
2182 	}
2183 }
2184 
2185 void
2186 nge_wake_factotum(nge_t *ngep)
2187 {
2188 	mutex_enter(ngep->softlock);
2189 	if (ngep->factotum_flag == 0) {
2190 		ngep->factotum_flag = 1;
2191 		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
2192 	}
2193 	mutex_exit(ngep->softlock);
2194 }
2195 
2196 /*
2197  * High-level cyclic handler
2198  *
2199  * This routine schedules a (low-level) softint callback to the
2200  * factotum.
2201  */
2202 
2203 static void
2204 nge_chip_cyclic(void *arg)
2205 {
2206 	nge_t *ngep;
2207 
2208 	ngep = (nge_t *)arg;
2209 
2210 	switch (ngep->nge_chip_state) {
2211 	default:
2212 		return;
2213 
2214 	case NGE_CHIP_RUNNING:
2215 		break;
2216 
2217 	case NGE_CHIP_FAULT:
2218 	case NGE_CHIP_ERROR:
2219 		break;
2220 	}
2221 
2222 	nge_wake_factotum(ngep);
2223 }
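
/*
 * A minimal sketch (duplicating what nge_attach() below actually does)
 * of how this cyclic is registered and torn down:
 *
 *	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
 *	    NGE_CYCLIC_PERIOD, DDI_IPL_0);
 *	...
 *	ddi_periodic_delete(ngep->periodic_id);
 */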
2224 
2225 /*
2226  * Get/Release the SMU semaphore.
2227  * For SMU-enabled chipsets only.
2228  * When the nge driver attaches, it must acquire the semaphore
2229  * before PHY initialization and before accessing MAC registers.
2230  * When the driver detaches, it must release the
2231  * semaphore.
2232  */
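
/*
 * A minimal sketch of the expected acquire/release pairing, matching
 * the MCP55-only calls in nge_attach() and nge_unattach() below:
 *
 *	mutex_enter(ngep->genlock);
 *	if (nge_smu_sema(ngep, B_TRUE) != DDI_SUCCESS)
 *		(fail the attach before touching PHY/MAC registers)
 *	...
 *	(void) nge_smu_sema(ngep, B_FALSE);
 *	mutex_exit(ngep->genlock);
 */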
2233 
2234 static int
2235 nge_smu_sema(nge_t *ngep, boolean_t acquire)
2236 {
2237 	nge_tx_en tx_en;
2238 	uint32_t tries;
2239 
2240 	if (acquire) {
2241 		for (tries = 0; tries < 5; tries++) {
2242 			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2243 			if (tx_en.bits.smu2mac == NGE_SMU_FREE)
2244 				break;
2245 			delay(drv_usectohz(1000000));
2246 		}
2247 		if (tx_en.bits.smu2mac != NGE_SMU_FREE)
2248 			return (DDI_FAILURE);
2249 		for (tries = 0; tries < 5; tries++) {
2250 			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2251 			tx_en.bits.mac2smu = NGE_SMU_GET;
2252 			nge_reg_put32(ngep, NGE_TX_EN, tx_en.val);
2253 			tx_en.val = nge_reg_get32(ngep, NGE_TX_EN);
2254 
2255 			if (tx_en.bits.mac2smu == NGE_SMU_GET &&
2256 			    tx_en.bits.smu2mac == NGE_SMU_FREE)
2257 				return (DDI_SUCCESS);
2258 			drv_usecwait(10);
2259 		}
2260 		return (DDI_FAILURE);
2261 	} else
2262 		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
2263 
2264 	return (DDI_SUCCESS);
2265 
2266 }
2267 static void
2268 nge_unattach(nge_t *ngep)
2269 {
2270 	send_ring_t *srp;
2271 	buff_ring_t *brp;
2272 
2273 	srp = ngep->send;
2274 	brp = ngep->buff;
2275 	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));
2276 
2277 	/*
2278 	 * Flag that no more activity may be initiated
2279 	 */
2280 	ngep->progress &= ~PROGRESS_READY;
2281 	ngep->nge_mac_state = NGE_MAC_UNATTACH;
2282 
2283 	/*
2284 	 * Quiesce the PHY and MAC (leave it reset but still powered).
2285 	 * Clean up and free all NGE data structures
2286 	 */
2287 	if (ngep->periodic_id != NULL) {
2288 		ddi_periodic_delete(ngep->periodic_id);
2289 		ngep->periodic_id = NULL;
2290 	}
2291 
2292 	if (ngep->progress & PROGRESS_KSTATS)
2293 		nge_fini_kstats(ngep);
2294 
2295 	if (ngep->progress & PROGRESS_HWINT) {
2296 		mutex_enter(ngep->genlock);
2297 		nge_restore_mac_addr(ngep);
2298 		(void) nge_chip_stop(ngep, B_FALSE);
2299 		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2300 		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2301 			(void) nge_smu_sema(ngep, B_FALSE);
2302 		}
2303 		mutex_exit(ngep->genlock);
2304 	}
2305 
2306 	if (ngep->progress & PROGRESS_SWINT)
2307 		nge_rem_intrs(ngep);
2308 
2309 	if (ngep->progress & PROGRESS_FACTOTUM)
2310 		(void) ddi_intr_remove_softint(ngep->factotum_hdl);
2311 
2312 	if (ngep->progress & PROGRESS_RESCHED)
2313 		(void) ddi_intr_remove_softint(ngep->resched_hdl);
2314 
2315 	if (ngep->progress & PROGRESS_INTR) {
2316 		mutex_destroy(srp->tx_lock);
2317 		mutex_destroy(srp->tc_lock);
2318 		mutex_destroy(&srp->dmah_lock);
2319 		mutex_destroy(brp->recycle_lock);
2320 
2321 		mutex_destroy(ngep->genlock);
2322 		mutex_destroy(ngep->softlock);
2323 		rw_destroy(ngep->rwlock);
2324 	}
2325 
2326 	if (ngep->progress & PROGRESS_REGS)
2327 		ddi_regs_map_free(&ngep->io_handle);
2328 
2329 	if (ngep->progress & PROGRESS_CFG)
2330 		pci_config_teardown(&ngep->cfg_handle);
2331 
2332 	ddi_remove_minor_node(ngep->devinfo, NULL);
2333 
2334 	kmem_free(ngep, sizeof (*ngep));
2335 }
2336 
2337 static int
2338 nge_resume(dev_info_t *devinfo)
2339 {
2340 	nge_t		*ngep;
2341 	chip_info_t	*infop;
2342 	int 		err;
2343 
2344 	ASSERT(devinfo != NULL);
2345 
2346 	ngep = ddi_get_driver_private(devinfo);
2347 	err = 0;
2348 
2349 	/*
2350 	 * If there are state inconsistencies, this is bad.  Returning
2351 	 * DDI_FAILURE here will eventually cause the machine to panic,
2352 	 * so it is best done here so that there is a possibility of
2353 	 * debugging the problem.
2354 	 */
2355 	if (ngep == NULL)
2356 		cmn_err(CE_PANIC,
2357 		    "nge: ngep returned from ddi_get_driver_private was NULL");
2358 	infop = (chip_info_t *)&ngep->chipinfo;
2359 
2360 	if (ngep->devinfo != devinfo)
2361 		cmn_err(CE_PANIC,
2362 		    "nge: passed devinfo not the same as saved devinfo");
2363 
2364 	mutex_enter(ngep->genlock);
2365 	rw_enter(ngep->rwlock, RW_WRITER);
2366 
2367 	/*
2368 	 * Fetch the config space.  Even though we have most of it cached,
2369 	 * some values *might* change across a suspend/resume.
2370 	 */
2371 	nge_chip_cfg_init(ngep, infop, B_FALSE);
2372 
2373 	/*
2374 	 * This branch can be taken in only one case: the port hasn't been
2375 	 * plumbed.
2376 	 */
2377 	if (ngep->suspended == B_FALSE) {
2378 		rw_exit(ngep->rwlock);
2379 		mutex_exit(ngep->genlock);
2380 		return (DDI_SUCCESS);
2381 	}
2382 
2383 	nge_tx_recycle_all(ngep);
2384 	err = nge_reinit_ring(ngep);
2385 	if (!err) {
2386 		err = nge_chip_reset(ngep);
2387 		if (!err)
2388 			err = nge_chip_start(ngep);
2389 	}
2390 
2391 	if (err) {
2392 		/*
2393 		 * We note the failure, but return success, as the
2394 		 * system is still usable without this controller.
2395 		 */
2396 		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
2397 	} else {
2398 		ngep->nge_mac_state = NGE_MAC_STARTED;
2399 	}
2400 	ngep->suspended = B_FALSE;
2401 
2402 	rw_exit(ngep->rwlock);
2403 	mutex_exit(ngep->genlock);
2404 
2405 	return (DDI_SUCCESS);
2406 }
2407 
2408 /*
2409  * attach(9E) -- Attach a device to the system
2410  *
2411  * Called once for each board successfully probed.
2412  */
2413 static int
2414 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2415 {
2416 	int		err;
2417 	int		i;
2418 	int		instance;
2419 	caddr_t		regs;
2420 	nge_t		*ngep;
2421 	chip_info_t	*infop;
2422 	mac_register_t	*macp;
2423 
2424 	switch (cmd) {
2425 	default:
2426 		return (DDI_FAILURE);
2427 
2428 	case DDI_RESUME:
2429 		return (nge_resume(devinfo));
2430 
2431 	case DDI_ATTACH:
2432 		break;
2433 	}
2434 
2435 	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
2436 	instance = ddi_get_instance(devinfo);
2437 	ddi_set_driver_private(devinfo, ngep);
2438 	ngep->devinfo = devinfo;
2439 
2440 	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
2441 	    NGE_DRIVER_NAME, instance);
2442 	err = pci_config_setup(devinfo, &ngep->cfg_handle);
2443 	if (err != DDI_SUCCESS) {
2444 		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
2445 		goto attach_fail;
2446 	}
2447 	/*
2448 	 * param_txbcopy_threshold and param_rxbcopy_threshold are tx/rx bcopy
2449 	 * thresholds. Bounds: min 0, max NGE_MAX_SDU
2450 	 */
2451 	ngep->param_txbcopy_threshold = NGE_TX_COPY_SIZE;
2452 	ngep->param_rxbcopy_threshold = NGE_RX_COPY_SIZE;
2453 
2454 	/*
2455 	 * param_recv_max_packet is the max packets received per interrupt.
2456 	 * Bounds: min 0, max NGE_RECV_SLOTS_DESC_1024
2457 	 */
2458 	ngep->param_recv_max_packet = 128;
2459 
2460 	/*
2461 	 * param_poll_quiet_time and param_poll_busy_time are the quiet/busy
2462 	 * times used to switch between per-packet interrupts and polling mode.
2463 	 * Bounds: min 0, max 10000
2464 	 */
2465 	ngep->param_poll_quiet_time = NGE_POLL_QUIET_TIME;
2466 	ngep->param_poll_busy_time = NGE_POLL_BUSY_TIME;
2467 
2468 	/*
2469 	 * param_rx_intr_hwater/param_rx_intr_lwater: packets received
2470 	 * to trigger the poll_quiet_time/poll_busy_time counter.
2471 	 * Bounds: min 0, max  NGE_RECV_SLOTS_DESC_1024.
2472 	 */
2473 	ngep->param_rx_intr_hwater = 1;
2474 	ngep->param_rx_intr_lwater = 8;
2475 
2476 	/*
2477 	 * param_tx_n_intr: Per N tx packets to do tx recycle in poll mode.
2478 	 * Bounds: min 1, max 10000.
2479 	 */
2480 	ngep->param_tx_n_intr = NGE_TX_N_INTR;
2481 
2482 	infop = (chip_info_t *)&ngep->chipinfo;
2483 	nge_chip_cfg_init(ngep, infop, B_FALSE);
2484 	nge_init_dev_spec_param(ngep);
2485 	nge_get_props(ngep);
2486 	ngep->progress |= PROGRESS_CFG;
2487 
2488 	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
2489 	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
2490 	if (err != DDI_SUCCESS) {
2491 		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
2492 		goto attach_fail;
2493 	}
2494 	ngep->io_regs = regs;
2495 	ngep->progress |= PROGRESS_REGS;
2496 
2497 	err = nge_register_intrs_and_init_locks(ngep);
2498 	if (err != DDI_SUCCESS) {
2499 		nge_problem(ngep, "nge_attach:"
2500 		    " register intrs and init locks failed");
2501 		goto attach_fail;
2502 	}
2503 	nge_init_ring_param_lock(ngep);
2504 	ngep->progress |= PROGRESS_INTR;
2505 
2506 	mutex_enter(ngep->genlock);
2507 
2508 	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2509 	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2510 		err = nge_smu_sema(ngep, B_TRUE);
2511 		if (err != DDI_SUCCESS) {
2512 			nge_problem(ngep, "nge_attach: nge_smu_sema() failed");
			mutex_exit(ngep->genlock);
2513 			goto attach_fail;
2514 		}
2515 	}
2516 	/*
2517 	 * Initialise link state variables
2518 	 * Stop, reset & reinitialise the chip.
2519 	 * Initialise the (internal) PHY.
2520 	 */
2521 	nge_phys_init(ngep);
2522 	ngep->nge_chip_state = NGE_CHIP_INITIAL;
2523 	err = nge_chip_reset(ngep);
2524 	if (err != DDI_SUCCESS) {
2525 		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
2526 		mutex_exit(ngep->genlock);
2527 		goto attach_fail;
2528 	}
2529 	nge_chip_sync(ngep);
2530 
2531 	/*
2532 	 * Now that mutex locks are initialized, enable interrupts.
2533 	 */
2534 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2535 		/* Call ddi_intr_block_enable() for MSI interrupts */
2536 		(void) ddi_intr_block_enable(ngep->htable,
2537 		    ngep->intr_actual_cnt);
2538 	} else {
2539 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
2540 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
2541 			(void) ddi_intr_enable(ngep->htable[i]);
2542 		}
2543 	}
2544 
2545 	ngep->link_state = LINK_STATE_UNKNOWN;
2546 	ngep->progress |= PROGRESS_HWINT;
2547 
2548 	/*
2549 	 * Register NDD-tweakable parameters
2550 	 */
2551 	if (nge_nd_init(ngep)) {
2552 		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
2553 		mutex_exit(ngep->genlock);
2554 		goto attach_fail;
2555 	}
2556 	ngep->progress |= PROGRESS_NDD;
2557 
2558 	/*
2559 	 * Create & initialise named kstats
2560 	 */
2561 	nge_init_kstats(ngep, instance);
2562 	ngep->progress |= PROGRESS_KSTATS;
2563 
2564 	mutex_exit(ngep->genlock);
2565 
2566 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2567 		goto attach_fail;
2568 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2569 	macp->m_driver = ngep;
2570 	macp->m_dip = devinfo;
2571 	macp->m_src_addr = infop->vendor_addr.addr;
2572 	macp->m_callbacks = &nge_m_callbacks;
2573 	macp->m_min_sdu = 0;
2574 	macp->m_max_sdu = ngep->default_mtu;
2575 	macp->m_margin = VTAG_SIZE;
2576 	macp->m_priv_props = nge_priv_props;
2577 	macp->m_priv_prop_count = NGE_MAX_PRIV_PROPS;
2578 	/*
2579 	 * Finally, we're ready to register ourselves with the mac
2580 	 * interface; if this succeeds, we're all ready to start()
2581 	 */
2582 	err = mac_register(macp, &ngep->mh);
2583 	mac_free(macp);
2584 	if (err != 0)
2585 		goto attach_fail;
2586 
2587 	/*
2588 	 * Register a periodic handler.
2589 	 * nge_chip_cyclic() is invoked in kernel context.
2590 	 */
2591 	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
2592 	    NGE_CYCLIC_PERIOD, DDI_IPL_0);
2593 
2594 	ngep->progress |= PROGRESS_READY;
2595 	return (DDI_SUCCESS);
2596 
2597 attach_fail:
2598 	nge_unattach(ngep);
2599 	return (DDI_FAILURE);
2600 }
2601 
2602 static int
2603 nge_suspend(nge_t *ngep)
2604 {
2605 	mutex_enter(ngep->genlock);
2606 	rw_enter(ngep->rwlock, RW_WRITER);
2607 
2608 	/* if the port hasn't been plumbed, just return */
2609 	if (ngep->nge_mac_state != NGE_MAC_STARTED) {
2610 		rw_exit(ngep->rwlock);
2611 		mutex_exit(ngep->genlock);
2612 		return (DDI_SUCCESS);
2613 	}
2614 	ngep->suspended = B_TRUE;
2615 	(void) nge_chip_stop(ngep, B_FALSE);
2616 	ngep->nge_mac_state = NGE_MAC_STOPPED;
2617 
2618 	rw_exit(ngep->rwlock);
2619 	mutex_exit(ngep->genlock);
2620 	return (DDI_SUCCESS);
2621 }
2622 
2623 /*
2624  * detach(9E) -- Detach a device from the system
2625  */
2626 static int
2627 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2628 {
2629 	int i;
2630 	nge_t *ngep;
2631 	mul_item *p, *nextp;
2632 	buff_ring_t *brp;
2633 
2634 	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));
2635 
2636 	ngep = ddi_get_driver_private(devinfo);
2637 	brp = ngep->buff;
2638 
2639 	switch (cmd) {
2640 	default:
2641 		return (DDI_FAILURE);
2642 
2643 	case DDI_SUSPEND:
2644 		/*
2645 		 * Stop the NIC
2646 		 * Note: This driver doesn't currently support WOL, but
2647 		 *	should it in the future, it is important to
2648 		 *	make sure the PHY remains powered so that the
2649 		 *	wakeup packet can actually be received.
2650 		 */
2651 		return (nge_suspend(ngep));
2652 
2653 	case DDI_DETACH:
2654 		break;
2655 	}
2656 
2657 	/* Wait for all buffers posted to the upper layer to be released */
2658 	for (i = 0; i < 1000; i++) {
2659 		if (brp->rx_hold == 0)
2660 			break;
2661 		drv_usecwait(1000);
2662 	}
2663 
2664 	/* If any buffers are still posted, refuse to detach */
2665 	if (brp->rx_hold != 0)
2666 		return (DDI_FAILURE);
2667 
2668 	/*
2669 	 * Unregister from the GLD subsystem.  This can fail, in
2670 	 * particular if there are DLPI style-2 streams still open -
2671 	 * in which case we just return failure without shutting
2672 	 * down chip operations.
2673 	 */
2674 	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
2675 		return (DDI_FAILURE);
2676 
2677 	/*
2678 	 * Free the multicast table. mac_unregister() must be called before
2679 	 * this point so that the table is still usable if mac_unregister()
2680 	 * fails.
2681 	 */
2682 	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
2683 		nextp = p->next;
2684 		kmem_free(p, sizeof (mul_item));
2685 	}
2686 	ngep->pcur_mulist = NULL;
2687 
2688 	/*
2689 	 * All activity stopped, so we can clean up & exit
2690 	 */
2691 	nge_unattach(ngep);
2692 	return (DDI_SUCCESS);
2693 }
2694 
2695 /*
2696  * quiesce(9E) entry point.
2697  *
2698  * This function is called when the system is single-threaded at high
2699  * PIL with preemption disabled. Therefore, this function must not be
2700  * blocked.
2701  *
2702  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2703  * DDI_FAILURE indicates an error condition and should almost never happen.
2704  */
2705 static int
2706 nge_quiesce(dev_info_t *devinfo)
2707 {
2708 	nge_t *ngep;
2709 
2710 	ngep = ddi_get_driver_private(devinfo);
2711 
2712 	if (ngep == NULL)
2713 		return (DDI_FAILURE);
2714 
2715 	/*
2716 	 * Turn off debug tracing
2717 	 */
2718 	nge_debug = 0;
2719 	ngep->debug = 0;
2720 
2721 	nge_restore_mac_addr(ngep);
2722 	(void) nge_chip_stop(ngep, B_FALSE);
2723 
2724 	return (DDI_SUCCESS);
2725 }
2726 
2727 
2728 
2729 /*
2730  * ========== Module Loading Data & Entry Points ==========
2731  */
2732 
2733 DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
2734     NULL, NULL, D_MP, NULL, nge_quiesce);
2735 
2736 
2737 static struct modldrv nge_modldrv = {
2738 	&mod_driverops,		/* Type of module.  This one is a driver */
2739 	nge_ident,		/* short description */
2740 	&nge_dev_ops		/* driver specific ops */
2741 };
2742 
2743 static struct modlinkage modlinkage = {
2744 	MODREV_1, (void *)&nge_modldrv, NULL
2745 };
2746 
2747 
2748 int
2749 _info(struct modinfo *modinfop)
2750 {
2751 	return (mod_info(&modlinkage, modinfop));
2752 }
2753 
2754 int
2755 _init(void)
2756 {
2757 	int status;
2758 
2759 	mac_init_ops(&nge_dev_ops, "nge");
2760 	status = mod_install(&modlinkage);
2761 	if (status != DDI_SUCCESS)
2762 		mac_fini_ops(&nge_dev_ops);
2763 	else
2764 		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
2765 
2766 	return (status);
2767 }
2768 
2769 int
2770 _fini(void)
2771 {
2772 	int status;
2773 
2774 	status = mod_remove(&modlinkage);
2775 	if (status == DDI_SUCCESS) {
2776 		mac_fini_ops(&nge_dev_ops);
2777 		mutex_destroy(nge_log_mutex);
2778 	}
2779 
2780 	return (status);
2781 }
2782 
2783 /*
2784  * ============ Init MSI/Fixed/SoftInterrupt routines ==============
2785  */
2786 
2787 /*
2788  * Register interrupts and initialize each mutex and condition variables
2789  */
2790 
2791 static int
2792 nge_register_intrs_and_init_locks(nge_t *ngep)
2793 {
2794 	int		err;
2795 	int		intr_types;
2796 	uint_t		soft_prip;
2797 	nge_msi_mask	msi_mask;
2798 	nge_msi_map0_vec map0_vec;
2799 	nge_msi_map1_vec map1_vec;
2800 
2801 	/*
2802 	 * Add the softint handlers:
2803 	 *
2804 	 * Both of these handlers are used to avoid restrictions on the
2805 	 * context and/or mutexes required for some operations.  In
2806 	 * particular, the hardware interrupt handler and its subfunctions
2807 	 * can detect a number of conditions that we don't want to handle
2808 	 * in that context or with that set of mutexes held.  So, these
2809 	 * softints are triggered instead:
2810 	 *
2811 	 * the <resched> softint is triggered if we have previously
2812 	 * had to refuse to send a packet because of resource shortage
2813 	 * (we've run out of transmit buffers), but the send completion
2814 	 * interrupt handler has now detected that more buffers have
2815 	 * become available.  Its only purpose is to call gld_sched()
2816 	 * to retry the pending transmits (we're not allowed to hold
2817 	 * driver-defined mutexes across gld_sched()).
2818 	 *
2819 	 * the <factotum> is triggered if the h/w interrupt handler
2820 	 * sees the <link state changed> or <error> bits in the status
2821 	 * block.  It's also triggered periodically to poll the link
2822 	 * state, just in case we aren't getting link status change
2823 	 * interrupts ...
2824 	 */
2825 	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
2826 	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
2827 	if (err != DDI_SUCCESS) {
2828 		nge_problem(ngep,
2829 		    "nge_attach: add nge_reschedule softintr failed");
2830 
2831 		return (DDI_FAILURE);
2832 	}
2833 	ngep->progress |= PROGRESS_RESCHED;
2834 	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
2835 	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
2836 	if (err != DDI_SUCCESS) {
2837 		nge_problem(ngep,
2838 		    "nge_attach: add nge_chip_factotum softintr failed!");
2839 
2840 		return (DDI_FAILURE);
2841 	}
2842 	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
2843 	    != DDI_SUCCESS) {
2844 		nge_problem(ngep, "nge_attach: get softintr priority failed\n");
2845 
2846 		return (DDI_FAILURE);
2847 	}
2848 	ngep->soft_pri = soft_prip;
2849 
2850 	ngep->progress |= PROGRESS_FACTOTUM;
2851 	/* Get supported interrupt types */
2852 	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
2853 	    != DDI_SUCCESS) {
2854 		nge_error(ngep, "ddi_intr_get_supported_types failed\n");
2855 
2856 		return (DDI_FAILURE);
2857 	}
2858 
2859 	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
2860 	    intr_types));
2861 
2862 	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {
2863 
2864 		/* MSI configuration for the MCP55 chipset */
2865 		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
2866 		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
2867 
2868 
2869 			/* Enable the 8 vectors */
2870 			msi_mask.msi_mask_val =
2871 			    nge_reg_get32(ngep, NGE_MSI_MASK);
2872 			msi_mask.msi_msk_bits.vec0 = NGE_SET;
2873 			msi_mask.msi_msk_bits.vec1 = NGE_SET;
2874 			msi_mask.msi_msk_bits.vec2 = NGE_SET;
2875 			msi_mask.msi_msk_bits.vec3 = NGE_SET;
2876 			msi_mask.msi_msk_bits.vec4 = NGE_SET;
2877 			msi_mask.msi_msk_bits.vec5 = NGE_SET;
2878 			msi_mask.msi_msk_bits.vec6 = NGE_SET;
2879 			msi_mask.msi_msk_bits.vec7 = NGE_SET;
2880 			nge_reg_put32(ngep, NGE_MSI_MASK,
2881 			    msi_mask.msi_mask_val);
2882 
2883 			/*
2884 			 * Remap MSI MAP0 and MAP1. By default the MCP55
2885 			 * maps all interrupts to vector 0, so software
2886 			 * must remap them here. This mapping is the same
2887 			 * as on the CK804.
2888 			 */
2889 			map0_vec.msi_map0_val =
2890 			    nge_reg_get32(ngep, NGE_MSI_MAP0);
2891 			map1_vec.msi_map1_val =
2892 			    nge_reg_get32(ngep, NGE_MSI_MAP1);
2893 			map0_vec.vecs_bits.reint_vec = 0;
2894 			map0_vec.vecs_bits.rcint_vec = 0;
2895 			map0_vec.vecs_bits.miss_vec = 3;
2896 			map0_vec.vecs_bits.teint_vec = 5;
2897 			map0_vec.vecs_bits.tcint_vec = 5;
2898 			map0_vec.vecs_bits.stint_vec = 2;
2899 			map0_vec.vecs_bits.mint_vec = 6;
2900 			map0_vec.vecs_bits.rfint_vec = 0;
2901 			map1_vec.vecs_bits.tfint_vec = 5;
2902 			map1_vec.vecs_bits.feint_vec = 6;
2903 			map1_vec.vecs_bits.resv8_11 = 3;
2904 			map1_vec.vecs_bits.resv12_15 = 1;
2905 			map1_vec.vecs_bits.resv16_19 = 0;
2906 			map1_vec.vecs_bits.resv20_23 = 7;
2907 			map1_vec.vecs_bits.resv24_31 = 0xff;
2908 			nge_reg_put32(ngep, NGE_MSI_MAP0,
2909 			    map0_vec.msi_map0_val);
2910 			nge_reg_put32(ngep, NGE_MSI_MAP1,
2911 			    map1_vec.msi_map1_val);
2912 		}
2913 		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
2914 			NGE_DEBUG(("MSI registration failed, "
2915 			    "trying FIXED interrupt type\n"));
2916 		} else {
2917 			nge_log(ngep, "Using MSI interrupt type\n");
2918 
2919 			ngep->intr_type = DDI_INTR_TYPE_MSI;
2920 			ngep->progress |= PROGRESS_SWINT;
2921 		}
2922 	}
2923 
2924 	if (!(ngep->progress & PROGRESS_SWINT) &&
2925 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
2926 		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
2927 			nge_error(ngep, "FIXED interrupt "
2928 			    "registration failed\n");
2929 
2930 			return (DDI_FAILURE);
2931 		}
2932 
2933 		nge_log(ngep, "Using FIXED interrupt type\n");
2934 
2935 		ngep->intr_type = DDI_INTR_TYPE_FIXED;
2936 		ngep->progress |= PROGRESS_SWINT;
2937 	}
2938 
2939 
2940 	if (!(ngep->progress & PROGRESS_SWINT)) {
2941 		nge_error(ngep, "No interrupts registered\n");
2942 
2943 		return (DDI_FAILURE);
2944 	}
2945 	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
2946 	    DDI_INTR_PRI(ngep->intr_pri));
2947 	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
2948 	    DDI_INTR_PRI(ngep->soft_pri));
2949 	rw_init(ngep->rwlock, NULL, RW_DRIVER,
2950 	    DDI_INTR_PRI(ngep->intr_pri));
2951 
2952 	return (DDI_SUCCESS);
2953 }
2954 
2955 /*
2956  * nge_add_intrs:
2957  *
2958  * Register FIXED or MSI interrupts.
2959  */
2960 static int
2961 nge_add_intrs(nge_t *ngep, int	intr_type)
2962 {
2963 	dev_info_t	*dip = ngep->devinfo;
2964 	int		avail, actual, intr_size, count = 0;
2965 	int		i, flag, ret;
2966 
2967 	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2968 
2969 	/* Get number of interrupts */
2970 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2971 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2972 		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2973 		    "count: %d", ret, count);
2974 
2975 		return (DDI_FAILURE);
2976 	}
2977 
2978 	/* Get number of available interrupts */
2979 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2980 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2981 		nge_error(ngep, "ddi_intr_get_navail() failure, "
2982 		    "ret: %d, avail: %d\n", ret, avail);
2983 
2984 		return (DDI_FAILURE);
2985 	}
2986 
2987 	if (avail < count) {
2988 		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
2989 		    count, avail));
2990 	}
2991 	flag = DDI_INTR_ALLOC_NORMAL;
2992 
2993 	/* Allocate an array of interrupt handles */
2994 	intr_size = count * sizeof (ddi_intr_handle_t);
2995 	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2996 
2997 	/* Call ddi_intr_alloc() */
2998 	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2999 	    count, &actual, flag);
3000 
3001 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
3002 		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
3003 
3004 		kmem_free(ngep->htable, intr_size);
3005 		return (DDI_FAILURE);
3006 	}
3007 
3008 	if (actual < count) {
3009 		NGE_DEBUG(("Requested: %d, Received: %d\n",
3010 		    count, actual));
3011 	}
3012 
3013 	ngep->intr_actual_cnt = actual;
3014 	ngep->intr_req_cnt = count;
3015 
3016 	/*
3017 	 * Get priority for first msi, assume remaining are all the same
3018 	 */
3019 	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
3020 	    DDI_SUCCESS) {
3021 		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
3022 
3023 		/* Free already allocated intr */
3024 		for (i = 0; i < actual; i++) {
3025 			(void) ddi_intr_free(ngep->htable[i]);
3026 		}
3027 
3028 		kmem_free(ngep->htable, intr_size);
3029 
3030 		return (DDI_FAILURE);
3031 	}
3032 	/* Test for high level mutex */
3033 	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
3034 		nge_error(ngep, "nge_add_intrs:"
3035 		    "Hi level interrupt not supported");
3036 
3037 		for (i = 0; i < actual; i++)
3038 			(void) ddi_intr_free(ngep->htable[i]);
3039 
3040 		kmem_free(ngep->htable, intr_size);
3041 
3042 		return (DDI_FAILURE);
3043 	}
3044 
3045 
3046 	/* Call ddi_intr_add_handler() */
3047 	for (i = 0; i < actual; i++) {
3048 		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
3049 		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
3050 			nge_error(ngep, "ddi_intr_add_handler() "
3051 			    "failed %d\n", ret);
3052 
3053 			/* Free already allocated intr */
3054 			for (i = 0; i < actual; i++) {
3055 				(void) ddi_intr_free(ngep->htable[i]);
3056 			}
3057 
3058 			kmem_free(ngep->htable, intr_size);
3059 
3060 			return (DDI_FAILURE);
3061 		}
3062 	}
3063 
3064 	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
3065 	    != DDI_SUCCESS) {
3066 		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
3067 
3068 		for (i = 0; i < actual; i++) {
3069 			(void) ddi_intr_remove_handler(ngep->htable[i]);
3070 			(void) ddi_intr_free(ngep->htable[i]);
3071 		}
3072 
3073 		kmem_free(ngep->htable, intr_size);
3074 
3075 		return (DDI_FAILURE);
3076 	}
3077 
3078 	return (DDI_SUCCESS);
3079 }
3080 
3081 /*
3082  * nge_rem_intrs:
3083  *
3084  * Unregister FIXED or MSI interrupts
3085  */
3086 static void
3087 nge_rem_intrs(nge_t *ngep)
3088 {
3089 	int	i;
3090 
3091 	NGE_DEBUG(("nge_rem_intrs\n"));
3092 
3093 	/* Disable all interrupts */
3094 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
3095 		/* Call ddi_intr_block_disable() */
3096 		(void) ddi_intr_block_disable(ngep->htable,
3097 		    ngep->intr_actual_cnt);
3098 	} else {
3099 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
3100 			(void) ddi_intr_disable(ngep->htable[i]);
3101 		}
3102 	}
3103 
3104 	/* Call ddi_intr_remove_handler() */
3105 	for (i = 0; i < ngep->intr_actual_cnt; i++) {
3106 		(void) ddi_intr_remove_handler(ngep->htable[i]);
3107 		(void) ddi_intr_free(ngep->htable[i]);
3108 	}
3109 
3110 	kmem_free(ngep->htable,
3111 	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
3112 }
3113