xref: /titanic_51/usr/src/uts/common/io/nge/nge_main.c (revision 942c5e3c2dd127463517e5cc1694ee94ca45e021)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "nge.h"
30 
31 /*
32  * Describes the chip's DMA engine
33  */
34 
/*
 * "Hot" (descriptor mode 3) general DMA attributes: 40-bit addressing
 * (addr_hi = 0xFF_FFFF_FFFF) with a single-cookie binding (sgllen 1).
 * Selected via nge_hot_desc and used through ngep->desc_attr.dma_attr.
 */
static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};

/*
 * "Hot" mode TX-buffer binding attributes: same 40-bit addressing but
 * allowing a scatter/gather list of up to NGE_MAX_COOKIES cookies.
 */
static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};

/*
 * "Sum" (descriptor mode 2 / offload) general DMA attributes: only
 * 32-bit addressing, single-cookie binding.  Selected via nge_sum_desc.
 */
static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};

/*
 * "Sum" mode TX-buffer binding attributes: 32-bit addressing with up
 * to NGE_MAX_COOKIES scatter/gather cookies.
 */
static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
94 
95 /*
96  * DMA access attributes for data.
97  */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* buffers are little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* descriptors are little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* registers are little-endian */
	DDI_STRICTORDER_ACC,		/* strict ordering of accesses */
	DDI_DEFAULT_ACC
};
124 
125 /*
126  * NIC DESC MODE 2
127  */
128 
/*
 * Descriptor-mode operations for DESC_OFFLOAD ("sum") mode: rx/tx
 * descriptor sizes, the DMA attributes to use, and the descriptor
 * fill/check handlers invoked through ngep->desc_attr (see
 * nge_check_desc_prop() for the mode selection).
 */
static const nge_desc_attr_t nge_sum_desc = {

	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};

/*
 * NIC DESC MODE 3
 *
 * As above, but for DESC_HOT mode descriptors, which use the 40-bit
 * hot_dma_attr / hot_tx_dma_attr attributes.
 */
static const nge_desc_attr_t nge_hot_desc = {

	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};
156 
/* Driver identification string; "%I%" is an SCCS keyword */
static char nge_ident[] = "nVidia 1Gb Ethernet %I%";

/*
 * Names of the driver/OBP properties read in nge_get_props()
 */
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[]	= "nge-debug-flags";
static char intr_moderation[] = "intr-moderation";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";

/* Defined elsewhere; presumably guards driver logging — see nge_log */
extern kmutex_t nge_log_mutex[1];

/*
 * GLDv3 (mac) entry points implemented in this file
 */
static int		nge_m_start(void *);
static void		nge_m_stop(void *);
static int		nge_m_promisc(void *, boolean_t);
static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
static int		nge_m_unicst(void *, const uint8_t *);
static void		nge_m_resources(void *);
static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);

/* Optional callbacks provided: ioctl and capability query only */
#define		NGE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	NULL,		/* no resources callback registered here;
			 * NOTE(review): nge_m_resources() is declared
			 * above — confirm whether this is intended */
	nge_m_ioctl,
	nge_m_getcapab
};

static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable: MSI interrupt support is disabled by default.
 */
boolean_t nge_enable_msi = B_FALSE;
204 
205 static enum ioc_reply
206 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
207 {
208 	/*
209 	 * If the mode isn't being changed, there's nothing to do ...
210 	 */
211 	if (mode == ngep->param_loop_mode)
212 		return (IOC_ACK);
213 
214 	/*
215 	 * Validate the requested mode and prepare a suitable message
216 	 * to explain the link down/up cycle that the change will
217 	 * probably induce ...
218 	 */
219 	switch (mode) {
220 	default:
221 		return (IOC_INVAL);
222 
223 	case NGE_LOOP_NONE:
224 	case NGE_LOOP_EXTERNAL_100:
225 	case NGE_LOOP_EXTERNAL_10:
226 	case NGE_LOOP_INTERNAL_PHY:
227 		break;
228 	}
229 
230 	/*
231 	 * All OK; tell the caller to reprogram
232 	 * the PHY and/or MAC for the new mode ...
233 	 */
234 	ngep->param_loop_mode = mode;
235 	return (IOC_RESTART_ACK);
236 }
237 
/* Use the INIT debug-flag class for the code that follows */
#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_INIT
240 
241 /*
242  * Utility routine to carve a slice off a chunk of allocated memory,
243  * updating the chunk descriptor accordingly.  The size of the slice
244  * is given by the product of the <qty> and <size> parameters.
245  */
246 void
247 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
248     uint32_t qty, uint32_t size)
249 {
250 	size_t totsize;
251 
252 	totsize = qty*size;
253 	ASSERT(size > 0);
254 	ASSERT(totsize <= chunk->alength);
255 
256 	*slice = *chunk;
257 	slice->nslots = qty;
258 	slice->size = size;
259 	slice->alength = totsize;
260 
261 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
262 	chunk->alength -= totsize;
263 	chunk->offset += totsize;
264 	chunk->cookie.dmac_laddress += totsize;
265 	chunk->cookie.dmac_size -= totsize;
266 }
267 
268 /*
269  * Allocate an area of memory and a DMA handle for accessing it
270  */
/*
 * Allocate an area of memory and a DMA handle for accessing it
 *
 * Returns DDI_SUCCESS with *dma_p fully set up (handle, kernel
 * mapping and exactly one DMA cookie), or DDI_FAILURE with any
 * partial allocations already released via nge_free_dma_mem().
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 *
	 * NB: the DMA attributes come from the current descriptor
	 * mode (ngep->desc_attr.dma_attr), not from attr_p, which
	 * only supplies the device access attributes below.
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together; the area must map to a single cookie
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	/* Not sliced into ring slots yet; see nge_slice_chunk() */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}
320 
321 /*
322  * Free one allocated area of DMAable memory
323  */
324 void
325 nge_free_dma_mem(dma_area_t *dma_p)
326 {
327 	if (dma_p->dma_hdl != NULL) {
328 		if (dma_p->ncookies) {
329 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
330 			dma_p->ncookies = 0;
331 		}
332 	}
333 	if (dma_p->acc_hdl != NULL) {
334 		ddi_dma_mem_free(&dma_p->acc_hdl);
335 		dma_p->acc_hdl = NULL;
336 	}
337 	if (dma_p->dma_hdl != NULL) {
338 		ddi_dma_free_handle(&dma_p->dma_hdl);
339 		dma_p->dma_hdl = NULL;
340 	}
341 }
342 
343 #define	ALLOC_TX_BUF	0x1
344 #define	ALLOC_TX_DESC	0x2
345 #define	ALLOC_RX_DESC	0x4
346 
347 int
348 nge_alloc_bufs(nge_t *ngep)
349 {
350 	int err;
351 	int split;
352 	int progress;
353 	size_t txbuffsize;
354 	size_t rxdescsize;
355 	size_t txdescsize;
356 
357 	txbuffsize = ngep->tx_desc * ngep->buf_size;
358 	rxdescsize = ngep->rx_desc;
359 	txdescsize = ngep->tx_desc;
360 	rxdescsize *= ngep->desc_attr.rxd_size;
361 	txdescsize *= ngep->desc_attr.txd_size;
362 	progress = 0;
363 
364 	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
365 	/*
366 	 * Allocate memory & handles for TX buffers
367 	 */
368 	ASSERT((txbuffsize % ngep->nge_split) == 0);
369 	for (split = 0; split < ngep->nge_split; ++split) {
370 		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
371 		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
372 		    &ngep->send->buf[split]);
373 		if (err != DDI_SUCCESS)
374 			goto fail;
375 	}
376 
377 	progress |= ALLOC_TX_BUF;
378 
379 	/*
380 	 * Allocate memory & handles for receive return rings and
381 	 * buffer (producer) descriptor rings
382 	 */
383 	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
384 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
385 	if (err != DDI_SUCCESS)
386 		goto fail;
387 	progress |= ALLOC_RX_DESC;
388 
389 	/*
390 	 * Allocate memory & handles for TX descriptor rings,
391 	 */
392 	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
393 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
394 	if (err != DDI_SUCCESS)
395 		goto fail;
396 	return (DDI_SUCCESS);
397 
398 fail:
399 	if (progress & ALLOC_RX_DESC)
400 		nge_free_dma_mem(&ngep->recv->desc);
401 	if (progress & ALLOC_TX_BUF) {
402 		for (split = 0; split < ngep->nge_split; ++split)
403 			nge_free_dma_mem(&ngep->send->buf[split]);
404 	}
405 
406 	return (DDI_FAILURE);
407 }
408 
409 /*
410  * This routine frees the transmit and receive buffers and descriptors.
411  * Make sure the chip is stopped before calling it!
412  */
413 void
414 nge_free_bufs(nge_t *ngep)
415 {
416 	int split;
417 
418 	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
419 
420 	nge_free_dma_mem(&ngep->recv->desc);
421 	nge_free_dma_mem(&ngep->send->desc);
422 
423 	for (split = 0; split < ngep->nge_split; ++split)
424 		nge_free_dma_mem(&ngep->send->buf[split]);
425 }
426 
427 /*
428  * Clean up initialisation done above before the memory is freed
429  */
/*
 * Clean up initialisation done above before the memory is freed
 *
 * Unbinds and frees every preallocated TX DMA handle, empties the
 * handle free list, and releases the s/w send-descriptor array.
 */
static void
nge_fini_send_ring(nge_t *ngep)
{
	uint32_t slot;
	size_t dmah_num;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;

	srp = ngep->send;
	ssbdp = srp->sw_sbds;

	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	for (slot = 0; slot < dmah_num; ++slot) {
		if (srp->dmahndl[slot].hndl) {
			/* unbind may fail if never bound; deliberately ignored */
			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
			srp->dmahndl[slot].hndl = NULL;
			srp->dmahndl[slot].next = NULL;
		}
	}

	srp->dmah_free.head = NULL;
	srp->dmah_free.tail = NULL;

	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));

}
460 
461 /*
462  * Initialise the specified Send Ring, using the information in the
463  * <dma_area> descriptors that it contains to set up all the other
464  * fields. This routine should be called only once for each ring.
465  */
/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all: carve
	 * one h/w descriptor and one buffer slice per s/w descriptor
	 * out of the preallocated DMA areas (see nge_slice_chunk()).
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		/* each split's buffer area must be consumed exactly */
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			/* also releases the handles allocated so far */
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	/*
	 * Thread the handles into the free list; the last link set in
	 * the loop above points past the array and is corrected here.
	 */
	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}
535 
536 /*
537  * Intialize the tx recycle pointer and tx sending pointer of tx ring
538  * and set the type of tx's data descriptor by default.
539  */
540 static void
541 nge_reinit_send_ring(nge_t *ngep)
542 {
543 	size_t dmah_num;
544 	uint32_t slot;
545 	send_ring_t *srp;
546 	sw_tx_sbd_t *ssbdp;
547 
548 	srp = ngep->send;
549 
550 	/*
551 	 * Reinitialise control variables ...
552 	 */
553 
554 	srp->tx_hwmark = NGE_DESC_MIN;
555 	srp->tx_lwmark = NGE_DESC_MIN;
556 
557 	srp->tx_next = 0;
558 	srp->tx_free = srp->desc.nslots;
559 	srp->tc_next = 0;
560 
561 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
562 
563 	for (slot = 0; slot - dmah_num != 0; ++slot)
564 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
565 
566 	srp->dmah_free.head = srp->dmahndl;
567 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
568 	srp->dmah_free.tail->next = NULL;
569 
570 	/*
571 	 * Zero and sync all the h/w Send Buffer Descriptors
572 	 */
573 	for (slot = 0; slot < srp->desc.nslots; ++slot) {
574 		ssbdp = &srp->sw_sbds[slot];
575 		ssbdp->flags = HOST_OWN;
576 	}
577 
578 	DMA_ZERO(srp->desc);
579 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
580 }
581 
582 /*
583  * Initialize the slot number of rx's ring
584  */
585 static void
586 nge_init_recv_ring(nge_t *ngep)
587 {
588 	recv_ring_t *rrp;
589 
590 	rrp = ngep->recv;
591 	rrp->desc.nslots = ngep->rx_desc;
592 	rrp->ngep = ngep;
593 }
594 
595 /*
596  * Intialize the rx recycle pointer and rx sending pointer of rx ring
597  */
598 static void
599 nge_reinit_recv_ring(nge_t *ngep)
600 {
601 	recv_ring_t *rrp;
602 
603 	rrp = ngep->recv;
604 
605 	/*
606 	 * Reinitialise control variables ...
607 	 */
608 	rrp->prod_index = 0;
609 	/*
610 	 * Zero and sync all the h/w Send Buffer Descriptors
611 	 */
612 	DMA_ZERO(rrp->desc);
613 	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
614 }
615 
616 /*
617  * Clean up initialisation done above before the memory is freed
618  */
619 static void
620 nge_fini_buff_ring(nge_t *ngep)
621 {
622 	uint32_t i;
623 	buff_ring_t *brp;
624 	dma_area_t *bufp;
625 	sw_rx_sbd_t *bsbdp;
626 
627 	brp = ngep->buff;
628 	bsbdp = brp->sw_rbds;
629 
630 	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
631 
632 	mutex_enter(brp->recycle_lock);
633 	brp->buf_sign++;
634 	mutex_exit(brp->recycle_lock);
635 	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
636 		if (bsbdp->bufp) {
637 			if (bsbdp->bufp->mp)
638 				freemsg(bsbdp->bufp->mp);
639 			nge_free_dma_mem(bsbdp->bufp);
640 			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
641 			bsbdp->bufp = NULL;
642 		}
643 	}
644 	while (brp->free_list != NULL) {
645 		bufp = brp->free_list;
646 		brp->free_list = bufp->next;
647 		bufp->next = NULL;
648 		if (bufp->mp)
649 			freemsg(bufp->mp);
650 		nge_free_dma_mem(bufp);
651 		kmem_free(bufp, sizeof (dma_area_t));
652 	}
653 	while (brp->recycle_list != NULL) {
654 		bufp = brp->recycle_list;
655 		brp->recycle_list = bufp->next;
656 		bufp->next = NULL;
657 		if (bufp->mp)
658 			freemsg(bufp->mp);
659 		nge_free_dma_mem(bufp);
660 		kmem_free(bufp, sizeof (dma_area_t));
661 	}
662 
663 
664 	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
665 	brp->sw_rbds = NULL;
666 }
667 
668 /*
669  * Intialize the Rx's data ring and free ring
670  */
671 static int
672 nge_init_buff_ring(nge_t *ngep)
673 {
674 	uint32_t err;
675 	uint32_t slot;
676 	uint32_t nslots_buff;
677 	uint32_t nslots_recv;
678 	buff_ring_t *brp;
679 	recv_ring_t *rrp;
680 	dma_area_t desc;
681 	dma_area_t *bufp;
682 	sw_rx_sbd_t *bsbdp;
683 
684 	rrp = ngep->recv;
685 	brp = ngep->buff;
686 	brp->nslots = ngep->rx_buf;
687 	brp->rx_bcopy = B_FALSE;
688 	nslots_recv = rrp->desc.nslots;
689 	nslots_buff = brp->nslots;
690 	brp->ngep = ngep;
691 
692 	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
693 
694 	/*
695 	 * Allocate the array of s/w Recv Buffer Descriptors
696 	 */
697 	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
698 	brp->sw_rbds = bsbdp;
699 	brp->free_list = NULL;
700 	brp->recycle_list = NULL;
701 	for (slot = 0; slot < nslots_buff; ++slot) {
702 		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
703 		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
704 		    + NGE_HEADROOM),
705 		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
706 		if (err != DDI_SUCCESS) {
707 			kmem_free(bufp, sizeof (dma_area_t));
708 			return (DDI_FAILURE);
709 		}
710 
711 		bufp->alength -= NGE_HEADROOM;
712 		bufp->offset += NGE_HEADROOM;
713 		bufp->private = (caddr_t)ngep;
714 		bufp->rx_recycle.free_func = nge_recv_recycle;
715 		bufp->rx_recycle.free_arg = (caddr_t)bufp;
716 		bufp->signature = brp->buf_sign;
717 		bufp->rx_delivered = B_FALSE;
718 		bufp->mp = desballoc(DMA_VPTR(*bufp),
719 		    ngep->buf_size + NGE_HEADROOM,
720 		    0, &bufp->rx_recycle);
721 
722 		if (bufp->mp == NULL) {
723 			return (DDI_FAILURE);
724 		}
725 		bufp->next = brp->free_list;
726 		brp->free_list = bufp;
727 	}
728 
729 	/*
730 	 * Now initialise each array element once and for all
731 	 */
732 	desc = rrp->desc;
733 	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
734 		nge_slice_chunk(&bsbdp->desc, &desc, 1,
735 		    ngep->desc_attr.rxd_size);
736 		bufp = brp->free_list;
737 		brp->free_list = bufp->next;
738 		bsbdp->bufp = bufp;
739 		bsbdp->flags = CONTROLER_OWN;
740 		bufp->next = NULL;
741 	}
742 
743 	ASSERT(desc.alength == 0);
744 	return (DDI_SUCCESS);
745 }
746 
747 /*
748  * Fill the host address of data in rx' descriptor
749  * and initialize free pointers of rx free ring
750  */
751 static int
752 nge_reinit_buff_ring(nge_t *ngep)
753 {
754 	uint32_t slot;
755 	uint32_t nslots_recv;
756 	buff_ring_t *brp;
757 	recv_ring_t *rrp;
758 	sw_rx_sbd_t *bsbdp;
759 	void *hw_bd_p;
760 
761 	brp = ngep->buff;
762 	rrp = ngep->recv;
763 	bsbdp = brp->sw_rbds;
764 	nslots_recv = rrp->desc.nslots;
765 	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
766 		hw_bd_p = DMA_VPTR(bsbdp->desc);
767 	/*
768 	 * There is a scenario: When the traffic of small tcp
769 	 * packet is heavy, suspending the tcp traffic will
770 	 * cause the preallocated buffers for rx not to be
771 	 * released in time by tcp taffic and cause rx's buffer
772 	 * pointers not to be refilled in time.
773 	 *
774 	 * At this point, if we reinitialize the driver, the bufp
775 	 * pointer for rx's traffic will be NULL.
776 	 * So the result of the reinitializion fails.
777 	 */
778 		if (bsbdp->bufp == NULL)
779 			return (DDI_FAILURE);
780 
781 		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
782 		    bsbdp->bufp->alength);
783 	}
784 	return (DDI_SUCCESS);
785 }
786 
787 static void
788 nge_init_ring_param_lock(nge_t *ngep)
789 {
790 	buff_ring_t *brp;
791 	send_ring_t *srp;
792 
793 	srp = ngep->send;
794 	brp = ngep->buff;
795 
796 	/* Init the locks for send ring */
797 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
798 	    DDI_INTR_PRI(ngep->intr_pri));
799 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
800 	    DDI_INTR_PRI(ngep->intr_pri));
801 	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
802 	    DDI_INTR_PRI(ngep->intr_pri));
803 
804 	/* Init parameters of buffer ring */
805 	brp->free_list = NULL;
806 	brp->recycle_list = NULL;
807 	brp->rx_hold = 0;
808 	brp->buf_sign = 0;
809 
810 	/* Init recycle list lock */
811 	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
812 	    DDI_INTR_PRI(ngep->intr_pri));
813 }
814 
815 int
816 nge_init_rings(nge_t *ngep)
817 {
818 	uint32_t err;
819 
820 	err = nge_init_send_ring(ngep);
821 	if (err != DDI_SUCCESS) {
822 		return (err);
823 	}
824 	nge_init_recv_ring(ngep);
825 
826 	err = nge_init_buff_ring(ngep);
827 	if (err != DDI_SUCCESS) {
828 		nge_fini_send_ring(ngep);
829 		return (DDI_FAILURE);
830 	}
831 
832 	return (err);
833 }
834 
835 static int
836 nge_reinit_ring(nge_t *ngep)
837 {
838 	int err;
839 
840 	nge_reinit_recv_ring(ngep);
841 	nge_reinit_send_ring(ngep);
842 	err = nge_reinit_buff_ring(ngep);
843 	return (err);
844 }
845 
846 
847 void
848 nge_fini_rings(nge_t *ngep)
849 {
850 	/*
851 	 * For receive ring, nothing need to be finished.
852 	 * So only finish buffer ring and send ring here.
853 	 */
854 	nge_fini_buff_ring(ngep);
855 	nge_fini_send_ring(ngep);
856 }
857 
858 /*
859  * Loopback ioctl code
860  */
861 
/*
 * Loopback modes advertised through the LB_GET_INFO ioctl; the value
 * field is what nge_set_loop_mode() accepts.
 */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};
868 
869 enum ioc_reply
870 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
871 {
872 	int cmd;
873 	uint32_t *lbmp;
874 	lb_info_sz_t *lbsp;
875 	lb_property_t *lbpp;
876 
877 	/*
878 	 * Validate format of ioctl
879 	 */
880 	if (mp->b_cont == NULL)
881 		return (IOC_INVAL);
882 
883 	cmd = iocp->ioc_cmd;
884 
885 	switch (cmd) {
886 	default:
887 		return (IOC_INVAL);
888 
889 	case LB_GET_INFO_SIZE:
890 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
891 			return (IOC_INVAL);
892 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
893 		*lbsp = sizeof (loopmodes);
894 		return (IOC_REPLY);
895 
896 	case LB_GET_INFO:
897 		if (iocp->ioc_count != sizeof (loopmodes))
898 			return (IOC_INVAL);
899 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
900 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
901 		return (IOC_REPLY);
902 
903 	case LB_GET_MODE:
904 		if (iocp->ioc_count != sizeof (uint32_t))
905 			return (IOC_INVAL);
906 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
907 		*lbmp = ngep->param_loop_mode;
908 		return (IOC_REPLY);
909 
910 	case LB_SET_MODE:
911 		if (iocp->ioc_count != sizeof (uint32_t))
912 			return (IOC_INVAL);
913 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
914 		return (nge_set_loop_mode(ngep, *lbmp));
915 	}
916 }
917 
/* Debug-flag class for the mac (GLDv3) entry points below */
#undef	NGE_DBG
#define	NGE_DBG	NGE_DBG_NEMO
920 
921 
922 static void
923 nge_check_desc_prop(nge_t *ngep)
924 {
925 	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
926 		ngep->desc_mode = DESC_HOT;
927 
928 	if (ngep->desc_mode == DESC_OFFLOAD)	{
929 
930 		ngep->desc_attr = nge_sum_desc;
931 
932 	}	else if (ngep->desc_mode == DESC_HOT)	{
933 
934 		ngep->desc_attr = nge_hot_desc;
935 	}
936 }
937 
938 /*
939  * nge_get_props -- get the parameters to tune the driver
940  */
/*
 * nge_get_props -- get the parameters to tune the driver
 *
 * Reads the tunables named by the *_propname strings above (with the
 * defaults shown inline), then sizes the rings/buffers from the
 * resulting MTU and low-memory settings.
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);
	ngep->intr_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, intr_moderation, NGE_SET);
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);

	/* The MTU is only tunable on hardware that supports jumbo frames */
	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;

	/*
	 * Choose buffer size and ring dimensions from the MTU band.
	 * NB: the lowmem_mode branch is only reached when the MTU is
	 * at most ETHERMTU — a jumbo MTU overrides low-memory mode.
	 */
	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		/* clamp over-large requests to the maximum supported */
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}
1027 
1028 
/*
 * Reset the chip and return the software state to post-init defaults.
 *
 * Caller must hold genlock (asserted); this routine additionally
 * takes tc_lock then tx_lock (in that order) around the ring
 * reinitialisation and chip reset.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
nge_reset(nge_t *ngep)
{
	int err;
	send_ring_t *srp = ngep->send;

	ASSERT(mutex_owned(ngep->genlock));
	mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	/* Reclaim outstanding TX descriptors, then rebuild the rings */
	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (err == DDI_FAILURE) {
		mutex_exit(srp->tx_lock);
		mutex_exit(srp->tc_lock);
		return (err);
	}
	err = nge_chip_reset(ngep);
	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);
	if (err == DDI_FAILURE)
		return (err);
	/* Back to known-good software state */
	ngep->watchdog = 0;
	ngep->resched_needed = B_FALSE;
	ngep->promisc = B_FALSE;
	ngep->param_loop_mode = NGE_LOOP_NONE;
	ngep->factotum_flag = 0;
	ngep->resched_needed = 0;	/* NB: set twice; harmless */
	ngep->nge_mac_state = NGE_MAC_RESET;
	/* Largest frame: MTU + ether header + FCS + VLAN tag */
	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
	ngep->max_sdu += VTAG_SIZE;
	ngep->rx_def = 0x16;
	return (DDI_SUCCESS);
}
1063 
/*
 * nge_m_stop() -- mac stop entry point
 *
 * Stops the chip, recycles outstanding TX descriptors, and tears
 * down the rings and all DMA buffers.  Lock order: genlock, then
 * rwlock as writer.
 */
static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * If suspended, adapter is already stopped, just return.
	 */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		return;
	}

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	(void) nge_chip_stop(ngep, B_FALSE);
	/* Try to wait all the buffer post to upper layer be released */
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}
1099 
/*
 * nge_m_start() -- mac start entry point
 *
 * Allocates the DMA buffers, initialises the rings and restarts the
 * chip.  Lock order: genlock, then rwlock as writer.  Returns the
 * DDI status of the first step that fails, else nge_restart()'s.
 */
static int
nge_m_start(void *arg)
{
	int err;
	nge_t *ngep = arg;

	NGE_TRACE(("nge_m_start($%p)", arg));
	/*
	 * If suspended, don't start, as the resume processing
	 * will recall this function with the suspended flag off.
	 */
	if (ngep->suspended)
		return (DDI_FAILURE);
	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);
	err = nge_alloc_bufs(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
		goto finish;
	}
	err = nge_init_rings(ngep);
	if (err != DDI_SUCCESS) {
		/* ring setup failed: release the buffers allocated above */
		nge_free_bufs(ngep);
		nge_problem(ngep, "nge_init_rings() failed,err=%x");
		goto finish;
	}
	err = nge_restart(ngep);

	NGE_DEBUG(("nge_m_start($%p) done", arg));
	finish:
		rw_exit(ngep->rwlock);
		mutex_exit(ngep->genlock);

		return (err);
}
1138 
1139 static int
1140 nge_m_unicst(void *arg, const uint8_t *macaddr)
1141 {
1142 	nge_t *ngep = arg;
1143 
1144 	NGE_TRACE(("nge_m_unicst($%p)", arg));
1145 	/*
1146 	 * Remember the new current address in the driver state
1147 	 * Sync the chip's idea of the address too ...
1148 	 */
1149 	mutex_enter(ngep->genlock);
1150 
1151 	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
1152 	ngep->cur_uni_addr.set = 1;
1153 
1154 	/*
1155 	 * If we are suspended, we want to quit now, and not update
1156 	 * the chip.  Doing so might put it in a bad state, but the
1157 	 * resume will get the unicast address installed.
1158 	 */
1159 	if (ngep->suspended)
1160 		return (DDI_SUCCESS);
1161 
1162 	nge_chip_sync(ngep);
1163 
1164 	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
1165 	mutex_exit(ngep->genlock);
1166 
1167 	return (0);
1168 }
1169 
1170 static int
1171 nge_m_promisc(void *arg, boolean_t on)
1172 {
1173 	nge_t *ngep = arg;
1174 
1175 	NGE_TRACE(("nge_m_promisc($%p)", arg));
1176 	/*
1177 	 * If suspended, we don't do anything, even record the promiscuious
1178 	 * mode, as we won't properly set it on resume.  Just fail.
1179 	 */
1180 	if (ngep->suspended)
1181 		return (DDI_FAILURE);
1182 
1183 	/*
1184 	 * Store specified mode and pass to chip layer to update h/w
1185 	 */
1186 	mutex_enter(ngep->genlock);
1187 	if (ngep->promisc == on) {
1188 		mutex_exit(ngep->genlock);
1189 		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1190 		return (0);
1191 	}
1192 	ngep->promisc = on;
1193 	nge_chip_sync(ngep);
1194 	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1195 	mutex_exit(ngep->genlock);
1196 
1197 	return (0);
1198 }
1199 
1200 static void nge_mulparam(nge_t *ngep)
1201 {
1202 	uint8_t number;
1203 	ether_addr_t pand;
1204 	ether_addr_t por;
1205 	mul_item *plist;
1206 
1207 	for (number = 0; number < ETHERADDRL; number++) {
1208 		pand[number] = 0x00;
1209 		por[number] = 0x00;
1210 	}
1211 	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1212 		for (number = 0; number < ETHERADDRL; number++) {
1213 			pand[number] &= plist->mul_addr[number];
1214 			por[number] |= plist->mul_addr[number];
1215 		}
1216 	}
1217 	for (number = 0; number < ETHERADDRL; number++) {
1218 		ngep->cur_mul_addr.addr[number]
1219 		    = pand[number] & por[number];
1220 		ngep->cur_mul_mask.addr[number]
1221 		    = pand [number] | (~por[number]);
1222 	}
1223 }
1224 static int
1225 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1226 {
1227 	boolean_t update;
1228 	boolean_t b_eq;
1229 	nge_t *ngep = arg;
1230 	mul_item *plist;
1231 	mul_item *plist_prev;
1232 	mul_item *pitem;
1233 
1234 	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
1235 	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1236 
1237 	update = B_FALSE;
1238 	plist = plist_prev = NULL;
1239 	mutex_enter(ngep->genlock);
1240 	if (add) {
1241 		if (ngep->pcur_mulist != NULL) {
1242 			for (plist = ngep->pcur_mulist; plist != NULL;
1243 			    plist = plist->next) {
1244 				b_eq = ether_eq(plist->mul_addr, mca);
1245 				if (b_eq) {
1246 					plist->ref_cnt++;
1247 					break;
1248 				}
1249 				plist_prev = plist;
1250 			}
1251 		}
1252 
1253 		if (plist == NULL) {
1254 			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
1255 			ether_copy(mca, pitem->mul_addr);
1256 			pitem ->ref_cnt++;
1257 			pitem ->next = NULL;
1258 			if (plist_prev == NULL)
1259 				ngep->pcur_mulist = pitem;
1260 			else
1261 				plist_prev->next = pitem;
1262 			update = B_TRUE;
1263 		}
1264 	} else {
1265 		if (ngep->pcur_mulist != NULL) {
1266 			for (plist = ngep->pcur_mulist; plist != NULL;
1267 			    plist = plist->next) {
1268 				b_eq = ether_eq(plist->mul_addr, mca);
1269 				if (b_eq) {
1270 					update = B_TRUE;
1271 					break;
1272 				}
1273 				plist_prev = plist;
1274 			}
1275 
1276 			if (update) {
1277 				if ((plist_prev == NULL) &&
1278 				    (plist->next == NULL))
1279 					ngep->pcur_mulist = NULL;
1280 				else if ((plist_prev == NULL) &&
1281 				    (plist->next != NULL))
1282 					ngep->pcur_mulist = plist->next;
1283 				else
1284 					plist_prev->next = plist->next;
1285 				kmem_free(plist, sizeof (mul_item));
1286 			}
1287 		}
1288 	}
1289 
1290 	if (update || !ngep->suspended) {
1291 		nge_mulparam(ngep);
1292 		nge_chip_sync(ngep);
1293 	}
1294 	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
1295 	mutex_exit(ngep->genlock);
1296 
1297 	return (0);
1298 }
1299 
/*
 * nge_m_ioctl() -- GLDv3 entry point: handle a driver-private ioctl.
 *
 * Commands fall into three groups: chip diagnostics (NGE_MII_* /
 * NGE_SEE_* / NGE_PEEK / NGE_POKE / resets), loopback control (LB_*)
 * and NDD parameter access (ND_GET/ND_SET).  Get-style commands are
 * unprivileged; all others require the net_config privilege.  The
 * group handler returns an ioc_reply status which determines whether
 * the PHY/MAC must be reprogrammed and how the message is answered
 * (ACK / NAK / reply with data).
 */
static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl:  cmd 0x%x", cmd));
	switch (cmd) {
	default:
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	/* read-only loopback queries are unprivileged */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	/* Dispatch to the per-group handler (unknown cmds were NAKed above) */
	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
	break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
	break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
	break;

	case ND_GET:
	case ND_SET:
		status = nge_nd_ioctl(ngep, wq, mp, iocp);
	break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
	break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/* handler already replied (or consumed the message) */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
1463 
1464 /* ARGSUSED */
1465 static boolean_t
1466 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1467 {
1468 	nge_t	*ngep = arg;
1469 	nge_dev_spec_param_t *dev_param_p;
1470 
1471 	dev_param_p = &ngep->dev_spec_param;
1472 
1473 	switch (cap) {
1474 	case MAC_CAPAB_HCKSUM: {
1475 		uint32_t *hcksum_txflags = cap_data;
1476 
1477 		if (dev_param_p->tx_hw_checksum) {
1478 			*hcksum_txflags = dev_param_p->tx_hw_checksum;
1479 		} else
1480 			return (B_FALSE);
1481 		break;
1482 	}
1483 	case MAC_CAPAB_POLL:
1484 		/*
1485 		 * There's nothing for us to fill in, simply returning
1486 		 * B_TRUE, stating that we support polling is sufficient.
1487 		 */
1488 		break;
1489 	default:
1490 		return (B_FALSE);
1491 	}
1492 	return (B_TRUE);
1493 }
1494 
1495 #undef	NGE_DBG
1496 #define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
1497 int
1498 nge_restart(nge_t *ngep)
1499 {
1500 	int err = 0;
1501 	err += nge_reset(ngep);
1502 	err += nge_chip_start(ngep);
1503 
1504 	if (err) {
1505 		ngep->nge_mac_state = NGE_MAC_STOPPED;
1506 		return (DDI_FAILURE);
1507 	} else {
1508 		ngep->nge_mac_state = NGE_MAC_STARTED;
1509 		return (DDI_SUCCESS);
1510 	}
1511 }
1512 
/*
 * nge_wake_factotum() -- schedule the factotum softint, unless one is
 * already pending.  factotum_flag is protected by softlock and acts
 * as a "trigger outstanding" latch (presumably cleared by the
 * factotum itself -- confirm against nge_chip_factotum).
 */
void
nge_wake_factotum(nge_t *ngep)
{
	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		ngep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
	}
	mutex_exit(ngep->softlock);
}
1523 
1524 /*
1525  * High-level cyclic handler
1526  *
1527  * This routine schedules a (low-level) softint callback to the
1528  * factotum.
1529  */
1530 
1531 static void
1532 nge_chip_cyclic(void *arg)
1533 {
1534 	nge_t *ngep;
1535 
1536 	ngep = (nge_t *)arg;
1537 
1538 	switch (ngep->nge_chip_state) {
1539 	default:
1540 		return;
1541 
1542 	case NGE_CHIP_RUNNING:
1543 		break;
1544 
1545 	case NGE_CHIP_FAULT:
1546 	case NGE_CHIP_ERROR:
1547 		break;
1548 	}
1549 
1550 	nge_wake_factotum(ngep);
1551 }
1552 
/*
 * nge_unattach() -- release every resource recorded in ngep->progress,
 * in reverse order of acquisition.  Called both from the DDI_DETACH
 * path and from attach-failure cleanup, so each teardown step is
 * guarded by the corresponding PROGRESS_* bit and the function is
 * safe to call with any subset of resources held.
 */
static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_NDD)
		nge_nd_cleanup(ngep);

	if (ngep->progress & PROGRESS_HWINT) {
		/* Put the factory MAC address back and halt the chip */
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	if (ngep->progress & PROGRESS_INTR) {
		/* Locks can only be destroyed once interrupts are gone */
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}
1621 
/*
 * nge_resume() -- DDI_RESUME handler.
 *
 * Re-reads PCI configuration space (some values may change across a
 * suspend/resume cycle) and restarts the controller via nge_m_start().
 * A restart failure is only warned about, so the rest of the system
 * can keep running without this NIC.
 */
static int
nge_resume(dev_info_t *devinfo)
{
	nge_t		*ngep;
	chip_info_t	*infop;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved definfo");

	/* Clear first, so nge_m_start() below doesn't short-circuit */
	ngep->suspended = B_FALSE;

	/*
	 * Fetch the config space.  Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * Start the controller.  In this case (and probably most GLDv3
	 * devices), it is sufficient to call nge_m_start().
	 */
	if (nge_m_start((void *)ngep) != DDI_SUCCESS) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	}
	return (DDI_SUCCESS);
}
1667 
1668 /*
1669  * attach(9E) -- Attach a device to the system
1670  *
1671  * Called once for each board successfully probed.
1672  */
static int
nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int		err;
	int		i;
	int		instance;
	caddr_t		regs;
	nge_t		*ngep;
	chip_info_t	*infop;
	mac_register_t	*macp;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (nge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate and link the per-instance soft state.  Each step
	 * below records a PROGRESS_* bit so nge_unattach() can undo
	 * exactly what was done if anything fails.
	 */
	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
	instance = ddi_get_instance(devinfo);
	ddi_set_driver_private(devinfo, ngep);
	ngep->devinfo = devinfo;

	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
	    NGE_DRIVER_NAME, instance);
	err = pci_config_setup(devinfo, &ngep->cfg_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
		goto attach_fail;
	}
	infop = (chip_info_t *)&ngep->chipinfo;
	nge_chip_cfg_init(ngep, infop, B_FALSE);
	nge_init_dev_spec_param(ngep);
	nge_get_props(ngep);
	ngep->progress |= PROGRESS_CFG;

	/* Map the operating registers */
	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	ngep->io_regs = regs;
	ngep->progress |= PROGRESS_REGS;

	err = nge_register_intrs_and_init_locks(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach:"
		    " register intrs and init locks failed");
		goto attach_fail;
	}
	nge_init_ring_param_lock(ngep);
	ngep->progress |= PROGRESS_INTR;

	mutex_enter(ngep->genlock);

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	nge_phys_init(ngep);
	err = nge_chip_reset(ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	nge_chip_sync(ngep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ngep->htable,
		    ngep->intr_actual_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ngep->intr_actual_cnt; i++) {
			(void) ddi_intr_enable(ngep->htable[i]);
		}
	}

	ngep->link_state = LINK_STATE_UNKNOWN;
	ngep->progress |= PROGRESS_HWINT;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (nge_nd_init(ngep)) {
		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
		mutex_exit(ngep->genlock);
		goto attach_fail;
	}
	ngep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	nge_init_kstats(ngep, instance);
	ngep->progress |= PROGRESS_KSTATS;

	mutex_exit(ngep->genlock);

	/* Describe ourselves to the GLDv3 framework */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ngep;
	macp->m_dip = devinfo;
	macp->m_src_addr = infop->vendor_addr.addr;
	macp->m_callbacks = &nge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ngep->default_mtu;
	/*
	 * Finally, we're ready to register ourselves with the mac
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &ngep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodical handler.
	 * nge_chip_cyclic() is invoked in kernel context.
	 */
	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
	    NGE_CYCLIC_PERIOD, DDI_IPL_0);

	ngep->progress |= PROGRESS_READY;
	return (DDI_SUCCESS);

attach_fail:
	nge_unattach(ngep);
	return (DDI_FAILURE);
}
1814 
1815 /*
1816  * detach(9E) -- Detach a device from the system
1817  */
static int
nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	int i;
	nge_t *ngep;
	mul_item *p, *nextp;
	buff_ring_t *brp;

	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));

	ngep = ddi_get_driver_private(devinfo);
	brp = ngep->buff;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		/*
		 * Stop the NIC
		 * I suspect that we can actually suspend if the stop
		 * routine returns a failure, as the resume will
		 * effectively fully reset the hardware (i.e. we don't
		 * really save any hardware state).  However, nge_m_stop
		 * doesn't return an error code.
		 * Note: This driver doesn't currently support WOL, but
		 *	should it in the future, it is important to
		 *	make sure the PHY remains powered so that the
		 *	wakeup packet can actually be recieved.
		 */
		nge_m_stop(ngep);
		ngep->suspended = B_TRUE;
		return (DDI_SUCCESS);

	case DDI_DETACH:
		break;
	}

	/*
	 * Try to wait all the buffer post to upper layer be released,
	 * polling rx_hold for up to ~1 second (1000 x 1ms).
	 */
	for (i = 0; i < 1000; i++) {
		if (brp->rx_hold == 0)
			break;
		drv_usecwait(1000);
	}

	/* If there is any posted buffer, reject to detach */
	if (brp->rx_hold != 0)
		return (DDI_FAILURE);

	/* Recycle the multicast table */
	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
		nextp = p->next;
		kmem_free(p, sizeof (mul_item));
	}
	ngep->pcur_mulist = NULL;

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	nge_unattach(ngep);
	return (DDI_SUCCESS);
}
1889 
1890 
1891 /*
1892  * ========== Module Loading Data & Entry Points ==========
1893  */
1894 
/*
 * Loadable-module linkage: the device-operations vector plus the
 * modldrv/modlinkage records consumed by _init/_fini/_info below.
 */
DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    nodev, NULL, D_MP, NULL);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};
1908 
1909 
/*
 * _info(9E) -- report module information through the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1915 
1916 int
1917 _init(void)
1918 {
1919 	int status;
1920 
1921 	mac_init_ops(&nge_dev_ops, "nge");
1922 	status = mod_install(&modlinkage);
1923 	if (status != DDI_SUCCESS)
1924 		mac_fini_ops(&nge_dev_ops);
1925 	else
1926 		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
1927 
1928 	return (status);
1929 }
1930 
1931 int
1932 _fini(void)
1933 {
1934 	int status;
1935 
1936 	status = mod_remove(&modlinkage);
1937 	if (status == DDI_SUCCESS) {
1938 		mac_fini_ops(&nge_dev_ops);
1939 		mutex_destroy(nge_log_mutex);
1940 	}
1941 
1942 	return (status);
1943 }
1944 
1945 /*
1946  * ============ Init MSI/Fixed/SoftInterrupt routines ==============
1947  */
1948 
1949 /*
1950  * Register interrupts and initialize each mutex and condition variables
1951  */
1952 
static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int		err;
	int		intr_types;
	uint_t		soft_prip;
	nge_msi_mask	msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.  Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	/* Remember the softint priority: softlock is initialised with it */
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	/* Prefer MSI when available and enabled; fall back to FIXED below */
	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remapping the MSI MAP0 and MAP1. MCP55
			 * is default mapping all the interrupt to 0 vector.
			 * Software needs to remapping this.
			 * This mapping is same as CK804.
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}


	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	/*
	 * Interrupt priorities are now known (nge_add_intrs() filled in
	 * intr_pri), so the locks can be initialised with them.
	 */
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}
2116 
2117 /*
2118  * nge_add_intrs:
2119  *
2120  * Register FIXED or MSI interrupts.
2121  */
2122 static int
2123 nge_add_intrs(nge_t *ngep, int	intr_type)
2124 {
2125 	dev_info_t	*dip = ngep->devinfo;
2126 	int		avail, actual, intr_size, count = 0;
2127 	int		i, flag, ret;
2128 
2129 	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2130 
2131 	/* Get number of interrupts */
2132 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2133 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2134 		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2135 		    "count: %d", ret, count);
2136 
2137 		return (DDI_FAILURE);
2138 	}
2139 
2140 	/* Get number of available interrupts */
2141 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2142 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2143 		nge_error(ngep, "ddi_intr_get_navail() failure, "
2144 		    "ret: %d, avail: %d\n", ret, avail);
2145 
2146 		return (DDI_FAILURE);
2147 	}
2148 
2149 	if (avail < count) {
2150 		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
2151 		    count, avail));
2152 	}
2153 	flag = DDI_INTR_ALLOC_NORMAL;
2154 
2155 	/* Allocate an array of interrupt handles */
2156 	intr_size = count * sizeof (ddi_intr_handle_t);
2157 	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2158 
2159 	/* Call ddi_intr_alloc() */
2160 	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2161 	    count, &actual, flag);
2162 
2163 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
2164 		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
2165 
2166 		kmem_free(ngep->htable, intr_size);
2167 		return (DDI_FAILURE);
2168 	}
2169 
2170 	if (actual < count) {
2171 		NGE_DEBUG(("Requested: %d, Received: %d\n",
2172 		    count, actual));
2173 	}
2174 
2175 	ngep->intr_actual_cnt = actual;
2176 	ngep->intr_req_cnt = count;
2177 
2178 	/*
2179 	 * Get priority for first msi, assume remaining are all the same
2180 	 */
2181 	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
2182 	    DDI_SUCCESS) {
2183 		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
2184 
2185 		/* Free already allocated intr */
2186 		for (i = 0; i < actual; i++) {
2187 			(void) ddi_intr_free(ngep->htable[i]);
2188 		}
2189 
2190 		kmem_free(ngep->htable, intr_size);
2191 
2192 		return (DDI_FAILURE);
2193 	}
2194 	/* Test for high level mutex */
2195 	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
2196 		nge_error(ngep, "nge_add_intrs:"
2197 		    "Hi level interrupt not supported");
2198 
2199 		for (i = 0; i < actual; i++)
2200 			(void) ddi_intr_free(ngep->htable[i]);
2201 
2202 		kmem_free(ngep->htable, intr_size);
2203 
2204 		return (DDI_FAILURE);
2205 	}
2206 
2207 
2208 	/* Call ddi_intr_add_handler() */
2209 	for (i = 0; i < actual; i++) {
2210 		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
2211 		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
2212 			nge_error(ngep, "ddi_intr_add_handler() "
2213 			    "failed %d\n", ret);
2214 
2215 			/* Free already allocated intr */
2216 			for (i = 0; i < actual; i++) {
2217 				(void) ddi_intr_free(ngep->htable[i]);
2218 			}
2219 
2220 			kmem_free(ngep->htable, intr_size);
2221 
2222 			return (DDI_FAILURE);
2223 		}
2224 	}
2225 
2226 	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
2227 	    != DDI_SUCCESS) {
2228 		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
2229 
2230 		for (i = 0; i < actual; i++) {
2231 			(void) ddi_intr_remove_handler(ngep->htable[i]);
2232 			(void) ddi_intr_free(ngep->htable[i]);
2233 		}
2234 
2235 		kmem_free(ngep->htable, intr_size);
2236 
2237 		return (DDI_FAILURE);
2238 	}
2239 
2240 	return (DDI_SUCCESS);
2241 }
2242 
2243 /*
2244  * nge_rem_intrs:
2245  *
2246  * Unregister FIXED or MSI interrupts
2247  */
2248 static void
2249 nge_rem_intrs(nge_t *ngep)
2250 {
2251 	int	i;
2252 
2253 	NGE_DEBUG(("nge_rem_intrs\n"));
2254 
2255 	/* Disable all interrupts */
2256 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2257 		/* Call ddi_intr_block_disable() */
2258 		(void) ddi_intr_block_disable(ngep->htable,
2259 		    ngep->intr_actual_cnt);
2260 	} else {
2261 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
2262 			(void) ddi_intr_disable(ngep->htable[i]);
2263 		}
2264 	}
2265 
2266 	/* Call ddi_intr_remove_handler() */
2267 	for (i = 0; i < ngep->intr_actual_cnt; i++) {
2268 		(void) ddi_intr_remove_handler(ngep->htable[i]);
2269 		(void) ddi_intr_free(ngep->htable[i]);
2270 	}
2271 
2272 	kmem_free(ngep->htable,
2273 	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
2274 }
2275