xref: /titanic_44/usr/src/uts/common/io/nge/nge_main.c (revision 2a9459bdd821c1cf59590a7a9069ac9c591e8a6b)
1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * This file may contain confidential information of Nvidia
8  * and should not be distributed in source form without approval
9  * from Sun Legal.
10  */
11 
12 #pragma ident	"%Z%%M%	%I%	%E% SMI"
13 
14 #include "nge.h"
15 
16 /*
17  * Describes the chip's DMA engine
18  */
19 
/*
 * DMA attributes for single-cookie allocations (descriptor rings and
 * copy buffers) in the hot (mode-3) descriptor format: 40-bit
 * addressing, 16-byte alignment, one cookie (sgllen = 1).
 */
static ddi_dma_attr_t hot_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
34 
/*
 * DMA attributes for binding TX mblk data in the hot (mode-3)
 * descriptor format: 40-bit addressing, up to NGE_MAX_COOKIES
 * scatter/gather segments, 0x3FFF max per transfer.
 */
static ddi_dma_attr_t hot_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x000000FFFFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
49 
/*
 * DMA attributes for single-cookie allocations in the offload
 * ("sum", mode-2) descriptor format: note 32-bit addressing only,
 * unlike the 40-bit hot_dma_attr above.
 */
static ddi_dma_attr_t sum_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x000000007FFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen 	*/
	0x00000001,			/* dma_attr_granular 	*/
	0
};
64 
/*
 * DMA attributes for binding TX mblk data in the offload ("sum",
 * mode-2) descriptor format: 32-bit addressing, up to
 * NGE_MAX_COOKIES scatter/gather segments.
 */
static ddi_dma_attr_t sum_tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0x00000000FFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x0000000000003FFFull,		/* dma_attr_count_max	*/
	0x0000000000000010ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x0000000000003FFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	NGE_MAX_COOKIES,		/* dma_attr_sgllen 	*/
	1,				/* dma_attr_granular 	*/
	0
};
79 
/*
 * DMA access attributes for data (packet buffers).
 */
ddi_device_acc_attr_t nge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* device data is little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
89 
/*
 * DMA access attributes for descriptors (RX/TX rings).
 */
static ddi_device_acc_attr_t nge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* descriptors are little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
99 
/*
 * PIO access attributes for device registers.
 */
static ddi_device_acc_attr_t nge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* registers are little-endian */
	DDI_STRICTORDER_ACC,		/* strict ordering for PIO */
	DDI_DEFAULT_ACC
};
109 
/*
 * NIC DESC MODE 2
 *
 * Descriptor operations for the offload ("sum") format, selected when
 * desc_mode == DESC_OFFLOAD: per-descriptor sizes, DMA attributes and
 * the fill/check vectors invoked through ngep->desc_attr.
 */
static const nge_desc_attr_t nge_sum_desc = {

	sizeof (sum_rx_bd),
	sizeof (sum_tx_bd),
	&sum_dma_attr,
	&sum_tx_dma_attr,
	nge_sum_rxd_fill,
	nge_sum_rxd_check,
	nge_sum_txd_fill,
	nge_sum_txd_check,
};
125 
/*
 * NIC DESC MODE 3
 *
 * Descriptor operations for the hot format, selected when
 * desc_mode == DESC_HOT (also the fallback for unknown modes):
 * per-descriptor sizes, DMA attributes and fill/check vectors.
 */
static const nge_desc_attr_t nge_hot_desc = {

	sizeof (hot_rx_bd),
	sizeof (hot_tx_bd),
	&hot_dma_attr,
	&hot_tx_dma_attr,
	nge_hot_rxd_fill,
	nge_hot_rxd_check,
	nge_hot_txd_fill,
	nge_hot_txd_check,
};
141 
/* Driver identification string; %I% is an SCCS keyword */
static char nge_ident[] = "nVidia 1Gb Ethernet %I%";

/* Names of the driver properties read by nge_get_props() */
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char debug_propname[]	= "nge-debug-flags";
static char rx_data_hw[] = "rx-data-hw";
static char rx_prd_lw[] = "rx-prd-lw";
static char rx_prd_hw[] = "rx-prd-hw";
static char sw_intr_intv[] = "sw-intr-intvl";
static char nge_desc_mode[] = "desc-mode";
static char default_mtu[] = "default_mtu";
static char low_memory_mode[] = "minimal-memory-usage";

/* Serialises driver logging; defined elsewhere in the driver */
extern kmutex_t nge_log_mutex[1];
154 
/*
 * GLDv3 (mac) entry points implemented in this file
 */
static int		nge_m_start(void *);
static void		nge_m_stop(void *);
static int		nge_m_promisc(void *, boolean_t);
static int		nge_m_multicst(void *, boolean_t, const uint8_t *);
static int		nge_m_unicst(void *, const uint8_t *);
static void		nge_m_resources(void *);
static void		nge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	nge_m_getcapab(void *, mac_capab_t, void *);

/* Flags telling the mac layer which optional callbacks we provide */
#define		NGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

/*
 * Callback vector registered with the mac layer.
 * nge_m_stat and nge_m_tx are not defined in this file.
 */
static mac_callbacks_t nge_m_callbacks = {
	NGE_M_CALLBACK_FLAGS,
	nge_m_stat,
	nge_m_start,
	nge_m_stop,
	nge_m_promisc,
	nge_m_multicst,
	nge_m_unicst,
	nge_m_tx,
	nge_m_resources,
	nge_m_ioctl,
	nge_m_getcapab
};
179 
/* Interrupt registration/teardown and lock-setup helpers */
static int nge_add_intrs(nge_t *, int);
static void nge_rem_intrs(nge_t *);
static int nge_register_intrs_and_init_locks(nge_t *);

/*
 * NGE MSI tunable:
 * NOTE(review): presumably B_TRUE requests MSI rather than fixed
 * interrupts -- confirm against nge_add_intrs().
 */
boolean_t nge_enable_msi = B_FALSE;
188 
189 static enum ioc_reply
190 nge_set_loop_mode(nge_t *ngep, uint32_t mode)
191 {
192 	/*
193 	 * If the mode isn't being changed, there's nothing to do ...
194 	 */
195 	if (mode == ngep->param_loop_mode)
196 		return (IOC_ACK);
197 
198 	/*
199 	 * Validate the requested mode and prepare a suitable message
200 	 * to explain the link down/up cycle that the change will
201 	 * probably induce ...
202 	 */
203 	switch (mode) {
204 	default:
205 		return (IOC_INVAL);
206 
207 	case NGE_LOOP_NONE:
208 	case NGE_LOOP_EXTERNAL_100:
209 	case NGE_LOOP_EXTERNAL_10:
210 	case NGE_LOOP_INTERNAL_PHY:
211 		break;
212 	}
213 
214 	/*
215 	 * All OK; tell the caller to reprogram
216 	 * the PHY and/or MAC for the new mode ...
217 	 */
218 	ngep->param_loop_mode = mode;
219 	return (IOC_RESTART_ACK);
220 }
221 
222 #undef	NGE_DBG
223 #define	NGE_DBG		NGE_DBG_INIT
224 
225 /*
226  * Utility routine to carve a slice off a chunk of allocated memory,
227  * updating the chunk descriptor accordingly.  The size of the slice
228  * is given by the product of the <qty> and <size> parameters.
229  */
230 void
231 nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
232     uint32_t qty, uint32_t size)
233 {
234 	size_t totsize;
235 
236 	totsize = qty*size;
237 	ASSERT(size > 0);
238 	ASSERT(totsize <= chunk->alength);
239 
240 	*slice = *chunk;
241 	slice->nslots = qty;
242 	slice->size = size;
243 	slice->alength = totsize;
244 
245 	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
246 	chunk->alength -= totsize;
247 	chunk->offset += totsize;
248 	chunk->cookie.dmac_laddress += totsize;
249 	chunk->cookie.dmac_size -= totsize;
250 }
251 
/*
 * Allocate an area of memory and a DMA handle for accessing it
 *
 * ngep:	per-chip state (supplies devinfo and DMA attributes)
 * memsize:	number of bytes required
 * attr_p:	device access attributes for the mapping
 * dma_flags:	DDI_DMA_* direction/consistency flags
 * dma_p:	area descriptor to fill in; release with
 *		nge_free_dma_mem()
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure nothing is left
 * allocated.
 */
int
nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	int err;
	caddr_t va;

	NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)ngep, memsize, attr_p, dma_flags, dma_p));
	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		goto fail;

	/*
	 * Bind the two together.  The binding must produce exactly one
	 * cookie, because dma_area_t records only a single cookie.
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		goto fail;

	/*
	 * nslots/size are "not yet sliced" markers; nge_slice_chunk()
	 * replaces them when the area is carved up.
	 */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->offset = 0;

	return (DDI_SUCCESS);

fail:
	/* nge_free_dma_mem() copes with partially-constructed state */
	nge_free_dma_mem(dma_p);
	NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));

	return (DDI_FAILURE);
}
304 
305 /*
306  * Free one allocated area of DMAable memory
307  */
308 void
309 nge_free_dma_mem(dma_area_t *dma_p)
310 {
311 	if (dma_p->dma_hdl != NULL) {
312 		if (dma_p->ncookies) {
313 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
314 			dma_p->ncookies = 0;
315 		}
316 	}
317 	if (dma_p->acc_hdl != NULL) {
318 		ddi_dma_mem_free(&dma_p->acc_hdl);
319 		dma_p->acc_hdl = NULL;
320 	}
321 	if (dma_p->dma_hdl != NULL) {
322 		ddi_dma_free_handle(&dma_p->dma_hdl);
323 		dma_p->dma_hdl = NULL;
324 	}
325 }
326 
327 #define	ALLOC_TX_BUF	0x1
328 #define	ALLOC_TX_DESC	0x2
329 #define	ALLOC_RX_DESC	0x4
330 
331 int
332 nge_alloc_bufs(nge_t *ngep)
333 {
334 	int err;
335 	int split;
336 	int progress;
337 	size_t txbuffsize;
338 	size_t rxdescsize;
339 	size_t txdescsize;
340 
341 	txbuffsize = ngep->tx_desc * ngep->buf_size;
342 	rxdescsize = ngep->rx_desc;
343 	txdescsize = ngep->tx_desc;
344 	rxdescsize *= ngep->desc_attr.rxd_size;
345 	txdescsize *= ngep->desc_attr.txd_size;
346 	progress = 0;
347 
348 	NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
349 	/*
350 	 * Allocate memory & handles for TX buffers
351 	 */
352 	ASSERT((txbuffsize % ngep->nge_split) == 0);
353 	for (split = 0; split < ngep->nge_split; ++split) {
354 		err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
355 		    &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
356 		    &ngep->send->buf[split]);
357 		if (err != DDI_SUCCESS)
358 			goto fail;
359 	}
360 
361 	progress |= ALLOC_TX_BUF;
362 
363 	/*
364 	 * Allocate memory & handles for receive return rings and
365 	 * buffer (producer) descriptor rings
366 	 */
367 	err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
368 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
369 	if (err != DDI_SUCCESS)
370 		goto fail;
371 	progress |= ALLOC_RX_DESC;
372 
373 	/*
374 	 * Allocate memory & handles for TX descriptor rings,
375 	 */
376 	err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
377 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
378 	if (err != DDI_SUCCESS)
379 		goto fail;
380 	return (DDI_SUCCESS);
381 
382 fail:
383 	if (progress & ALLOC_RX_DESC)
384 		nge_free_dma_mem(&ngep->recv->desc);
385 	if (progress & ALLOC_TX_BUF) {
386 		for (split = 0; split < ngep->nge_split; ++split)
387 			nge_free_dma_mem(&ngep->send->buf[split]);
388 	}
389 
390 	return (DDI_FAILURE);
391 }
392 
393 /*
394  * This routine frees the transmit and receive buffers and descriptors.
395  * Make sure the chip is stopped before calling it!
396  */
397 void
398 nge_free_bufs(nge_t *ngep)
399 {
400 	int split;
401 
402 	NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
403 
404 	nge_free_dma_mem(&ngep->recv->desc);
405 	nge_free_dma_mem(&ngep->send->desc);
406 
407 	for (split = 0; split < ngep->nge_split; ++split)
408 		nge_free_dma_mem(&ngep->send->buf[split]);
409 }
410 
411 /*
412  * Clean up initialisation done above before the memory is freed
413  */
414 static void
415 nge_fini_send_ring(nge_t *ngep)
416 {
417 	uint32_t slot;
418 	size_t dmah_num;
419 	send_ring_t *srp;
420 	sw_tx_sbd_t *ssbdp;
421 
422 	srp = ngep->send;
423 	ssbdp = srp->sw_sbds;
424 
425 	NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
426 
427 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
428 
429 	for (slot = 0; slot < dmah_num; ++slot) {
430 		if (srp->dmahndl[slot].hndl) {
431 			(void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
432 			ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
433 			srp->dmahndl[slot].hndl = NULL;
434 			srp->dmahndl[slot].next = NULL;
435 		}
436 	}
437 
438 	srp->dmah_free.head = NULL;
439 	srp->dmah_free.tail = NULL;
440 
441 	kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
442 
443 }
444 
/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if a TX DMA handle cannot be
 * allocated (in which case everything set up here is torn down again
 * via nge_fini_send_ring()).
 */
static int
nge_init_send_ring(nge_t *ngep)
{
	size_t dmah_num;
	uint32_t nslots;
	uint32_t err;
	uint32_t slot;
	uint32_t split;
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;

	srp = ngep->send;
	srp->desc.nslots = ngep->tx_desc;
	nslots = srp->desc.nslots;

	NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->ngep = ngep;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	srp->sw_sbds = ssbdp;

	/*
	 * Now initialise each array element once and for all.  Each
	 * s/w descriptor gets one h/w descriptor slice carved from the
	 * ring plus one buf_size slice from its split's copy buffer.
	 * <desc> and <pbuf> are local struct copies, so slicing them
	 * leaves the master dma_area_t records in srp untouched.
	 */
	desc = srp->desc;
	for (split = 0; split < ngep->nge_split; ++split) {
		pbuf = srp->buf[split];
		for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
			nge_slice_chunk(&ssbdp->desc, &desc, 1,
			    ngep->desc_attr.txd_size);
			nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
			    ngep->buf_size);
		}
		/* each split's buffer must be consumed exactly */
		ASSERT(pbuf.alength == 0);
	}
	ASSERT(desc.alength == 0);

	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);

	/* preallocate dma handles for tx buffer */
	for (slot = 0; slot < dmah_num; ++slot) {

		err = ddi_dma_alloc_handle(ngep->devinfo,
		    ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &srp->dmahndl[slot].hndl);

		if (err != DDI_SUCCESS) {
			/* frees handles allocated so far, and sw_sbds */
			nge_fini_send_ring(ngep);
			nge_error(ngep,
			    "nge_init_send_ring: alloc dma handle fails");
			return (DDI_FAILURE);
		}
		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
	}

	/*
	 * Chain the handles into the free list.  The loop above left
	 * the last element's next pointing one past the array; the
	 * tail assignment below corrects it to NULL.
	 */
	srp->dmah_free.head = srp->dmahndl;
	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
	srp->dmah_free.tail->next = NULL;

	return (DDI_SUCCESS);
}
519 
520 /*
521  * Intialize the tx recycle pointer and tx sending pointer of tx ring
522  * and set the type of tx's data descriptor by default.
523  */
524 static void
525 nge_reinit_send_ring(nge_t *ngep)
526 {
527 	size_t dmah_num;
528 	uint32_t slot;
529 	send_ring_t *srp;
530 	sw_tx_sbd_t *ssbdp;
531 
532 	srp = ngep->send;
533 
534 	/*
535 	 * Reinitialise control variables ...
536 	 */
537 
538 	srp->tx_hwmark = NGE_DESC_MIN;
539 	srp->tx_lwmark = NGE_DESC_MIN;
540 
541 	srp->tx_next = 0;
542 	srp->tx_free = srp->desc.nslots;
543 	srp->tc_next = 0;
544 
545 	dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
546 
547 	for (slot = 0; slot - dmah_num != 0; ++slot)
548 		srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
549 
550 	srp->dmah_free.head = srp->dmahndl;
551 	srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
552 	srp->dmah_free.tail->next = NULL;
553 
554 	/*
555 	 * Zero and sync all the h/w Send Buffer Descriptors
556 	 */
557 	for (slot = 0; slot < srp->desc.nslots; ++slot) {
558 		ssbdp = &srp->sw_sbds[slot];
559 		ssbdp->flags = HOST_OWN;
560 	}
561 
562 	DMA_ZERO(srp->desc);
563 	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
564 }
565 
566 /*
567  * Initialize the slot number of rx's ring
568  */
569 static void
570 nge_init_recv_ring(nge_t *ngep)
571 {
572 	recv_ring_t *rrp;
573 
574 	rrp = ngep->recv;
575 	rrp->desc.nslots = ngep->rx_desc;
576 	rrp->ngep = ngep;
577 }
578 
/*
 * Initialize the rx recycle pointer and rx sending pointer of rx ring
 */
static void
nge_reinit_recv_ring(nge_t *ngep)
{
	recv_ring_t *rrp;

	rrp = ngep->recv;

	/*
	 * Reinitialise control variables ...
	 */
	rrp->prod_index = 0;
	/*
	 * Zero and sync all the h/w Recv Buffer Descriptors
	 */
	DMA_ZERO(rrp->desc);
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
}
599 
/*
 * Clean up initialisation done above before the memory is freed
 */
static void
nge_fini_buff_ring(nge_t *ngep)
{
	uint32_t i;
	buff_ring_t *brp;
	dma_area_t *bufp;
	sw_rx_sbd_t *bsbdp;

	brp = ngep->buff;
	bsbdp = brp->sw_rbds;

	NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));

	/*
	 * Bump the buffer generation number under the recycle lock.
	 * Buffers carry the signature current when they were created
	 * (bufp->signature, set in nge_init_buff_ring()), so after
	 * this bump any buffer still in flight no longer matches
	 * brp->buf_sign.  NOTE(review): presumably nge_recv_recycle()
	 * compares signatures and frees stale buffers -- confirm
	 * against that routine.
	 */
	mutex_enter(brp->recycle_lock);
	brp->buf_sign++;
	mutex_exit(brp->recycle_lock);

	/* Free the buffers attached to the s/w receive descriptors */
	for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
		if (bsbdp->bufp) {
			if (bsbdp->bufp->mp)
				freemsg(bsbdp->bufp->mp);
			nge_free_dma_mem(bsbdp->bufp);
			kmem_free(bsbdp->bufp, sizeof (dma_area_t));
			bsbdp->bufp = NULL;
		}
	}

	/* Drain and free the free list ... */
	while (brp->free_list != NULL) {
		bufp = brp->free_list;
		brp->free_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}

	/* ... and likewise the recycle list */
	while (brp->recycle_list != NULL) {
		bufp = brp->recycle_list;
		brp->recycle_list = bufp->next;
		bufp->next = NULL;
		if (bufp->mp)
			freemsg(bufp->mp);
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
	}

	/* Finally release the s/w descriptor array itself */
	kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
	brp->sw_rbds = NULL;
}
651 
652 /*
653  * Intialize the Rx's data ring and free ring
654  */
655 static int
656 nge_init_buff_ring(nge_t *ngep)
657 {
658 	uint32_t err;
659 	uint32_t slot;
660 	uint32_t nslots_buff;
661 	uint32_t nslots_recv;
662 	buff_ring_t *brp;
663 	recv_ring_t *rrp;
664 	dma_area_t desc;
665 	dma_area_t *bufp;
666 	sw_rx_sbd_t *bsbdp;
667 
668 	rrp = ngep->recv;
669 	brp = ngep->buff;
670 	brp->nslots = ngep->rx_buf;
671 	brp->rx_bcopy = B_FALSE;
672 	nslots_recv = rrp->desc.nslots;
673 	nslots_buff = brp->nslots;
674 	brp->ngep = ngep;
675 
676 	NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
677 
678 	/*
679 	 * Allocate the array of s/w Recv Buffer Descriptors
680 	 */
681 	bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
682 	brp->sw_rbds = bsbdp;
683 	brp->free_list = NULL;
684 	brp->recycle_list = NULL;
685 	for (slot = 0; slot < nslots_buff; ++slot) {
686 		bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
687 		err = nge_alloc_dma_mem(ngep, (ngep->buf_size
688 		    + NGE_HEADROOM),
689 		    &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
690 		if (err != DDI_SUCCESS) {
691 			kmem_free(bufp, sizeof (dma_area_t));
692 			return (DDI_FAILURE);
693 		}
694 
695 		bufp->alength -= NGE_HEADROOM;
696 		bufp->offset += NGE_HEADROOM;
697 		bufp->private = (caddr_t)ngep;
698 		bufp->rx_recycle.free_func = nge_recv_recycle;
699 		bufp->rx_recycle.free_arg = (caddr_t)bufp;
700 		bufp->signature = brp->buf_sign;
701 		bufp->rx_delivered = B_FALSE;
702 		bufp->mp = desballoc(DMA_VPTR(*bufp),
703 		    ngep->buf_size + NGE_HEADROOM,
704 		    0, &bufp->rx_recycle);
705 
706 		if (bufp->mp == NULL) {
707 			return (DDI_FAILURE);
708 		}
709 		bufp->next = brp->free_list;
710 		brp->free_list = bufp;
711 	}
712 
713 	/*
714 	 * Now initialise each array element once and for all
715 	 */
716 	desc = rrp->desc;
717 	for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
718 		nge_slice_chunk(&bsbdp->desc, &desc, 1,
719 		    ngep->desc_attr.rxd_size);
720 		bufp = brp->free_list;
721 		brp->free_list = bufp->next;
722 		bsbdp->bufp = bufp;
723 		bsbdp->flags = CONTROLER_OWN;
724 		bufp->next = NULL;
725 	}
726 
727 	ASSERT(desc.alength == 0);
728 	return (DDI_SUCCESS);
729 }
730 
/*
 * Fill the host address of data in rx' descriptor
 * and initialize free pointers of rx free ring
 *
 * Returns DDI_FAILURE if any s/w descriptor has lost its buffer
 * (see the scenario described below), DDI_SUCCESS otherwise.
 */
static int
nge_reinit_buff_ring(nge_t *ngep)
{
	uint32_t slot;
	uint32_t nslots_recv;
	buff_ring_t *brp;
	recv_ring_t *rrp;
	sw_rx_sbd_t *bsbdp;
	void *hw_bd_p;

	brp = ngep->buff;
	rrp = ngep->recv;
	bsbdp = brp->sw_rbds;
	nslots_recv = rrp->desc.nslots;
	for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
		hw_bd_p = DMA_VPTR(bsbdp->desc);
	/*
	 * There is a scenario: When the traffic of small tcp
	 * packet is heavy, suspending the tcp traffic will
	 * cause the preallocated buffers for rx not to be
	 * released in time by tcp traffic and cause rx's buffer
	 * pointers not to be refilled in time.
	 *
	 * At this point, if we reinitialize the driver, the bufp
	 * pointer for rx's traffic will be NULL.
	 * So the result of the reinitialisation fails.
	 */
		if (bsbdp->bufp == NULL)
			return (DDI_FAILURE);

		/* write the buffer's DMA address/length into the h/w bd */
		ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
		    bsbdp->bufp->alength);
	}
	return (DDI_SUCCESS);
}
770 
771 static void
772 nge_init_ring_param_lock(nge_t *ngep)
773 {
774 	buff_ring_t *brp;
775 	send_ring_t *srp;
776 
777 	srp = ngep->send;
778 	brp = ngep->buff;
779 
780 	/* Init the locks for send ring */
781 	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
782 	    DDI_INTR_PRI(ngep->intr_pri));
783 	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
784 	    DDI_INTR_PRI(ngep->intr_pri));
785 	mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
786 	    DDI_INTR_PRI(ngep->intr_pri));
787 
788 	/* Init parameters of buffer ring */
789 	brp->free_list = NULL;
790 	brp->recycle_list = NULL;
791 	brp->rx_hold = 0;
792 	brp->buf_sign = 0;
793 
794 	/* Init recycle list lock */
795 	mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
796 	    DDI_INTR_PRI(ngep->intr_pri));
797 }
798 
799 int
800 nge_init_rings(nge_t *ngep)
801 {
802 	uint32_t err;
803 
804 	err = nge_init_send_ring(ngep);
805 	if (err != DDI_SUCCESS) {
806 		return (err);
807 	}
808 	nge_init_recv_ring(ngep);
809 
810 	err = nge_init_buff_ring(ngep);
811 	if (err != DDI_SUCCESS) {
812 		nge_fini_send_ring(ngep);
813 		return (DDI_FAILURE);
814 	}
815 
816 	return (err);
817 }
818 
819 static int
820 nge_reinit_ring(nge_t *ngep)
821 {
822 	int err;
823 
824 	nge_reinit_recv_ring(ngep);
825 	nge_reinit_send_ring(ngep);
826 	err = nge_reinit_buff_ring(ngep);
827 	return (err);
828 }
829 
830 
831 void
832 nge_fini_rings(nge_t *ngep)
833 {
834 	/*
835 	 * For receive ring, nothing need to be finished.
836 	 * So only finish buffer ring and send ring here.
837 	 */
838 	nge_fini_buff_ring(ngep);
839 	nge_fini_send_ring(ngep);
840 }
841 
/*
 * Loopback ioctl code
 */

/* Loopback modes advertised via the LB_GET_INFO ioctl below */
static lb_property_t loopmodes[] = {
	{ normal,	"normal",	NGE_LOOP_NONE		},
	{ external,	"100Mbps",	NGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	NGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		NGE_LOOP_INTERNAL_PHY	},
};
852 
853 enum ioc_reply
854 nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
855 {
856 	int cmd;
857 	uint32_t *lbmp;
858 	lb_info_sz_t *lbsp;
859 	lb_property_t *lbpp;
860 
861 	/*
862 	 * Validate format of ioctl
863 	 */
864 	if (mp->b_cont == NULL)
865 		return (IOC_INVAL);
866 
867 	cmd = iocp->ioc_cmd;
868 
869 	switch (cmd) {
870 	default:
871 		return (IOC_INVAL);
872 
873 	case LB_GET_INFO_SIZE:
874 		if (iocp->ioc_count != sizeof (lb_info_sz_t))
875 			return (IOC_INVAL);
876 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
877 		*lbsp = sizeof (loopmodes);
878 		return (IOC_REPLY);
879 
880 	case LB_GET_INFO:
881 		if (iocp->ioc_count != sizeof (loopmodes))
882 			return (IOC_INVAL);
883 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
884 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
885 		return (IOC_REPLY);
886 
887 	case LB_GET_MODE:
888 		if (iocp->ioc_count != sizeof (uint32_t))
889 			return (IOC_INVAL);
890 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
891 		*lbmp = ngep->param_loop_mode;
892 		return (IOC_REPLY);
893 
894 	case LB_SET_MODE:
895 		if (iocp->ioc_count != sizeof (uint32_t))
896 			return (IOC_INVAL);
897 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
898 		return (nge_set_loop_mode(ngep, *lbmp));
899 	}
900 }
901 
902 #undef	NGE_DBG
903 #define	NGE_DBG	NGE_DBG_NEMO
904 
905 
906 static void
907 nge_check_desc_prop(nge_t *ngep)
908 {
909 	if (ngep->desc_mode != DESC_HOT && ngep->desc_mode != DESC_OFFLOAD)
910 		ngep->desc_mode = DESC_HOT;
911 
912 	if (ngep->desc_mode == DESC_OFFLOAD)	{
913 
914 		ngep->desc_attr = nge_sum_desc;
915 
916 	}	else if (ngep->desc_mode == DESC_HOT)	{
917 
918 		ngep->desc_attr = nge_hot_desc;
919 	}
920 }
921 
/*
 * nge_get_props -- get the parameters to tune the driver
 *
 * Reads the .conf/device-tree tunables (falling back to built-in
 * defaults), then derives buffer size, ring sizes and the TX-buffer
 * split count from the configured MTU.
 */
static void
nge_get_props(nge_t *ngep)
{
	chip_info_t *infop;
	dev_info_t *devinfo;
	nge_dev_spec_param_t *dev_param_p;

	devinfo = ngep->devinfo;
	infop = (chip_info_t *)&ngep->chipinfo;
	dev_param_p = &ngep->dev_spec_param;

	/* PCI cache-line-size / latency-timer tunables */
	infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, 32);

	infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, 64);

	/* RX watermarks and the software interrupt interval */
	ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_data_hw, 0x20);
	ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
	ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rx_prd_hw, 0xc);

	ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
	ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
	ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
	ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, low_memory_mode, 0);

	/* Only chips with jumbo support honour a configured MTU */
	if (dev_param_p->jumbo) {
		ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
	} else
		ngep->default_mtu = ETHERMTU;

	/*
	 * Scale buffer size and ring sizes with the MTU.  Note that
	 * the lowmem_mode branch is only reached for a standard MTU,
	 * since all jumbo ranges are tested first.
	 */
	if (ngep->default_mtu > ETHERMTU &&
	    ngep->default_mtu <= NGE_MTU_2500) {
		ngep->buf_size = NGE_JB2500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_2500 &&
	    ngep->default_mtu <= NGE_MTU_4500) {
		ngep->buf_size = NGE_JB4500_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MTU_4500 &&
	    ngep->default_mtu <= NGE_MAX_MTU) {
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->default_mtu > NGE_MAX_MTU) {
		/* clamp over-large requests to the maximum supported */
		ngep->default_mtu = NGE_MAX_MTU;
		ngep->buf_size = NGE_JB9000_BUFSZ;
		ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_256;
	} else if (ngep->lowmem_mode != 0) {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
		ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
		ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
		ngep->nge_split = NGE_SPLIT_32;
	} else {
		ngep->default_mtu = ETHERMTU;
		ngep->buf_size = NGE_STD_BUFSZ;
		ngep->tx_desc = dev_param_p->tx_desc_num;
		ngep->rx_desc = dev_param_p->rx_desc_num;
		ngep->rx_buf = dev_param_p->rx_desc_num * 2;
		ngep->nge_split = dev_param_p->nge_split;
	}

	nge_check_desc_prop(ngep);
}
1009 
1010 
/*
 * Reset the chip and reinitialise the driver's software state.
 * Caller must hold genlock (asserted); this routine additionally
 * takes tc_lock then tx_lock around the ring/chip reset.
 */
static int
nge_reset(nge_t *ngep)
{
	int err;
	send_ring_t *srp = ngep->send;

	ASSERT(mutex_owned(ngep->genlock));

	/* tc_lock before tx_lock -- keep this acquisition order */
	mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	/* Reclaim outstanding TX descriptors, then reset all rings */
	nge_tx_recycle_all(ngep);
	err = nge_reinit_ring(ngep);
	if (err == DDI_FAILURE) {
		mutex_exit(srp->tx_lock);
		mutex_exit(srp->tc_lock);
		return (err);
	}
	err = nge_chip_reset(ngep);
	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);
	if (err == DDI_FAILURE)
		return (err);

	/* Back to a known-clean software state */
	ngep->watchdog = 0;
	ngep->resched_needed = B_FALSE;
	ngep->promisc = B_FALSE;
	ngep->param_loop_mode = NGE_LOOP_NONE;
	ngep->factotum_flag = 0;
	ngep->resched_needed = 0;
	ngep->nge_mac_state = NGE_MAC_RESET;

	/* Max frame: payload + ethernet header + FCS + VLAN tag */
	ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
	ngep->max_sdu += VTAG_SIZE;
	ngep->rx_def = 0x16;
	return (DDI_SUCCESS);
}
1045 
/*
 * nge_m_stop() -- GLDv3 mc_stop entry point: stop the chip, recycle
 * outstanding TX descriptors, and tear down rings and DMA memory.
 */
static void
nge_m_stop(void *arg)
{
	nge_t *ngep = arg;		/* private device info	*/

	NGE_TRACE(("nge_m_stop($%p)", arg));

	/*
	 * If suspended, adapter is already stopped, just return.
	 */
	if (ngep->suspended) {
		ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
		return;
	}

	/*
	 * Just stop processing, then record new MAC state
	 */
	mutex_enter(ngep->genlock);
	rw_enter(ngep->rwlock, RW_WRITER);

	(void) nge_chip_stop(ngep, B_FALSE);
	/* Try to wait all the buffer post to upper layer be released */
	ngep->nge_mac_state = NGE_MAC_STOPPED;

	/* Recycle all the TX BD */
	nge_tx_recycle_all(ngep);

	/* Tear down rings, then release all the DMA memory */
	nge_fini_rings(ngep);
	nge_free_bufs(ngep);

	NGE_DEBUG(("nge_m_stop($%p) done", arg));

	rw_exit(ngep->rwlock);
	mutex_exit(ngep->genlock);
}
1081 
1082 static int
1083 nge_m_start(void *arg)
1084 {
1085 	int err;
1086 	nge_t *ngep = arg;
1087 
1088 	NGE_TRACE(("nge_m_start($%p)", arg));
1089 	/*
1090 	 * If suspended, don't start, as the resume processing
1091 	 * will recall this function with the suspended flag off.
1092 	 */
1093 	if (ngep->suspended)
1094 		return (DDI_FAILURE);
1095 	/*
1096 	 * Start processing and record new MAC state
1097 	 */
1098 	mutex_enter(ngep->genlock);
1099 	rw_enter(ngep->rwlock, RW_WRITER);
1100 	err = nge_alloc_bufs(ngep);
1101 	if (err != DDI_SUCCESS) {
1102 		nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
1103 		goto finish;
1104 	}
1105 	err = nge_init_rings(ngep);
1106 	if (err != DDI_SUCCESS) {
1107 		nge_free_bufs(ngep);
1108 		nge_problem(ngep, "nge_init_rings() failed,err=%x");
1109 		goto finish;
1110 	}
1111 	err = nge_restart(ngep);
1112 
1113 	NGE_DEBUG(("nge_m_start($%p) done", arg));
1114 	finish:
1115 		rw_exit(ngep->rwlock);
1116 		mutex_exit(ngep->genlock);
1117 
1118 		return (err);
1119 }
1120 
1121 static int
1122 nge_m_unicst(void *arg, const uint8_t *macaddr)
1123 {
1124 	nge_t *ngep = arg;
1125 
1126 	NGE_TRACE(("nge_m_unicst($%p)", arg));
1127 	/*
1128 	 * Remember the new current address in the driver state
1129 	 * Sync the chip's idea of the address too ...
1130 	 */
1131 	mutex_enter(ngep->genlock);
1132 
1133 	ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
1134 	ngep->cur_uni_addr.set = 1;
1135 
1136 	/*
1137 	 * If we are suspended, we want to quit now, and not update
1138 	 * the chip.  Doing so might put it in a bad state, but the
1139 	 * resume will get the unicast address installed.
1140 	 */
1141 	if (ngep->suspended)
1142 		return (DDI_SUCCESS);
1143 
1144 	nge_chip_sync(ngep);
1145 
1146 	NGE_DEBUG(("nge_m_unicst($%p) done", arg));
1147 	mutex_exit(ngep->genlock);
1148 
1149 	return (0);
1150 }
1151 
1152 static int
1153 nge_m_promisc(void *arg, boolean_t on)
1154 {
1155 	nge_t *ngep = arg;
1156 
1157 	NGE_TRACE(("nge_m_promisc($%p)", arg));
1158 	/*
1159 	 * If suspended, we don't do anything, even record the promiscuious
1160 	 * mode, as we won't properly set it on resume.  Just fail.
1161 	 */
1162 	if (ngep->suspended)
1163 		return (DDI_FAILURE);
1164 
1165 	/*
1166 	 * Store specified mode and pass to chip layer to update h/w
1167 	 */
1168 	mutex_enter(ngep->genlock);
1169 	if (ngep->promisc == on) {
1170 		mutex_exit(ngep->genlock);
1171 		NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1172 		return (0);
1173 	}
1174 	ngep->promisc = on;
1175 	nge_chip_sync(ngep);
1176 	NGE_DEBUG(("nge_m_promisc($%p) done", arg));
1177 	mutex_exit(ngep->genlock);
1178 
1179 	return (0);
1180 }
1181 
1182 static void nge_mulparam(nge_t *ngep)
1183 {
1184 	uint8_t number;
1185 	ether_addr_t pand;
1186 	ether_addr_t por;
1187 	mul_item *plist;
1188 
1189 	for (number = 0; number < ETHERADDRL; number++) {
1190 		pand[number] = 0x00;
1191 		por[number] = 0x00;
1192 	}
1193 	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
1194 		for (number = 0; number < ETHERADDRL; number++) {
1195 			pand[number] &= plist->mul_addr[number];
1196 			por[number] |= plist->mul_addr[number];
1197 		}
1198 	}
1199 	for (number = 0; number < ETHERADDRL; number++) {
1200 		ngep->cur_mul_addr.addr[number]
1201 		    = pand[number] & por[number];
1202 		ngep->cur_mul_mask.addr[number]
1203 		    = pand [number] | (~por[number]);
1204 	}
1205 }
1206 static int
1207 nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1208 {
1209 	boolean_t update;
1210 	boolean_t b_eq;
1211 	nge_t *ngep = arg;
1212 	mul_item *plist;
1213 	mul_item *plist_prev;
1214 	mul_item *pitem;
1215 
1216 	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
1217 	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
1218 
1219 	update = B_FALSE;
1220 	plist = plist_prev = NULL;
1221 	mutex_enter(ngep->genlock);
1222 	if (add) {
1223 		if (ngep->pcur_mulist != NULL) {
1224 			for (plist = ngep->pcur_mulist; plist != NULL;
1225 			    plist = plist->next) {
1226 				b_eq = ether_eq(plist->mul_addr, mca);
1227 				if (b_eq) {
1228 					plist->ref_cnt++;
1229 					break;
1230 				}
1231 				plist_prev = plist;
1232 			}
1233 		}
1234 
1235 		if (plist == NULL) {
1236 			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
1237 			ether_copy(mca, pitem->mul_addr);
1238 			pitem ->ref_cnt++;
1239 			pitem ->next = NULL;
1240 			if (plist_prev == NULL)
1241 				ngep->pcur_mulist = pitem;
1242 			else
1243 				plist_prev->next = pitem;
1244 			update = B_TRUE;
1245 		}
1246 	} else {
1247 		if (ngep->pcur_mulist != NULL) {
1248 			for (plist = ngep->pcur_mulist; plist != NULL;
1249 			    plist = plist->next) {
1250 				b_eq = ether_eq(plist->mul_addr, mca);
1251 				if (b_eq) {
1252 					update = B_TRUE;
1253 					break;
1254 				}
1255 				plist_prev = plist;
1256 			}
1257 
1258 			if (update) {
1259 				if ((plist_prev == NULL) &&
1260 				    (plist->next == NULL))
1261 					ngep->pcur_mulist = NULL;
1262 				else if ((plist_prev == NULL) &&
1263 				    (plist->next != NULL))
1264 					ngep->pcur_mulist = plist->next;
1265 				else
1266 					plist_prev->next = plist->next;
1267 				kmem_free(plist, sizeof (mul_item));
1268 			}
1269 		}
1270 	}
1271 
1272 	if (update || !ngep->suspended) {
1273 		nge_mulparam(ngep);
1274 		nge_chip_sync(ngep);
1275 	}
1276 	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
1277 	mutex_exit(ngep->genlock);
1278 
1279 	return (0);
1280 }
1281 
/*
 * nge_m_ioctl() -- process an ioctl request from the mac layer
 *
 * Validates the command, enforces the net_config privilege where
 * required, dispatches to the chip/loopback/NDD ioctl handlers under
 * genlock, re-syncs the PHY/MAC if the handler asked for a restart,
 * and finally acks, naks, or replies to the message as directed by
 * the handler's ioc_reply status.
 */
static void
nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	int err;
	int cmd;
	nge_t *ngep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;

	/*
	 * If suspended, we might actually be able to do some of
	 * these ioctls, but it is harder to make sure they occur
	 * without actually putting the hardware in an undesirable
	 * state.  So just NAK it.
	 */
	if (ngep->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;

	NGE_DEBUG(("nge_m_ioctl:  cmd 0x%x", cmd));
	switch (cmd) {
	default:
		/* Unrecognised command: NAK it without taking genlock */
		NGE_LDB(NGE_DBG_BADIOC,
		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));

		miocnak(wq, mp, 0, EINVAL);
		return;

	/* Chip-specific diagnostic ioctls always require privilege */
	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		break;

	/* Loopback "get" ioctls are unprivileged; "set" is not */
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	/* NDD "get" ioctls are unprivileged; "set" is not */
	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
			    cmd, err));
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(ngep->genlock);

	/* Dispatch to the appropriate handler family */
	switch (cmd) {
	default:
		/* Already filtered above, so this cannot be reached */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
	break;

	case NGE_MII_READ:
	case NGE_MII_WRITE:
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		status = nge_chip_ioctl(ngep, mp, iocp);
	break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = nge_loop_ioctl(ngep, mp, iocp);
	break;

	case ND_GET:
	case ND_SET:
		status = nge_nd_ioctl(ngep, wq, mp, iocp);
	break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */

	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));

	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		(*ngep->physops->phys_update)(ngep);
		nge_chip_sync(ngep);
		break;

	default:
	break;
	}

	mutex_exit(ngep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {

	default:
	case IOC_INVAL:
		/* NAK with the handler's errno, or EINVAL if none set */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/* The handler already replied; nothing more to do */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/* Reply with the (possibly modified) message itself */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
1445 
/*
 * Rx interrupt-blanking callback registered with the mac layer via
 * nge_m_resources().  This driver does not adjust its blanking
 * parameters dynamically, so the callback is deliberately a no-op.
 */
static void
nge_chip_blank(void *arg, time_t ticks, uint_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1451 
1452 static void
1453 nge_m_resources(void *arg)
1454 {
1455 	nge_t *ngep = arg;
1456 	recv_ring_t *rrp;
1457 	mac_rx_fifo_t mrf;
1458 
1459 	mutex_enter(ngep->genlock);
1460 
1461 	/*
1462 	 * Register Rx rings as resources and save mac
1463 	 * resource id for future reference
1464 	 */
1465 	mrf.mrf_type = MAC_RX_FIFO;
1466 	mrf.mrf_blank = nge_chip_blank;
1467 	mrf.mrf_arg = (void *)ngep;
1468 	mrf.mrf_normal_blank_time = NGE_TICKS_CNT;
1469 	mrf.mrf_normal_pkt_count = NGE_RX_PKT_CNT;
1470 
1471 	rrp = ngep->recv;
1472 	rrp->handle = mac_resource_add(ngep->mh, (mac_resource_t *)&mrf);
1473 	mutex_exit(ngep->genlock);
1474 }
1475 
1476 /* ARGSUSED */
1477 static boolean_t
1478 nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1479 {
1480 	nge_t	*ngep = arg;
1481 	nge_dev_spec_param_t *dev_param_p;
1482 
1483 	dev_param_p = &ngep->dev_spec_param;
1484 
1485 	switch (cap) {
1486 	case MAC_CAPAB_HCKSUM: {
1487 		uint32_t *hcksum_txflags = cap_data;
1488 
1489 		if (dev_param_p->tx_hw_checksum) {
1490 			*hcksum_txflags = dev_param_p->tx_hw_checksum;
1491 		} else
1492 			return (B_FALSE);
1493 		break;
1494 	}
1495 	case MAC_CAPAB_POLL:
1496 		/*
1497 		 * There's nothing for us to fill in, simply returning
1498 		 * B_TRUE, stating that we support polling is sufficient.
1499 		 */
1500 		break;
1501 	default:
1502 		return (B_FALSE);
1503 	}
1504 	return (B_TRUE);
1505 }
1506 
1507 #undef	NGE_DBG
1508 #define	NGE_DBG	NGE_DBG_INIT	/* debug flag for this code	*/
1509 int
1510 nge_restart(nge_t *ngep)
1511 {
1512 	int err = 0;
1513 	err += nge_reset(ngep);
1514 	err += nge_chip_start(ngep);
1515 
1516 	if (err) {
1517 		ngep->nge_mac_state = NGE_MAC_STOPPED;
1518 		return (DDI_FAILURE);
1519 	} else {
1520 		ngep->nge_mac_state = NGE_MAC_STARTED;
1521 		return (DDI_SUCCESS);
1522 	}
1523 }
1524 
1525 void
1526 nge_wake_factotum(nge_t *ngep)
1527 {
1528 	mutex_enter(ngep->softlock);
1529 	if (ngep->factotum_flag == 0) {
1530 		ngep->factotum_flag = 1;
1531 		(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
1532 	}
1533 	mutex_exit(ngep->softlock);
1534 }
1535 
1536 /*
1537  * High-level cyclic handler
1538  *
1539  * This routine schedules a (low-level) softint callback to the
1540  * factotum.
1541  */
1542 
1543 static void
1544 nge_chip_cyclic(void *arg)
1545 {
1546 	nge_t *ngep;
1547 
1548 	ngep = (nge_t *)arg;
1549 
1550 	switch (ngep->nge_chip_state) {
1551 	default:
1552 		return;
1553 
1554 	case NGE_CHIP_RUNNING:
1555 		break;
1556 
1557 	case NGE_CHIP_FAULT:
1558 	case NGE_CHIP_ERROR:
1559 		break;
1560 	}
1561 
1562 	nge_wake_factotum(ngep);
1563 }
1564 
/*
 * nge_unattach() -- undo the effects of nge_attach()
 *
 * Tears down only the stages recorded in ngep->progress, in roughly
 * the reverse order that nge_attach() established them, so it is
 * safe to call from a partially-completed attach as well as from
 * detach.  Frees the softstate itself last.
 */
static void
nge_unattach(nge_t *ngep)
{
	send_ring_t *srp;
	buff_ring_t *brp;

	srp = ngep->send;
	brp = ngep->buff;
	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));

	/*
	 * Flag that no more activity may be initiated
	 */
	ngep->progress &= ~PROGRESS_READY;
	ngep->nge_mac_state = NGE_MAC_UNATTACH;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all NGE data structures
	 */
	if (ngep->periodic_id != NULL) {
		ddi_periodic_delete(ngep->periodic_id);
		ngep->periodic_id = NULL;
	}

	if (ngep->progress & PROGRESS_KSTATS)
		nge_fini_kstats(ngep);

	if (ngep->progress & PROGRESS_NDD)
		nge_nd_cleanup(ngep);

	/* Stop the hardware and restore the factory MAC address */
	if (ngep->progress & PROGRESS_HWINT) {
		mutex_enter(ngep->genlock);
		nge_restore_mac_addr(ngep);
		(void) nge_chip_stop(ngep, B_FALSE);
		mutex_exit(ngep->genlock);
	}

	if (ngep->progress & PROGRESS_SWINT)
		nge_rem_intrs(ngep);

	if (ngep->progress & PROGRESS_FACTOTUM)
		(void) ddi_intr_remove_softint(ngep->factotum_hdl);

	if (ngep->progress & PROGRESS_RESCHED)
		(void) ddi_intr_remove_softint(ngep->resched_hdl);

	/* Locks are destroyed only after all interrupts are gone */
	if (ngep->progress & PROGRESS_INTR) {
		mutex_destroy(srp->tx_lock);
		mutex_destroy(srp->tc_lock);
		mutex_destroy(&srp->dmah_lock);
		mutex_destroy(brp->recycle_lock);

		mutex_destroy(ngep->genlock);
		mutex_destroy(ngep->softlock);
		rw_destroy(ngep->rwlock);
	}

	if (ngep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&ngep->io_handle);

	if (ngep->progress & PROGRESS_CFG)
		pci_config_teardown(&ngep->cfg_handle);

	ddi_remove_minor_node(ngep->devinfo, NULL);

	kmem_free(ngep, sizeof (*ngep));
}
1633 
/*
 * nge_resume() -- handle DDI_RESUME
 *
 * Re-reads the PCI config space (some values may have changed across
 * the suspend) and restarts the controller via nge_m_start().  Always
 * returns DDI_SUCCESS; a failed restart is logged but not fatal, as
 * the system remains usable without this controller.
 */
static int
nge_resume(dev_info_t *devinfo)
{
	nge_t		*ngep;
	chip_info_t	*infop;

	ASSERT(devinfo != NULL);

	ngep = ddi_get_driver_private(devinfo);
	/*
	 * If there are state inconsistencies, this is bad.  Returning
	 * DDI_FAILURE here will eventually cause the machine to panic,
	 * so it is best done here so that there is a possibility of
	 * debugging the problem.
	 */
	if (ngep == NULL)
		cmn_err(CE_PANIC,
		    "nge: ngep returned from ddi_get_driver_private was NULL");
	infop = (chip_info_t *)&ngep->chipinfo;

	if (ngep->devinfo != devinfo)
		cmn_err(CE_PANIC,
		    "nge: passed devinfo not the same as saved definfo");

	/* Clear the flag before restarting so nge_m_start() proceeds */
	ngep->suspended = B_FALSE;

	/*
	 * Fetch the config space.  Even though we have most of it cached,
	 * some values *might* change across a suspend/resume.
	 */
	nge_chip_cfg_init(ngep, infop, B_FALSE);

	/*
	 * Start the controller.  In this case (and probably most GLDv3
	 * devices), it is sufficient to call nge_m_start().
	 */
	if (nge_m_start((void *)ngep) != DDI_SUCCESS) {
		/*
		 * We note the failure, but return success, as the
		 * system is still usable without this controller.
		 */
		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
	}
	return (DDI_SUCCESS);
}
1679 
1680 /*
1681  * attach(9E) -- Attach a device to the system
1682  *
1683  * Called once for each board successfully probed.
1684  */
1685 static int
1686 nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1687 {
1688 	int		err;
1689 	int		i;
1690 	int		instance;
1691 	caddr_t		regs;
1692 	nge_t		*ngep;
1693 	chip_info_t	*infop;
1694 	mac_register_t	*macp;
1695 
1696 	switch (cmd) {
1697 	default:
1698 		return (DDI_FAILURE);
1699 
1700 	case DDI_RESUME:
1701 		return (nge_resume(devinfo));
1702 
1703 	case DDI_ATTACH:
1704 		break;
1705 	}
1706 
1707 	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
1708 	instance = ddi_get_instance(devinfo);
1709 	ddi_set_driver_private(devinfo, ngep);
1710 	ngep->devinfo = devinfo;
1711 
1712 	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
1713 	    NGE_DRIVER_NAME, instance);
1714 	err = pci_config_setup(devinfo, &ngep->cfg_handle);
1715 	if (err != DDI_SUCCESS) {
1716 		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
1717 		goto attach_fail;
1718 	}
1719 	infop = (chip_info_t *)&ngep->chipinfo;
1720 	nge_chip_cfg_init(ngep, infop, B_FALSE);
1721 	nge_init_dev_spec_param(ngep);
1722 	nge_get_props(ngep);
1723 	ngep->progress |= PROGRESS_CFG;
1724 
1725 	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
1726 	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
1727 	if (err != DDI_SUCCESS) {
1728 		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
1729 		goto attach_fail;
1730 	}
1731 	ngep->io_regs = regs;
1732 	ngep->progress |= PROGRESS_REGS;
1733 
1734 	err = nge_register_intrs_and_init_locks(ngep);
1735 	if (err != DDI_SUCCESS) {
1736 		nge_problem(ngep, "nge_attach:"
1737 		    " register intrs and init locks failed");
1738 		goto attach_fail;
1739 	}
1740 	nge_init_ring_param_lock(ngep);
1741 	ngep->progress |= PROGRESS_INTR;
1742 
1743 	mutex_enter(ngep->genlock);
1744 
1745 	/*
1746 	 * Initialise link state variables
1747 	 * Stop, reset & reinitialise the chip.
1748 	 * Initialise the (internal) PHY.
1749 	 */
1750 	nge_phys_init(ngep);
1751 	err = nge_chip_reset(ngep);
1752 	if (err != DDI_SUCCESS) {
1753 		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
1754 		mutex_exit(ngep->genlock);
1755 		goto attach_fail;
1756 	}
1757 	nge_chip_sync(ngep);
1758 
1759 	/*
1760 	 * Now that mutex locks are initialized, enable interrupts.
1761 	 */
1762 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
1763 		/* Call ddi_intr_block_enable() for MSI interrupts */
1764 		(void) ddi_intr_block_enable(ngep->htable,
1765 		    ngep->intr_actual_cnt);
1766 	} else {
1767 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
1768 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
1769 			(void) ddi_intr_enable(ngep->htable[i]);
1770 		}
1771 	}
1772 
1773 	ngep->link_state = LINK_STATE_UNKNOWN;
1774 	ngep->progress |= PROGRESS_HWINT;
1775 
1776 	/*
1777 	 * Register NDD-tweakable parameters
1778 	 */
1779 	if (nge_nd_init(ngep)) {
1780 		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
1781 		mutex_exit(ngep->genlock);
1782 		goto attach_fail;
1783 	}
1784 	ngep->progress |= PROGRESS_NDD;
1785 
1786 	/*
1787 	 * Create & initialise named kstats
1788 	 */
1789 	nge_init_kstats(ngep, instance);
1790 	ngep->progress |= PROGRESS_KSTATS;
1791 
1792 	mutex_exit(ngep->genlock);
1793 
1794 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
1795 		goto attach_fail;
1796 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1797 	macp->m_driver = ngep;
1798 	macp->m_dip = devinfo;
1799 	macp->m_src_addr = infop->vendor_addr.addr;
1800 	macp->m_callbacks = &nge_m_callbacks;
1801 	macp->m_min_sdu = 0;
1802 	macp->m_max_sdu = ngep->default_mtu;
1803 	/*
1804 	 * Finally, we're ready to register ourselves with the mac
1805 	 * interface; if this succeeds, we're all ready to start()
1806 	 */
1807 	err = mac_register(macp, &ngep->mh);
1808 	mac_free(macp);
1809 	if (err != 0)
1810 		goto attach_fail;
1811 
1812 	/*
1813 	 * Register a periodical handler.
1814 	 * nge_chip_cyclic() is invoked in kernel context.
1815 	 */
1816 	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
1817 	    NGE_CYCLIC_PERIOD, DDI_IPL_0);
1818 
1819 	ngep->progress |= PROGRESS_READY;
1820 	return (DDI_SUCCESS);
1821 
1822 attach_fail:
1823 	nge_unattach(ngep);
1824 	return (DDI_FAILURE);
1825 }
1826 
1827 /*
1828  * detach(9E) -- Detach a device from the system
1829  */
1830 static int
1831 nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1832 {
1833 	int i;
1834 	nge_t *ngep;
1835 	mul_item *p, *nextp;
1836 	buff_ring_t *brp;
1837 
1838 	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));
1839 
1840 	ngep = ddi_get_driver_private(devinfo);
1841 	brp = ngep->buff;
1842 
1843 	switch (cmd) {
1844 	default:
1845 		return (DDI_FAILURE);
1846 
1847 	case DDI_SUSPEND:
1848 		/*
1849 		 * Stop the NIC
1850 		 * I suspect that we can actually suspend if the stop
1851 		 * routine returns a failure, as the resume will
1852 		 * effectively fully reset the hardware (i.e. we don't
1853 		 * really save any hardware state).  However, nge_m_stop
1854 		 * doesn't return an error code.
1855 		 * Note: This driver doesn't currently support WOL, but
1856 		 *	should it in the future, it is important to
1857 		 *	make sure the PHY remains powered so that the
1858 		 *	wakeup packet can actually be recieved.
1859 		 */
1860 		nge_m_stop(ngep);
1861 		ngep->suspended = B_TRUE;
1862 		return (DDI_SUCCESS);
1863 
1864 	case DDI_DETACH:
1865 		break;
1866 	}
1867 
1868 	/* Try to wait all the buffer post to upper layer be released */
1869 	for (i = 0; i < 1000; i++) {
1870 		if (brp->rx_hold == 0)
1871 			break;
1872 		drv_usecwait(1000);
1873 	}
1874 
1875 	/* If there is any posted buffer, reject to detach */
1876 	if (brp->rx_hold != 0)
1877 		return (DDI_FAILURE);
1878 
1879 	/* Recycle the multicast table */
1880 	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
1881 		nextp = p->next;
1882 		kmem_free(p, sizeof (mul_item));
1883 	}
1884 	ngep->pcur_mulist = NULL;
1885 
1886 	/*
1887 	 * Unregister from the GLD subsystem.  This can fail, in
1888 	 * particular if there are DLPI style-2 streams still open -
1889 	 * in which case we just return failure without shutting
1890 	 * down chip operations.
1891 	 */
1892 	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
1893 		return (DDI_FAILURE);
1894 
1895 	/*
1896 	 * All activity stopped, so we can clean up & exit
1897 	 */
1898 	nge_unattach(ngep);
1899 	return (DDI_SUCCESS);
1900 }
1901 
1902 
1903 /*
1904  * ========== Module Loading Data & Entry Points ==========
1905  */
1906 
/* Standard STREAMS-style dev_ops; attach/detach are the only real hooks */
DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
    nodev, NULL, D_MP, NULL);


static struct modldrv nge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	nge_ident,		/* short description */
	&nge_dev_ops		/* driver specific ops */
};

/* Single-linkage module: just the driver above */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&nge_modldrv, NULL
};
1920 
1921 
/*
 * _info(9E) -- report module information to modinfo(1M)
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1927 
1928 int
1929 _init(void)
1930 {
1931 	int status;
1932 
1933 	mac_init_ops(&nge_dev_ops, "nge");
1934 	status = mod_install(&modlinkage);
1935 	if (status != DDI_SUCCESS)
1936 		mac_fini_ops(&nge_dev_ops);
1937 	else
1938 		mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
1939 
1940 	return (status);
1941 }
1942 
1943 int
1944 _fini(void)
1945 {
1946 	int status;
1947 
1948 	status = mod_remove(&modlinkage);
1949 	if (status == DDI_SUCCESS) {
1950 		mac_fini_ops(&nge_dev_ops);
1951 		mutex_destroy(nge_log_mutex);
1952 	}
1953 
1954 	return (status);
1955 }
1956 
1957 /*
1958  * ============ Init MSI/Fixed/SoftInterrupt routines ==============
1959  */
1960 
1961 /*
1962  * Register interrupts and initialize each mutex and condition variables
1963  */
1964 
/*
 * nge_register_intrs_and_init_locks() -- attach-time interrupt setup
 *
 * Adds the two softint handlers, selects and registers a hardware
 * interrupt type (MSI preferred, FIXED as fallback), performs the
 * MCP55-specific MSI vector remapping, and finally initialises
 * genlock, softlock, and rwlock at the priorities obtained from the
 * interrupt framework.  Returns DDI_SUCCESS or DDI_FAILURE; completed
 * stages are recorded in ngep->progress for nge_unattach() to undo.
 */
static int
nge_register_intrs_and_init_locks(nge_t *ngep)
{
	int		err;
	int		intr_types;
	uint_t		soft_prip;
	nge_msi_mask	msi_mask;
	nge_msi_map0_vec map0_vec;
	nge_msi_map1_vec map1_vec;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.  Its only purpose is to call gld_sched()
	 * to retry the pending transmits (we're not allowed to hold
	 * driver-defined mutexes across gld_sched()).
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_reschedule softintr failed");

		return (DDI_FAILURE);
	}
	ngep->progress |= PROGRESS_RESCHED;
	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
	if (err != DDI_SUCCESS) {
		nge_problem(ngep,
		    "nge_attach: add nge_chip_factotum softintr failed!");

		return (DDI_FAILURE);
	}
	/* Remember the factotum's priority for softlock initialisation */
	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
	    != DDI_SUCCESS) {
		nge_problem(ngep, "nge_attach: get softintr priority failed\n");

		return (DDI_FAILURE);
	}
	ngep->soft_pri = soft_prip;

	ngep->progress |= PROGRESS_FACTOTUM;
	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
	    != DDI_SUCCESS) {
		nge_error(ngep, "ddi_intr_get_supported_types failed\n");

		return (DDI_FAILURE);
	}

	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
	    intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {

		/* MSI Configurations for mcp55 chipset */
		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {


			/* Enable the 8 vectors */
			msi_mask.msi_mask_val =
			    nge_reg_get32(ngep, NGE_MSI_MASK);
			msi_mask.msi_msk_bits.vec0 = NGE_SET;
			msi_mask.msi_msk_bits.vec1 = NGE_SET;
			msi_mask.msi_msk_bits.vec2 = NGE_SET;
			msi_mask.msi_msk_bits.vec3 = NGE_SET;
			msi_mask.msi_msk_bits.vec4 = NGE_SET;
			msi_mask.msi_msk_bits.vec5 = NGE_SET;
			msi_mask.msi_msk_bits.vec6 = NGE_SET;
			msi_mask.msi_msk_bits.vec7 = NGE_SET;
			nge_reg_put32(ngep, NGE_MSI_MASK,
			    msi_mask.msi_mask_val);

			/*
			 * Remapping the MSI MAP0 and MAP1. MCP55
			 * is default mapping all the interrupt to 0 vector.
			 * Software needs to remapping this.
			 * This mapping is same as CK804.
			 */
			map0_vec.msi_map0_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP0);
			map1_vec.msi_map1_val =
			    nge_reg_get32(ngep, NGE_MSI_MAP1);
			map0_vec.vecs_bits.reint_vec = 0;
			map0_vec.vecs_bits.rcint_vec = 0;
			map0_vec.vecs_bits.miss_vec = 3;
			map0_vec.vecs_bits.teint_vec = 5;
			map0_vec.vecs_bits.tcint_vec = 5;
			map0_vec.vecs_bits.stint_vec = 2;
			map0_vec.vecs_bits.mint_vec = 6;
			map0_vec.vecs_bits.rfint_vec = 0;
			map1_vec.vecs_bits.tfint_vec = 5;
			map1_vec.vecs_bits.feint_vec = 6;
			map1_vec.vecs_bits.resv8_11 = 3;
			map1_vec.vecs_bits.resv12_15 = 1;
			map1_vec.vecs_bits.resv16_19 = 0;
			map1_vec.vecs_bits.resv20_23 = 7;
			map1_vec.vecs_bits.resv24_31 = 0xff;
			nge_reg_put32(ngep, NGE_MSI_MAP0,
			    map0_vec.msi_map0_val);
			nge_reg_put32(ngep, NGE_MSI_MAP1,
			    map1_vec.msi_map1_val);
		}
		/* MSI failure is non-fatal: fall back to FIXED below */
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			NGE_DEBUG(("MSI registration failed, "
			    "trying FIXED interrupt type\n"));
		} else {
			nge_log(ngep, "Using MSI interrupt type\n");

			ngep->intr_type = DDI_INTR_TYPE_MSI;
			ngep->progress |= PROGRESS_SWINT;
		}
	}

	if (!(ngep->progress & PROGRESS_SWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			nge_error(ngep, "FIXED interrupt "
			    "registration failed\n");

			return (DDI_FAILURE);
		}

		nge_log(ngep, "Using FIXED interrupt type\n");

		ngep->intr_type = DDI_INTR_TYPE_FIXED;
		ngep->progress |= PROGRESS_SWINT;
	}


	if (!(ngep->progress & PROGRESS_SWINT)) {
		nge_error(ngep, "No interrupts registered\n");

		return (DDI_FAILURE);
	}
	/*
	 * Initialise the locks at the priorities the interrupt
	 * framework reported (set by nge_add_intrs/softint above).
	 */
	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));
	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ngep->soft_pri));
	rw_init(ngep->rwlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(ngep->intr_pri));

	return (DDI_SUCCESS);
}
2128 
2129 /*
2130  * nge_add_intrs:
2131  *
2132  * Register FIXED or MSI interrupts.
2133  */
2134 static int
2135 nge_add_intrs(nge_t *ngep, int	intr_type)
2136 {
2137 	dev_info_t	*dip = ngep->devinfo;
2138 	int		avail, actual, intr_size, count = 0;
2139 	int		i, flag, ret;
2140 
2141 	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
2142 
2143 	/* Get number of interrupts */
2144 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
2145 	if ((ret != DDI_SUCCESS) || (count == 0)) {
2146 		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
2147 		    "count: %d", ret, count);
2148 
2149 		return (DDI_FAILURE);
2150 	}
2151 
2152 	/* Get number of available interrupts */
2153 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
2154 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
2155 		nge_error(ngep, "ddi_intr_get_navail() failure, "
2156 		    "ret: %d, avail: %d\n", ret, avail);
2157 
2158 		return (DDI_FAILURE);
2159 	}
2160 
2161 	if (avail < count) {
2162 		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
2163 		    count, avail));
2164 	}
2165 	flag = DDI_INTR_ALLOC_NORMAL;
2166 
2167 	/* Allocate an array of interrupt handles */
2168 	intr_size = count * sizeof (ddi_intr_handle_t);
2169 	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
2170 
2171 	/* Call ddi_intr_alloc() */
2172 	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
2173 	    count, &actual, flag);
2174 
2175 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
2176 		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
2177 
2178 		kmem_free(ngep->htable, intr_size);
2179 		return (DDI_FAILURE);
2180 	}
2181 
2182 	if (actual < count) {
2183 		NGE_DEBUG(("Requested: %d, Received: %d\n",
2184 		    count, actual));
2185 	}
2186 
2187 	ngep->intr_actual_cnt = actual;
2188 	ngep->intr_req_cnt = count;
2189 
2190 	/*
2191 	 * Get priority for first msi, assume remaining are all the same
2192 	 */
2193 	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
2194 	    DDI_SUCCESS) {
2195 		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
2196 
2197 		/* Free already allocated intr */
2198 		for (i = 0; i < actual; i++) {
2199 			(void) ddi_intr_free(ngep->htable[i]);
2200 		}
2201 
2202 		kmem_free(ngep->htable, intr_size);
2203 
2204 		return (DDI_FAILURE);
2205 	}
2206 	/* Test for high level mutex */
2207 	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
2208 		nge_error(ngep, "nge_add_intrs:"
2209 		    "Hi level interrupt not supported");
2210 
2211 		for (i = 0; i < actual; i++)
2212 			(void) ddi_intr_free(ngep->htable[i]);
2213 
2214 		kmem_free(ngep->htable, intr_size);
2215 
2216 		return (DDI_FAILURE);
2217 	}
2218 
2219 
2220 	/* Call ddi_intr_add_handler() */
2221 	for (i = 0; i < actual; i++) {
2222 		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
2223 		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
2224 			nge_error(ngep, "ddi_intr_add_handler() "
2225 			    "failed %d\n", ret);
2226 
2227 			/* Free already allocated intr */
2228 			for (i = 0; i < actual; i++) {
2229 				(void) ddi_intr_free(ngep->htable[i]);
2230 			}
2231 
2232 			kmem_free(ngep->htable, intr_size);
2233 
2234 			return (DDI_FAILURE);
2235 		}
2236 	}
2237 
2238 	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
2239 	    != DDI_SUCCESS) {
2240 		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
2241 
2242 		for (i = 0; i < actual; i++) {
2243 			(void) ddi_intr_remove_handler(ngep->htable[i]);
2244 			(void) ddi_intr_free(ngep->htable[i]);
2245 		}
2246 
2247 		kmem_free(ngep->htable, intr_size);
2248 
2249 		return (DDI_FAILURE);
2250 	}
2251 
2252 	return (DDI_SUCCESS);
2253 }
2254 
2255 /*
2256  * nge_rem_intrs:
2257  *
2258  * Unregister FIXED or MSI interrupts
2259  */
2260 static void
2261 nge_rem_intrs(nge_t *ngep)
2262 {
2263 	int	i;
2264 
2265 	NGE_DEBUG(("nge_rem_intrs\n"));
2266 
2267 	/* Disable all interrupts */
2268 	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
2269 		/* Call ddi_intr_block_disable() */
2270 		(void) ddi_intr_block_disable(ngep->htable,
2271 		    ngep->intr_actual_cnt);
2272 	} else {
2273 		for (i = 0; i < ngep->intr_actual_cnt; i++) {
2274 			(void) ddi_intr_disable(ngep->htable[i]);
2275 		}
2276 	}
2277 
2278 	/* Call ddi_intr_remove_handler() */
2279 	for (i = 0; i < ngep->intr_actual_cnt; i++) {
2280 		(void) ddi_intr_remove_handler(ngep->htable[i]);
2281 		(void) ddi_intr_free(ngep->htable[i]);
2282 	}
2283 
2284 	kmem_free(ngep->htable,
2285 	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
2286 }
2287