xref: /titanic_44/usr/src/uts/common/io/nxge/nxge_main.c (revision a237e38e9161f0acd6451439d4a7dd597e66291d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
30  */
31 #include	<sys/nxge/nxge_impl.h>
32 #include	<sys/pcie.h>
33 
34 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
35 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
36 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
37 /*
38  * Until MSI-X is supported, assume MSI; set to 2 for MSI-X.
39  */
40 uint32_t	nxge_msi_enable = 1;		/* debug: turn msi off */
41 
42 /*
43  * Globals: tunable parameters (/etc/system or adb)
44  *
45  */
46 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
47 uint32_t 	nxge_rbr_spare_size = 0;
48 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
49 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
50 uint32_t 	nxge_no_msg = 0;		/* control message display */
51 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
52 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
53 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
54 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
55 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
56 boolean_t	nxge_jumbo_enable = B_FALSE;
57 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
58 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
59 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
60 
61 /*
62  * Debugging flags:
63  *		nxge_no_tx_lb : set to 1 to disable transmit load balancing
64  *		nxge_tx_lb_policy: 0 - TCP/UDP port (default)
65  *				   3 - DEST MAC
66  */
67 uint32_t 	nxge_no_tx_lb = 0;
68 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
69 
70 /*
71  * Tunable to limit the number of packets processed per interrupt,
72  * reducing the amount of time spent in the ISR doing Rx processing.
73  */
74 uint32_t nxge_max_rx_pkts = 1024;
75 
76 /*
77  * Tunables to manage the receive buffer blocks.
78  *
79  * nxge_rx_threshold_hi: usage level above which all packets are copied.
80  * nxge_rx_buf_size_type: receive buffer block size type.
81  * nxge_rx_threshold_lo: usage level above which packets no larger than
82  *	the block size type are copied.
82  */
83 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
84 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
85 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
86 
87 rtrace_t npi_rtracebuf;
88 
89 #if	defined(sun4v)
90 /*
91  * Hypervisor N2/NIU services information.
92  */
93 static hsvc_info_t niu_hsvc = {
94 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
95 	NIU_MINOR_VER, "nxge"
96 };
97 #endif
98 
99 /*
100  * Function Prototypes
101  */
102 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
103 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
104 static void nxge_unattach(p_nxge_t);
105 
106 #if NXGE_PROPERTY
107 static void nxge_remove_hard_properties(p_nxge_t);
108 #endif
109 
110 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
111 
112 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
113 static void nxge_destroy_mutexes(p_nxge_t);
114 
115 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
116 static void nxge_unmap_regs(p_nxge_t nxgep);
117 #ifdef	NXGE_DEBUG
118 static void nxge_test_map_regs(p_nxge_t nxgep);
119 #endif
120 
121 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
122 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
123 static void nxge_remove_intrs(p_nxge_t nxgep);
124 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
125 
126 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
127 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
128 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
129 static void nxge_intrs_enable(p_nxge_t nxgep);
130 static void nxge_intrs_disable(p_nxge_t nxgep);
131 
132 static void nxge_suspend(p_nxge_t);
133 static nxge_status_t nxge_resume(p_nxge_t);
134 
135 static nxge_status_t nxge_setup_dev(p_nxge_t);
136 static void nxge_destroy_dev(p_nxge_t);
137 
138 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
139 static void nxge_free_mem_pool(p_nxge_t);
140 
141 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
142 static void nxge_free_rx_mem_pool(p_nxge_t);
143 
144 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
145 static void nxge_free_tx_mem_pool(p_nxge_t);
146 
147 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
148 	struct ddi_dma_attr *,
149 	size_t, ddi_device_acc_attr_t *, uint_t,
150 	p_nxge_dma_common_t);
151 
152 static void nxge_dma_mem_free(p_nxge_dma_common_t);
153 
154 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
155 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
156 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
157 
158 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
159 	p_nxge_dma_common_t *, size_t);
160 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
161 
162 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
163 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
164 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
165 
166 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
167 	p_nxge_dma_common_t *,
168 	size_t);
169 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
170 
171 static int nxge_init_common_dev(p_nxge_t);
172 static void nxge_uninit_common_dev(p_nxge_t);
173 
174 /*
175  * The next declarations are for the GLDv3 interface.
176  */
177 static int nxge_m_start(void *);
178 static void nxge_m_stop(void *);
179 static int nxge_m_unicst(void *, const uint8_t *);
180 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
181 static int nxge_m_promisc(void *, boolean_t);
182 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
183 static void nxge_m_resources(void *);
184 mblk_t *nxge_m_tx(void *arg, mblk_t *);
185 static nxge_status_t nxge_mac_register(p_nxge_t);
186 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
187 	mac_addr_slot_t slot);
188 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
189 	boolean_t factory);
190 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
191 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
192 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
193 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
194 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
195 
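/* NXGE_NEPTUNE_MAGIC spells "NXGE" in ASCII (0x4E 0x58 0x47 0x45). */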
196 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
197 #define	MAX_DUMP_SZ 256
198 
199 #define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
200 
201 static	boolean_t	nxge_m_getcapab(void *, mac_capab_t, void *);
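/*
 * GLDv3 callback vector.  The MC_RESOURCES, MC_IOCTL and MC_GETCAPAB
 * flags above advertise the optional entry points implemented here.
 */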
202 static mac_callbacks_t nxge_m_callbacks = {
203 	NXGE_M_CALLBACK_FLAGS,
204 	nxge_m_stat,
205 	nxge_m_start,
206 	nxge_m_stop,
207 	nxge_m_promisc,
208 	nxge_m_multicst,
209 	nxge_m_unicst,
210 	nxge_m_tx,
211 	nxge_m_resources,
212 	nxge_m_ioctl,
213 	nxge_m_getcapab
214 };
215 
216 void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
218 
219 /*
220  * These global variables control the message
221  * output.
222  */
223 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
224 uint64_t nxge_debug_level = 0;
225 
226 /*
227  * This list contains the instance structures for the Neptune
228  * devices present in the system. The lock exists to guarantee
229  * mutually exclusive access to the list.
230  */
231 void 			*nxge_list = NULL;
232 
233 void			*nxge_hw_list = NULL;
234 nxge_os_mutex_t 	nxge_common_lock;
235 
236 nxge_os_mutex_t		nxge_mii_lock;
237 static uint32_t		nxge_mii_lock_init = 0;
238 nxge_os_mutex_t		nxge_mdio_lock;
239 static uint32_t		nxge_mdio_lock_init = 0;
240 
241 extern uint64_t 	npi_debug_level;
242 
243 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
244 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
245 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
246 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
247 extern void		nxge_fm_init(p_nxge_t,
248 					ddi_device_acc_attr_t *,
249 					ddi_device_acc_attr_t *,
250 					ddi_dma_attr_t *);
251 extern void		nxge_fm_fini(p_nxge_t);
252 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
253 
254 /*
255  * Count of the receive buffers owned by Neptune instances that are
256  * currently loaned up to the upper layers.
257  */
258 uint32_t nxge_mblks_pending = 0;
259 
260 /*
261  * Device register access attributes for PIO.
262  */
263 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
264 	DDI_DEVICE_ATTR_V0,
265 	DDI_STRUCTURE_LE_ACC,
266 	DDI_STRICTORDER_ACC,
267 };
268 
269 /*
270  * Device descriptor access attributes for DMA.
271  */
272 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
273 	DDI_DEVICE_ATTR_V0,
274 	DDI_STRUCTURE_LE_ACC,
275 	DDI_STRICTORDER_ACC
276 };
277 
278 /*
279  * Device buffer access attributes for DMA.
280  */
281 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
282 	DDI_DEVICE_ATTR_V0,
283 	DDI_STRUCTURE_BE_ACC,
284 	DDI_STRICTORDER_ACC
285 };
286 
287 ddi_dma_attr_t nxge_desc_dma_attr = {
288 	DMA_ATTR_V0,		/* version number. */
289 	0,			/* low address */
290 	0xffffffffffffffff,	/* high address */
291 	0xffffffffffffffff,	/* address counter max */
292 #ifndef NIU_PA_WORKAROUND
293 	0x100000,		/* alignment */
294 #else
295 	0x2000,
296 #endif
297 	0xfc00fc,		/* dlim_burstsizes */
298 	0x1,			/* minimum transfer size */
299 	0xffffffffffffffff,	/* maximum transfer size */
300 	0xffffffffffffffff,	/* maximum segment size */
301 	1,			/* scatter/gather list length */
302 	(unsigned int) 1,	/* granularity */
303 	0			/* attribute flags */
304 };
305 
306 ddi_dma_attr_t nxge_tx_dma_attr = {
307 	DMA_ATTR_V0,		/* version number. */
308 	0,			/* low address */
309 	0xffffffffffffffff,	/* high address */
310 	0xffffffffffffffff,	/* address counter max */
311 #if defined(_BIG_ENDIAN)
312 	0x2000,			/* alignment */
313 #else
314 	0x1000,			/* alignment */
315 #endif
316 	0xfc00fc,		/* dlim_burstsizes */
317 	0x1,			/* minimum transfer size */
318 	0xffffffffffffffff,	/* maximum transfer size */
319 	0xffffffffffffffff,	/* maximum segment size */
320 	5,			/* scatter/gather list length */
321 	(unsigned int) 1,	/* granularity */
322 	0			/* attribute flags */
323 };
324 
325 ddi_dma_attr_t nxge_rx_dma_attr = {
326 	DMA_ATTR_V0,		/* version number. */
327 	0,			/* low address */
328 	0xffffffffffffffff,	/* high address */
329 	0xffffffffffffffff,	/* address counter max */
330 	0x2000,			/* alignment */
331 	0xfc00fc,		/* dlim_burstsizes */
332 	0x1,			/* minimum transfer size */
333 	0xffffffffffffffff,	/* maximum transfer size */
334 	0xffffffffffffffff,	/* maximum segment size */
335 	1,			/* scatter/gather list length */
336 	(unsigned int) 1,	/* granularity */
337 	0			/* attribute flags */
338 };
339 
340 ddi_dma_lim_t nxge_dma_limits = {
341 	(uint_t)0,		/* dlim_addr_lo */
342 	(uint_t)0xffffffff,	/* dlim_addr_hi */
343 	(uint_t)0xffffffff,	/* dlim_cntr_max */
344 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
345 	0x1,			/* dlim_minxfer */
346 	1024			/* dlim_speed */
347 };
348 
349 dma_method_t nxge_force_dma = DVMA;
350 
351 /*
352  * dma chunk sizes.
353  *
354  * Try to allocate the largest possible size
355  * so that fewer number of dma chunks would be managed
356  */
357 #ifdef NIU_PA_WORKAROUND
358 size_t alloc_sizes [] = {0x2000};
359 #else
360 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
361 		0x10000, 0x20000, 0x40000, 0x80000,
362 		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
363 #endif
364 
365 /*
366  * nxge_attach - attach(9E) entry point.  Handles DDI_ATTACH as well as
367  * DDI_RESUME and DDI_PM_RESUME of a previously suspended instance.
367  */
368 
369 static int
370 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
371 {
372 	p_nxge_t	nxgep = NULL;
373 	int		instance;
374 	int		status = DDI_SUCCESS;
375 	nxge_status_t	nxge_status = NXGE_OK;
376 	uint8_t		portn;
377 	nxge_mmac_t	*mmac_info;
378 
379 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
380 
381 	/*
382 	 * Get the device instance since we'll need to set up
383 	 * or retrieve a soft state for this instance.
384 	 */
385 	instance = ddi_get_instance(dip);
386 
387 	switch (cmd) {
388 	case DDI_ATTACH:
389 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
390 		break;
391 
392 	case DDI_RESUME:
393 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
394 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
395 		if (nxgep == NULL) {
396 			status = DDI_FAILURE;
397 			break;
398 		}
399 		if (nxgep->dip != dip) {
400 			status = DDI_FAILURE;
401 			break;
402 		}
403 		if (nxgep->suspended == DDI_PM_SUSPEND) {
404 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
405 		} else {
406 			nxge_status = nxge_resume(nxgep);
407 		}
408 		goto nxge_attach_exit;
409 
410 	case DDI_PM_RESUME:
411 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
412 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
413 		if (nxgep == NULL) {
414 			status = DDI_FAILURE;
415 			break;
416 		}
417 		if (nxgep->dip != dip) {
418 			status = DDI_FAILURE;
419 			break;
420 		}
421 		nxge_status = nxge_resume(nxgep);
422 		goto nxge_attach_exit;
423 
424 	default:
425 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
426 		status = DDI_FAILURE;
427 		goto nxge_attach_exit;
428 	}
429
431 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
432 		status = DDI_FAILURE;
433 		goto nxge_attach_exit;
434 	}
435 
436 	nxgep = ddi_get_soft_state(nxge_list, instance);
437 	if (nxgep == NULL) {
438 		goto nxge_attach_fail;
439 	}
440 
441 	nxgep->drv_state = 0;
442 	nxgep->dip = dip;
443 	nxgep->instance = instance;
444 	nxgep->p_dip = ddi_get_parent(dip);
445 	nxgep->nxge_debug_level = nxge_debug_level;
446 	npi_debug_level = nxge_debug_level;
447 
448 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
449 				&nxge_rx_dma_attr);
450 
451 	status = nxge_map_regs(nxgep);
452 	if (status != NXGE_OK) {
453 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
454 		goto nxge_attach_fail;
455 	}
456 
457 	status = nxge_init_common_dev(nxgep);
458 	if (status != NXGE_OK) {
459 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
460 			"nxge_init_common_dev failed"));
461 		goto nxge_attach_fail;
462 	}
463 
464 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
465 	nxgep->mac.portnum = portn;
466 	if ((portn == 0) || (portn == 1))
467 		nxgep->mac.porttype = PORT_TYPE_XMAC;
468 	else
469 		nxgep->mac.porttype = PORT_TYPE_BMAC;
470 	/*
471 	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
472 	 * internally and the remaining 2 ports use BMAC (1G "Big" MAC).
473 	 * The two types of MACs have different characteristics.
474 	 */
475 	mmac_info = &nxgep->nxge_mmac_info;
476 	if (nxgep->function_num < 2) {
477 		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
478 		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
479 	} else {
480 		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
481 		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
482 	}
483 	/*
484 	 * Set up the ndd parameters for this instance.
485 	 */
486 	nxge_init_param(nxgep);
487 
488 	/*
489 	 * Set up the register tracing buffer.
490 	 */
491 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
492 
493 	/* init stats ptr */
494 	nxge_init_statsp(nxgep);
495 	status = nxge_get_xcvr_type(nxgep);
496 
497 	if (status != NXGE_OK) {
498 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_attach: "
499 				    " Couldn't determine card type"
500 				    " .... exit "));
501 		goto nxge_attach_fail;
502 	}
503 
504 	if ((nxgep->niu_type == NEPTUNE) &&
505 		(nxgep->mac.portmode == PORT_10G_FIBER)) {
506 		nxgep->niu_type = NEPTUNE_2;
507 	}
508 
509 	status = nxge_get_config_properties(nxgep);
510 
511 	if (status != NXGE_OK) {
512 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
513 		goto nxge_attach_fail;
514 	}
515 
516 	nxge_get_xcvr_properties(nxgep);
517 
518 	/*
519 	 * Set up the kstats for the driver.
520 	 */
521 	nxge_setup_kstats(nxgep);
522 
523 	nxge_setup_param(nxgep);
524 
525 	status = nxge_setup_system_dma_pages(nxgep);
526 	if (status != NXGE_OK) {
527 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
528 		goto nxge_attach_fail;
529 	}
530 
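	/*
	 * On sun4v, the N2/NIU is reached through hypervisor services:
	 * negotiate the NIU service group version with the hypervisor
	 * before going any further.
	 */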
531 #if	defined(sun4v)
532 	if (nxgep->niu_type == N2_NIU) {
533 		nxgep->niu_hsvc_available = B_FALSE;
534 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
535 		if ((status =
536 			hsvc_register(&nxgep->niu_hsvc,
537 					&nxgep->niu_min_ver)) != 0) {
538 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
539 					"nxge_attach: "
540 					"%s: cannot negotiate "
541 					"hypervisor services "
542 					"revision %d "
543 					"group: 0x%lx "
544 					"major: 0x%lx minor: 0x%lx "
545 					"errno: %d",
546 					niu_hsvc.hsvc_modname,
547 					niu_hsvc.hsvc_rev,
548 					niu_hsvc.hsvc_group,
549 					niu_hsvc.hsvc_major,
550 					niu_hsvc.hsvc_minor,
551 					status));
552 				status = DDI_FAILURE;
553 				goto nxge_attach_fail;
554 		}
555 
556 		nxgep->niu_hsvc_available = B_TRUE;
557 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
558 			"NIU Hypervisor service enabled"));
559 	}
560 #endif
561 
562 	nxge_hw_id_init(nxgep);
563 	nxge_hw_init_niu_common(nxgep);
564 
565 	status = nxge_setup_mutexes(nxgep);
566 	if (status != NXGE_OK) {
567 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
568 		goto nxge_attach_fail;
569 	}
570 
571 	status = nxge_setup_dev(nxgep);
572 	if (status != DDI_SUCCESS) {
573 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
574 		goto nxge_attach_fail;
575 	}
576 
577 	status = nxge_add_intrs(nxgep);
578 	if (status != DDI_SUCCESS) {
579 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
580 		goto nxge_attach_fail;
581 	}
582 	status = nxge_add_soft_intrs(nxgep);
583 	if (status != DDI_SUCCESS) {
584 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
585 		goto nxge_attach_fail;
586 	}
587 
588 	/*
589 	 * Enable interrupts.
590 	 */
591 	nxge_intrs_enable(nxgep);
592 
593 	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
594 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
595 			"unable to register to mac layer (%d)", status));
596 		goto nxge_attach_fail;
597 	}
598 
599 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
600 
601 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
602 		instance));
603 
604 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
605 
606 	goto nxge_attach_exit;
607 
608 nxge_attach_fail:
609 	nxge_unattach(nxgep);
610 	if (status == DDI_SUCCESS)
611 		status = DDI_FAILURE;
612 	nxgep = NULL;
613 
614 nxge_attach_exit:
615 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
616 		status));
617 
618 	return (status);
619 }
620 
621 static int
622 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
623 {
624 	int 		status = DDI_SUCCESS;
625 	int 		instance;
626 	p_nxge_t 	nxgep = NULL;
627 
628 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
629 	instance = ddi_get_instance(dip);
630 	nxgep = ddi_get_soft_state(nxge_list, instance);
631 	if (nxgep == NULL) {
632 		status = DDI_FAILURE;
633 		goto nxge_detach_exit;
634 	}
635 
636 	switch (cmd) {
637 	case DDI_DETACH:
638 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
639 		break;
640 
641 	case DDI_PM_SUSPEND:
642 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
643 		nxgep->suspended = DDI_PM_SUSPEND;
644 		nxge_suspend(nxgep);
645 		break;
646 
647 	case DDI_SUSPEND:
648 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
649 		if (nxgep->suspended != DDI_PM_SUSPEND) {
650 			nxgep->suspended = DDI_SUSPEND;
651 			nxge_suspend(nxgep);
652 		}
653 		break;
654 
655 	default:
656 		status = DDI_FAILURE;
657 	}
658 
659 	if (cmd != DDI_DETACH)
660 		goto nxge_detach_exit;
661 
662 	/*
663 	 * Stop the xcvr polling.
664 	 */
665 	nxgep->suspended = cmd;
666 
667 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
668 
669 	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
670 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
671 			"<== nxge_detach status = 0x%08X", status));
672 		return (DDI_FAILURE);
673 	}
674 
675 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
676 		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
677 
678 	nxge_unattach(nxgep);
679 	nxgep = NULL;
680 
681 nxge_detach_exit:
682 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
683 		status));
684 
685 	return (status);
686 }
687 
688 static void
689 nxge_unattach(p_nxge_t nxgep)
690 {
691 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
692 
693 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
694 		return;
695 	}
696 
697 	if (nxgep->nxge_hw_p) {
698 		nxge_uninit_common_dev(nxgep);
699 		nxgep->nxge_hw_p = NULL;
700 	}
701 
702 	if (nxgep->nxge_timerid) {
703 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
704 		nxgep->nxge_timerid = 0;
705 	}
706 
707 #if	defined(sun4v)
708 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
709 		(void) hsvc_unregister(&nxgep->niu_hsvc);
710 		nxgep->niu_hsvc_available = B_FALSE;
711 	}
712 #endif
713 	/*
714 	 * Stop any further interrupts.
715 	 */
716 	nxge_remove_intrs(nxgep);
717 
718 	/* Remove the soft interrupts. */
719 	nxge_remove_soft_intrs(nxgep);
720 
721 	/*
722 	 * Stop the device and free resources.
723 	 */
724 	nxge_destroy_dev(nxgep);
725 
726 	/*
727 	 * Tear down the ndd parameters setup.
728 	 */
729 	nxge_destroy_param(nxgep);
730 
731 	/*
732 	 * Tear down the kstat setup.
733 	 */
734 	nxge_destroy_kstats(nxgep);
735 
736 	/*
737 	 * Destroy all mutexes.
738 	 */
739 	nxge_destroy_mutexes(nxgep);
740 
741 	/*
742 	 * Remove all the device properties which were
743 	 * set up during attach.
744 	 */
745 	if (nxgep->dip) {
746 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
747 				    " nxge_unattach: remove all properties"));
748 
749 		(void) ddi_prop_remove_all(nxgep->dip);
750 	}
751 
752 #if NXGE_PROPERTY
753 	nxge_remove_hard_properties(nxgep);
754 #endif
755 
756 	/*
757 	 * Unmap the register setup.
758 	 */
759 	nxge_unmap_regs(nxgep);
760 
761 	nxge_fm_fini(nxgep);
762 
763 	ddi_soft_state_free(nxge_list, nxgep->instance);
764 
765 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
766 }
767 
768 static char n2_siu_name[] = "niu";
769 
770 static nxge_status_t
771 nxge_map_regs(p_nxge_t nxgep)
772 {
773 	int		ddi_status = DDI_SUCCESS;
774 	p_dev_regs_t 	dev_regs;
775 	char		buf[MAXPATHLEN + 1];
776 	char 		*devname;
777 #ifdef	NXGE_DEBUG
778 	char 		*sysname;
779 #endif
780 	off_t		regsize;
781 	nxge_status_t	status = NXGE_OK;
782 #if !defined(_BIG_ENDIAN)
783 	off_t pci_offset;
784 	uint16_t pcie_devctl;
785 #endif
786 
787 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
788 	nxgep->dev_regs = NULL;
789 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
790 	dev_regs->nxge_regh = NULL;
791 	dev_regs->nxge_pciregh = NULL;
792 	dev_regs->nxge_msix_regh = NULL;
793 	dev_regs->nxge_vir_regh = NULL;
794 	dev_regs->nxge_vir2_regh = NULL;
795 	nxgep->niu_type = NEPTUNE;
796 
797 	devname = ddi_pathname(nxgep->dip, buf);
798 	ASSERT(strlen(devname) > 0);
799 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
800 		"nxge_map_regs: pathname devname %s", devname));
801 
802 	if (strstr(devname, n2_siu_name)) {
803 		/* N2/NIU */
804 		nxgep->niu_type = N2_NIU;
805 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
806 			"nxge_map_regs: N2/NIU devname %s", devname));
807 		/* get function number */
808 		nxgep->function_num =
809 			(devname[strlen(devname) -1] == '1' ? 1 : 0);
810 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
811 			"nxge_map_regs: N2/NIU function number %d",
812 			nxgep->function_num));
813 	} else {
814 		int		*prop_val;
815 		uint_t 		prop_len;
816 		uint8_t 	func_num;
817 
818 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
819 				0, "reg",
820 				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
821 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
822 				"Reg property not found"));
823 			ddi_status = DDI_FAILURE;
824 			goto nxge_map_regs_fail0;
825 
826 		} else {
827 			func_num = (prop_val[0] >> 8) & 0x7;
828 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
829 				"Reg property found: fun # %d",
830 				func_num));
831 			nxgep->function_num = func_num;
832 			ddi_prop_free(prop_val);
833 		}
834 	}
835 
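	/*
	 * Map the register sets.  Neptune exposes its PCI config space
	 * as reg set 0, the device PIO registers as set 1, the
	 * MSI/MSI-X registers as set 2 and the virtualization region as
	 * set 3.  N2/NIU exposes the PIO registers as set 1 and two
	 * virtualization regions as sets 2 and 3 (FWARC 2006/556).
	 */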
836 	switch (nxgep->niu_type) {
837 	case NEPTUNE:
838 	case NEPTUNE_2:
839 	default:
840 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
841 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
842 			"nxge_map_regs: pci config size 0x%x", regsize));
843 
844 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
845 			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
846 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
847 		if (ddi_status != DDI_SUCCESS) {
848 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
849 				"ddi_map_regs, nxge bus config regs failed"));
850 			goto nxge_map_regs_fail0;
851 		}
852 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
853 			"nxge_map_reg: PCI config addr 0x%0llx "
854 			" handle 0x%0llx", dev_regs->nxge_pciregp,
855 			dev_regs->nxge_pciregh));
856 		/*
857 		 * IMPORTANT: workaround for a bit-swapping bug in the
858 		 * hardware which ends up setting no-snoop = yes,
859 		 * resulting in DMA not being synched properly.
860 		 */
862 #if !defined(_BIG_ENDIAN)
863 		/* workarounds for x86 systems */
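		/*
		 * The 16-bit value written below is simply
		 * PCIE_DEVCTL_RO_EN: it enables relaxed ordering and
		 * clears ENABLE_NO_SNOOP (and every other bit) in the
		 * PCIe device control register.
		 */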
864 		pci_offset = 0x80 + PCIE_DEVCTL;
865 		pcie_devctl = 0x0;
866 		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
867 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
868 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
869 				    pcie_devctl);
870 #endif
871 
872 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
873 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
874 			"nxge_map_regs: pio size 0x%x", regsize));
875 		/* set up the device mapped register */
876 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
877 			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
878 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
879 		if (ddi_status != DDI_SUCCESS) {
880 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
881 				"ddi_map_regs for Neptune global reg failed"));
882 			goto nxge_map_regs_fail1;
883 		}
884 
885 		/* set up the msi/msi-x mapped register */
886 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
887 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
888 			"nxge_map_regs: msix size 0x%x", regsize));
889 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
890 			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
891 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
892 		if (ddi_status != DDI_SUCCESS) {
893 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
894 				"ddi_map_regs for msi reg failed"));
895 			goto nxge_map_regs_fail2;
896 		}
897 
898 		/* set up the vio region mapped register */
899 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
900 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
901 			"nxge_map_regs: vio size 0x%x", regsize));
902 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
903 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
904 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
905 
906 		if (ddi_status != DDI_SUCCESS) {
907 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
908 				"ddi_map_regs for nxge vio reg failed"));
909 			goto nxge_map_regs_fail3;
910 		}
911 		nxgep->dev_regs = dev_regs;
912 
913 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
914 		NPI_PCI_ADD_HANDLE_SET(nxgep,
915 			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
916 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
917 		NPI_MSI_ADD_HANDLE_SET(nxgep,
918 			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
919 
920 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
921 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
922 
923 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
924 		NPI_REG_ADD_HANDLE_SET(nxgep,
925 			(npi_reg_ptr_t)dev_regs->nxge_regp);
926 
927 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
928 		NPI_VREG_ADD_HANDLE_SET(nxgep,
929 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
930 
931 		break;
932 
933 	case N2_NIU:
934 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
935 		/*
936 		 * Set up the device mapped registers; per FWARC 2006/556
937 		 * the "reg" property starts at entry 1.
938 		 */
939 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
940 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
941 			"nxge_map_regs: dev size 0x%x", regsize));
942 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
943 				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
944 				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
945 
946 		if (ddi_status != DDI_SUCCESS) {
947 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
948 				"ddi_map_regs for N2/NIU, global reg failed "));
949 			goto nxge_map_regs_fail1;
950 		}
951 
952 		/* set up the vio region mapped register */
953 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
954 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
955 			"nxge_map_regs: vio (1) size 0x%x", regsize));
956 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
957 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
958 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
959 
960 		if (ddi_status != DDI_SUCCESS) {
961 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
962 				"ddi_map_regs for nxge vio reg failed"));
963 			goto nxge_map_regs_fail2;
964 		}
965 		/* set up the second vio region mapped register */
966 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
967 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
968 			"nxge_map_regs: vio (3) size 0x%x", regsize));
969 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
970 			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
971 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
972 
973 		if (ddi_status != DDI_SUCCESS) {
974 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
975 				"ddi_map_regs for nxge vio2 reg failed"));
976 			goto nxge_map_regs_fail3;
977 		}
978 		nxgep->dev_regs = dev_regs;
979 
980 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
981 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
982 
983 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
984 		NPI_REG_ADD_HANDLE_SET(nxgep,
985 			(npi_reg_ptr_t)dev_regs->nxge_regp);
986 
987 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
988 		NPI_VREG_ADD_HANDLE_SET(nxgep,
989 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
990 
991 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
992 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
993 			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
994 
995 		break;
996 	}
997 
998 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
999 		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1000 
1001 	goto nxge_map_regs_exit;
1002 nxge_map_regs_fail3:
1003 	if (dev_regs->nxge_msix_regh) {
1004 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1005 	}
1006 	if (dev_regs->nxge_vir_regh) {
1007 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1008 	}
1009 nxge_map_regs_fail2:
1010 	if (dev_regs->nxge_regh) {
1011 		ddi_regs_map_free(&dev_regs->nxge_regh);
1012 	}
1013 nxge_map_regs_fail1:
1014 	if (dev_regs->nxge_pciregh) {
1015 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1016 	}
1017 nxge_map_regs_fail0:
1018 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1019 	kmem_free(dev_regs, sizeof (dev_regs_t));
1020 
1021 nxge_map_regs_exit:
1022 	if (ddi_status != DDI_SUCCESS)
1023 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1024 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1025 	return (status);
1026 }
1027 
1028 static void
1029 nxge_unmap_regs(p_nxge_t nxgep)
1030 {
1031 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1032 	if (nxgep->dev_regs) {
1033 		if (nxgep->dev_regs->nxge_pciregh) {
1034 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1035 				"==> nxge_unmap_regs: bus"));
1036 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1037 			nxgep->dev_regs->nxge_pciregh = NULL;
1038 		}
1039 		if (nxgep->dev_regs->nxge_regh) {
1040 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1041 				"==> nxge_unmap_regs: device registers"));
1042 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1043 			nxgep->dev_regs->nxge_regh = NULL;
1044 		}
1045 		if (nxgep->dev_regs->nxge_msix_regh) {
1046 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1047 				"==> nxge_unmap_regs: device interrupts"));
1048 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1049 			nxgep->dev_regs->nxge_msix_regh = NULL;
1050 		}
1051 		if (nxgep->dev_regs->nxge_vir_regh) {
1052 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1053 				"==> nxge_unmap_regs: vio region"));
1054 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1055 			nxgep->dev_regs->nxge_vir_regh = NULL;
1056 		}
1057 		if (nxgep->dev_regs->nxge_vir2_regh) {
1058 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1059 				"==> nxge_unmap_regs: vio2 region"));
1060 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1061 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1062 		}
1063 
1064 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1065 		nxgep->dev_regs = NULL;
1066 	}
1067 
1068 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1069 }
1070 
1071 static nxge_status_t
1072 nxge_setup_mutexes(p_nxge_t nxgep)
1073 {
1074 	int ddi_status = DDI_SUCCESS;
1075 	nxge_status_t status = NXGE_OK;
1076 	nxge_classify_t *classify_ptr;
1077 	int partition;
1078 
1079 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1080 
1081 	/*
1082 	 * Get the interrupt cookie so that the mutexes can be
1083 	 * initialized.
1084 	 */
1085 	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1086 					&nxgep->interrupt_cookie);
1087 	if (ddi_status != DDI_SUCCESS) {
1088 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1089 			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
1090 		goto nxge_setup_mutexes_exit;
1091 	}
1092 
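	/*
	 * The MDIO and MII locks are shared by all nxge instances.  The
	 * first instance to attach initializes each lock; the atomic
	 * counters record how many instances reference them so that
	 * nxge_destroy_mutexes() destroys a lock only when the last
	 * reference goes away.
	 */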
1093 	/* Initialize the global mutexes. */
1094 
1095 	if (nxge_mdio_lock_init == 0) {
1096 		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
1097 	}
1098 	atomic_add_32(&nxge_mdio_lock_init, 1);
1099 
1100 	if (nxge_mii_lock_init == 0) {
1101 		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
1102 	}
1103 	atomic_add_32(&nxge_mii_lock_init, 1);
1104 
1105 	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
1106 	nxgep->drv_state |= STATE_MII_LOCK_INIT;
1107 
1108 	/*
1109 	 * Initialize the mutexes for this device.
1110 	 */
1111 	MUTEX_INIT(nxgep->genlock, NULL,
1112 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1113 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1114 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1115 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1116 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1117 	RW_INIT(&nxgep->filter_lock, NULL,
1118 		RW_DRIVER, (void *)nxgep->interrupt_cookie);
1119 
1120 	classify_ptr = &nxgep->classifier;
1121 	/*
1122 	 * FFLP mutexes are never used in interrupt context because FFLP
1123 	 * operations can take a very long time to complete and hence are
1124 	 * not suitable to invoke from interrupt handlers.
1125 	 */
1127 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1128 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1129 	if (nxgep->niu_type == NEPTUNE) {
1130 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1131 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1132 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1133 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1134 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1135 		}
1136 	}
1137 
1138 nxge_setup_mutexes_exit:
1139 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1140 			"<== nxge_setup_mutexes status = %x", status));
1141 
1142 	if (ddi_status != DDI_SUCCESS)
1143 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1144 
1145 	return (status);
1146 }
1147 
1148 static void
1149 nxge_destroy_mutexes(p_nxge_t nxgep)
1150 {
1151 	int partition;
1152 	nxge_classify_t *classify_ptr;
1153 
1154 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1155 	RW_DESTROY(&nxgep->filter_lock);
1156 	MUTEX_DESTROY(&nxgep->mif_lock);
1157 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1158 	MUTEX_DESTROY(nxgep->genlock);
1159 
1160 	classify_ptr = &nxgep->classifier;
1161 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1162 
1163 	/* Free data structures, based on HW type. */
1164 	if (nxgep->niu_type == NEPTUNE) {
1165 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1166 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1167 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1168 		}
1169 	}
1170 	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
1171 		if (nxge_mdio_lock_init == 1) {
1172 			MUTEX_DESTROY(&nxge_mdio_lock);
1173 		}
1174 		atomic_add_32(&nxge_mdio_lock_init, -1);
1175 	}
1176 	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
1177 		if (nxge_mii_lock_init == 1) {
1178 			MUTEX_DESTROY(&nxge_mii_lock);
1179 		}
1180 		atomic_add_32(&nxge_mii_lock_init, -1);
1181 	}
1182 
1183 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1184 }
1185 
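/*
 * nxge_init: bring the hardware to a fully initialized,
 * interrupt-enabled state.  Each step that fails unwinds the steps
 * that preceded it through the nxge_init_fail* labels below.
 */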
1186 nxge_status_t
1187 nxge_init(p_nxge_t nxgep)
1188 {
1189 	nxge_status_t	status = NXGE_OK;
1190 
1191 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1192 
1193 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1194 		return (status);
1195 	}
1196 
1197 	/*
1198 	 * Allocate system memory for the receive/transmit buffer blocks
1199 	 * and receive/transmit descriptor rings.
1200 	 */
1201 	status = nxge_alloc_mem_pool(nxgep);
1202 	if (status != NXGE_OK) {
1203 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1204 		goto nxge_init_fail1;
1205 	}
1206 
1207 	/*
1208 	 * Initialize and enable TXC registers
1209 	 * (Globally enable TX controller,
1210 	 *  enable a port, configure dma channel bitmap,
1211 	 *  configure the max burst size).
1212 	 */
1213 	status = nxge_txc_init(nxgep);
1214 	if (status != NXGE_OK) {
1215 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
1216 		goto nxge_init_fail2;
1217 	}
1218 
1219 	/*
1220 	 * Initialize and enable TXDMA channels.
1221 	 */
1222 	status = nxge_init_txdma_channels(nxgep);
1223 	if (status != NXGE_OK) {
1224 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1225 		goto nxge_init_fail3;
1226 	}
1227 
1228 	/*
1229 	 * Initialize and enable RXDMA channels.
1230 	 */
1231 	status = nxge_init_rxdma_channels(nxgep);
1232 	if (status != NXGE_OK) {
1233 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1234 		goto nxge_init_fail4;
1235 	}
1236 
1237 	/*
1238 	 * Initialize TCAM and FCRAM (Neptune).
1239 	 */
1240 	status = nxge_classify_init(nxgep);
1241 	if (status != NXGE_OK) {
1242 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1243 		goto nxge_init_fail5;
1244 	}
1245 
1246 	/*
1247 	 * Initialize ZCP
1248 	 */
1249 	status = nxge_zcp_init(nxgep);
1250 	if (status != NXGE_OK) {
1251 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1252 		goto nxge_init_fail5;
1253 	}
1254 
1255 	/*
1256 	 * Initialize IPP.
1257 	 */
1258 	status = nxge_ipp_init(nxgep);
1259 	if (status != NXGE_OK) {
1260 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1261 		goto nxge_init_fail5;
1262 	}
1263 
1264 	/*
1265 	 * Initialize the MAC block.
1266 	 */
1267 	status = nxge_mac_init(nxgep);
1268 	if (status != NXGE_OK) {
1269 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1270 		goto nxge_init_fail5;
1271 	}
1272 
1273 	nxge_intrs_enable(nxgep);
1274 
1275 	/*
1276 	 * Enable hardware interrupts.
1277 	 */
1278 	nxge_intr_hw_enable(nxgep);
1279 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1280 
1281 	goto nxge_init_exit;
1282 
1283 nxge_init_fail5:
1284 	nxge_uninit_rxdma_channels(nxgep);
1285 nxge_init_fail4:
1286 	nxge_uninit_txdma_channels(nxgep);
1287 nxge_init_fail3:
1288 	(void) nxge_txc_uninit(nxgep);
1289 nxge_init_fail2:
1290 	nxge_free_mem_pool(nxgep);
1291 nxge_init_fail1:
1292 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1293 		"<== nxge_init status (failed) = 0x%08x", status));
1294 	return (status);
1295 
1296 nxge_init_exit:
1297 
1298 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1299 		status));
1300 	return (status);
1301 }
1302 
1303 
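/*
 * nxge_start_timer: arm a timeout(9F) callback unless the instance is
 * suspended; returns NULL when no timer was started.
 */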
1304 timeout_id_t
1305 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1306 {
1307 	if ((nxgep->suspended == 0) ||
1308 			(nxgep->suspended == DDI_RESUME)) {
1309 		return (timeout(func, (caddr_t)nxgep,
1310 			drv_usectohz(1000 * msec)));
1311 	}
1312 	return (NULL);
1313 }
1314 
1315 /*ARGSUSED*/
1316 void
1317 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1318 {
1319 	if (timerid) {
1320 		(void) untimeout(timerid);
1321 	}
1322 }
1323 
1324 void
1325 nxge_uninit(p_nxge_t nxgep)
1326 {
1327 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1328 
1329 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1330 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1331 			"==> nxge_uninit: not initialized"));
1332 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1333 			"<== nxge_uninit"));
1334 		return;
1335 	}
1336 
1337 	/* stop timer */
1338 	if (nxgep->nxge_timerid) {
1339 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1340 		nxgep->nxge_timerid = 0;
1341 	}
1342 
1343 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1344 	(void) nxge_intr_hw_disable(nxgep);
1345 
1346 	/*
1347 	 * Reset the receive MAC side.
1348 	 */
1349 	(void) nxge_rx_mac_disable(nxgep);
1350 
1351 	/* Disable and soft reset the IPP */
1352 	(void) nxge_ipp_disable(nxgep);
1353 
1354 	/* Free classification resources */
1355 	(void) nxge_classify_uninit(nxgep);
1356 
1357 	/*
1358 	 * Reset the transmit/receive DMA side.
1359 	 */
1360 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1361 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1362 
1363 	nxge_uninit_txdma_channels(nxgep);
1364 	nxge_uninit_rxdma_channels(nxgep);
1365 
1366 	/*
1367 	 * Reset the transmit MAC side.
1368 	 */
1369 	(void) nxge_tx_mac_disable(nxgep);
1370 
1371 	nxge_free_mem_pool(nxgep);
1372 
1373 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1374 
1375 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1376 
1377 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1378 		"nxge_mblks_pending %d", nxge_mblks_pending));
1379 }
1380 
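/*
 * nxge_get64: read a 64-bit device register on behalf of an ioctl.
 * The first 8 bytes of the message block carry the register offset;
 * the value read is copied back over them.
 */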
1381 void
1382 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1383 {
1384 	uint64_t	reg;
1385 	uint64_t	regdata;
1386 	int		i, retry;
1387 
1388 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1389 	regdata = 0;
1390 	retry = 1;
1391 
1392 	for (i = 0; i < retry; i++) {
1393 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1394 	}
1395 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1396 }
1397 
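/*
 * nxge_put64: write a 64-bit device register on behalf of an ioctl.
 * The message block carries the register offset followed by the
 * 64-bit value to write.
 */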
1398 void
1399 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1400 {
1401 	uint64_t	reg;
1402 	uint64_t	buf[2];
1403 
1404 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1405 	reg = buf[0];
1406 
1407 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1408 }
1409 
1410 
1411 nxge_os_mutex_t nxgedebuglock;
1412 int nxge_debug_init = 0;
1413 
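/*
 * nxge_debug_msg: emit a driver message when the requested level is
 * enabled for this instance, or unconditionally for NXGE_NOTE and
 * NXGE_ERR_CTL messages.
 */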
1414 /*ARGSUSED*/
1415 /*VARARGS*/
1416 void
1417 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1418 {
1419 	char msg_buffer[1048];
1420 	char prefix_buffer[32];
1421 	int instance;
1422 	uint64_t debug_level;
1423 	int cmn_level = CE_CONT;
1424 	va_list ap;
1425 
1426 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1427 		nxgep->nxge_debug_level;
1428 
1429 	if ((level & debug_level) ||
1430 		(level == NXGE_NOTE) ||
1431 		(level == NXGE_ERR_CTL)) {
1432 		/* do the msg processing */
1433 		if (nxge_debug_init == 0) {
1434 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1435 			nxge_debug_init = 1;
1436 		}
1437 
1438 		MUTEX_ENTER(&nxgedebuglock);
1439 
1440 		if ((level & NXGE_NOTE)) {
1441 			cmn_level = CE_NOTE;
1442 		}
1443 
1444 		if (level & NXGE_ERR_CTL) {
1445 			cmn_level = CE_WARN;
1446 		}
1447 
1448 		va_start(ap, fmt);
1449 		(void) vsprintf(msg_buffer, fmt, ap);
1450 		va_end(ap);
1451 		if (nxgep == NULL) {
1452 			instance = -1;
1453 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1454 		} else {
1455 			instance = nxgep->instance;
1456 			(void) sprintf(prefix_buffer,
1457 						    "%s%d :", "nxge", instance);
1458 		}
1459 
1460 		MUTEX_EXIT(&nxgedebuglock);
1461 		cmn_err(cmn_level, "!%s %s\n",
1462 				prefix_buffer, msg_buffer);
1463 
1464 	}
1465 }
1466 
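/*
 * nxge_dump_packet: format up to MAX_DUMP_SZ bytes as colon-separated
 * hex digits (leading zero nibbles suppressed).  Longer packets are
 * shown as their first and last MAX_DUMP_SZ/2 bytes separated by a
 * run of dots.  The result lives in a static buffer and is
 * overwritten by the next call.
 */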
1467 char *
1468 nxge_dump_packet(char *addr, int size)
1469 {
1470 	uchar_t *ap = (uchar_t *)addr;
1471 	int i;
1472 	static char etherbuf[1024];
1473 	char *cp = etherbuf;
1474 	char digits[] = "0123456789abcdef";
1475 
1476 	if (!size)
1477 		size = 60;
1478 
1479 	if (size > MAX_DUMP_SZ) {
1480 		/* Dump the leading bytes */
1481 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1482 			if (*ap > 0x0f)
1483 				*cp++ = digits[*ap >> 4];
1484 			*cp++ = digits[*ap++ & 0xf];
1485 			*cp++ = ':';
1486 		}
1487 		for (i = 0; i < 20; i++)
1488 			*cp++ = '.';
1489 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1490 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1491 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1492 			if (*ap > 0x0f)
1493 				*cp++ = digits[*ap >> 4];
1494 			*cp++ = digits[*ap++ & 0xf];
1495 			*cp++ = ':';
1496 		}
1497 	} else {
1498 		for (i = 0; i < size; i++) {
1499 			if (*ap > 0x0f)
1500 				*cp++ = digits[*ap >> 4];
1501 			*cp++ = digits[*ap++ & 0xf];
1502 			*cp++ = ':';
1503 		}
1504 	}
1505 	*--cp = 0;
1506 	return (etherbuf);
1507 }
1508 
1509 #ifdef	NXGE_DEBUG
1510 static void
1511 nxge_test_map_regs(p_nxge_t nxgep)
1512 {
1513 	ddi_acc_handle_t cfg_handle;
1514 	p_pci_cfg_t	cfg_ptr;
1515 	ddi_acc_handle_t dev_handle;
1516 	char		*dev_ptr;
1517 	ddi_acc_handle_t pci_config_handle;
1518 	uint32_t	regval;
1519 	int		i;
1520 
1521 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1522 
1523 	dev_handle = nxgep->dev_regs->nxge_regh;
1524 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1525 
1526 	if (nxgep->niu_type == NEPTUNE) {
1527 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1528 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1529 
1530 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1531 			"Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1532 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1533 			"Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1534 			&cfg_ptr->vendorid));
1535 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1536 			"\tvendorid 0x%x devid 0x%x",
1537 			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1538 			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1539 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1540 			"PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1541 			"bar1c 0x%x",
1542 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1543 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1544 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1545 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1546 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1547 			"\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1548 			"base 28 0x%x bar2c 0x%x\n",
1549 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1550 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1551 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1552 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1553 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1554 			"\nNeptune PCI BAR: base30 0x%x\n",
1555 			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1556 
1557 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1558 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1559 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1560 			"first  0x%llx second 0x%llx third 0x%llx "
1561 			"last 0x%llx ",
1562 			NXGE_PIO_READ64(dev_handle,
1563 				    (uint64_t *)(dev_ptr + 0),  0),
1564 			NXGE_PIO_READ64(dev_handle,
1565 				    (uint64_t *)(dev_ptr + 8),  0),
1566 			NXGE_PIO_READ64(dev_handle,
1567 				    (uint64_t *)(dev_ptr + 16), 0),
1568 			NXGE_PIO_READ64(dev_handle,
1569 				    (uint64_t *)(dev_ptr + 24), 0)));
1570 	}
1571 }
1572 
1573 #endif
1574 
1575 static void
1576 nxge_suspend(p_nxge_t nxgep)
1577 {
1578 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1579 
1580 	nxge_intrs_disable(nxgep);
1581 	nxge_destroy_dev(nxgep);
1582 
1583 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1584 }
1585 
1586 static nxge_status_t
1587 nxge_resume(p_nxge_t nxgep)
1588 {
1589 	nxge_status_t status = NXGE_OK;
1590 
1591 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1592 	nxgep->suspended = DDI_RESUME;
1593 
1594 	nxge_global_reset(nxgep);
1595 	nxgep->suspended = 0;
1596 
1597 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1598 			"<== nxge_resume status = 0x%x", status));
1599 	return (status);
1600 }
1601 
1602 static nxge_status_t
1603 nxge_setup_dev(p_nxge_t nxgep)
1604 {
1605 	nxge_status_t	status = NXGE_OK;
1606 
1607 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1608 			nxgep->mac.portnum));
1609 
1610 	status = nxge_xcvr_find(nxgep);
1611 	if (status != NXGE_OK) {
1612 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1613 			    " nxge_setup_dev status "
1614 			    " (xcvr find 0x%08x)", status));
1615 		goto nxge_setup_dev_exit;
1616 	}
1617 
1618 	status = nxge_link_init(nxgep);
1619 
1620 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1621 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1622 			"port%d Bad register acc handle", nxgep->mac.portnum));
1623 		status = NXGE_ERROR;
1624 	}
1625 
1626 	if (status != NXGE_OK) {
1627 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1628 			    " nxge_setup_dev status "
1629 			    "(xcvr init 0x%08x)", status));
1630 		goto nxge_setup_dev_exit;
1631 	}
1632 
1633 nxge_setup_dev_exit:
1634 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1635 		"<== nxge_setup_dev port %d status = 0x%08x",
1636 		nxgep->mac.portnum, status));
1637 
1638 	return (status);
1639 }
1640 
1641 static void
1642 nxge_destroy_dev(p_nxge_t nxgep)
1643 {
1644 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
1645 
1646 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1647 
1648 	(void) nxge_hw_stop(nxgep);
1649 
1650 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
1651 }
1652 
1653 static nxge_status_t
1654 nxge_setup_system_dma_pages(p_nxge_t nxgep)
1655 {
1656 	int 			ddi_status = DDI_SUCCESS;
1657 	uint_t 			count;
1658 	ddi_dma_cookie_t 	cookie;
1659 	uint_t 			iommu_pagesize;
1660 	nxge_status_t		status = NXGE_OK;
1661 
1662 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
1663 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
1664 	if (nxgep->niu_type != N2_NIU) {
1665 		iommu_pagesize = dvma_pagesize(nxgep->dip);
1666 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1667 			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1668 			" default_block_size %d iommu_pagesize %d",
1669 			nxgep->sys_page_sz,
1670 			ddi_ptob(nxgep->dip, (ulong_t)1),
1671 			nxgep->rx_default_block_size,
1672 			iommu_pagesize));
1673 
1674 		if (iommu_pagesize != 0) {
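		/*
		 * Constrain sys_page_sz: never let it exceed the IOMMU
		 * page size, and cap it at 16K even when the IOMMU page
		 * size matches the system page size.
		 */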
1675 			if (nxgep->sys_page_sz == iommu_pagesize) {
1676 				if (iommu_pagesize > 0x4000)
1677 					nxgep->sys_page_sz = 0x4000;
1678 			} else {
1679 				if (nxgep->sys_page_sz > iommu_pagesize)
1680 					nxgep->sys_page_sz = iommu_pagesize;
1681 			}
1682 		}
1683 	}
1684 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1685 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1686 		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1687 		"default_block_size %d page mask %d",
1688 		nxgep->sys_page_sz,
1689 		ddi_ptob(nxgep->dip, (ulong_t)1),
1690 		nxgep->rx_default_block_size,
1691 		nxgep->sys_page_mask));
1692 
1693 
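	/*
	 * Map the chosen system page size to the receive block size and
	 * the corresponding RBR block-size code; unsupported sizes fall
	 * back to 4K blocks.
	 */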
1694 	switch (nxgep->sys_page_sz) {
1695 	default:
1696 		nxgep->sys_page_sz = 0x1000;
1697 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1698 		nxgep->rx_default_block_size = 0x1000;
1699 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1700 		break;
1701 	case 0x1000:
1702 		nxgep->rx_default_block_size = 0x1000;
1703 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1704 		break;
1705 	case 0x2000:
1706 		nxgep->rx_default_block_size = 0x2000;
1707 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1708 		break;
1709 	case 0x4000:
1710 		nxgep->rx_default_block_size = 0x4000;
1711 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
1712 		break;
1713 	case 0x8000:
1714 		nxgep->rx_default_block_size = 0x8000;
1715 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
1716 		break;
1717 	}
1718 
1719 #ifndef USE_RX_BIG_BUF
1720 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
1721 #else
1722 	nxgep->rx_default_block_size = 0x2000;
1723 	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1724 #endif
1725 	/*
1726 	 * Get the system DMA burst size.
1727 	 */
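	/*
	 * Bind a throwaway DMA handle to an arbitrary kernel address so
	 * that ddi_dma_burstsizes() can report what the system
	 * supports; the handle is unbound and freed again below.
	 */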
1728 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
1729 			DDI_DMA_DONTWAIT, 0,
1730 			&nxgep->dmasparehandle);
1731 	if (ddi_status != DDI_SUCCESS) {
1732 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1733 			"ddi_dma_alloc_handle: failed "
1734 			" status 0x%x", ddi_status));
1735 		goto nxge_get_soft_properties_exit;
1736 	}
1737 
1738 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
1739 				(caddr_t)nxgep->dmasparehandle,
1740 				sizeof (nxgep->dmasparehandle),
1741 				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1742 				DDI_DMA_DONTWAIT, 0,
1743 				&cookie, &count);
1744 	if (ddi_status != DDI_DMA_MAPPED) {
1745 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1746 			"Binding spare handle to find system"
1747 			" burstsize failed."));
1748 		ddi_status = DDI_FAILURE;
1749 		goto nxge_get_soft_properties_fail1;
1750 	}
1751 
1752 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
1753 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
1754 
1755 nxge_get_soft_properties_fail1:
1756 	ddi_dma_free_handle(&nxgep->dmasparehandle);
1757 
1758 nxge_get_soft_properties_exit:
1759 
1760 	if (ddi_status != DDI_SUCCESS)
1761 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1762 
1763 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1764 		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
1765 	return (status);
1766 }
1767 
1768 static nxge_status_t
1769 nxge_alloc_mem_pool(p_nxge_t nxgep)
1770 {
1771 	nxge_status_t	status = NXGE_OK;
1772 
1773 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
1774 
1775 	status = nxge_alloc_rx_mem_pool(nxgep);
1776 	if (status != NXGE_OK) {
1777 		return (NXGE_ERROR);
1778 	}
1779 
1780 	status = nxge_alloc_tx_mem_pool(nxgep);
1781 	if (status != NXGE_OK) {
1782 		nxge_free_rx_mem_pool(nxgep);
1783 		return (NXGE_ERROR);
1784 	}
1785 
1786 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
1787 	return (NXGE_OK);
1788 }
1789 
1790 static void
1791 nxge_free_mem_pool(p_nxge_t nxgep)
1792 {
1793 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
1794 
1795 	nxge_free_rx_mem_pool(nxgep);
1796 	nxge_free_tx_mem_pool(nxgep);
1797 
1798 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
1799 }
1800 
1801 static nxge_status_t
1802 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
1803 {
1804 	int			i, j;
1805 	uint32_t		ndmas, st_rdc;
1806 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
1807 	p_nxge_hw_pt_cfg_t	p_cfgp;
1808 	p_nxge_dma_pool_t	dma_poolp;
1809 	p_nxge_dma_common_t	*dma_buf_p;
1810 	p_nxge_dma_pool_t	dma_cntl_poolp;
1811 	p_nxge_dma_common_t	*dma_cntl_p;
1812 	size_t			rx_buf_alloc_size;
1813 	size_t			rx_cntl_alloc_size;
1814 	uint32_t 		*num_chunks; /* per dma */
1815 	nxge_status_t		status = NXGE_OK;
1816 
1817 	uint32_t		nxge_port_rbr_size;
1818 	uint32_t		nxge_port_rbr_spare_size;
1819 	uint32_t		nxge_port_rcr_size;
1820 
1821 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
1822 
1823 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1824 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1825 	st_rdc = p_cfgp->start_rdc;
1826 	ndmas = p_cfgp->max_rdcs;
1827 
1828 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1829 		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1830 
1831 	/*
1832 	 * Allocate memory for each receive DMA channel.
1833 	 */
1834 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
1835 			KM_SLEEP);
1836 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1837 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1838 
1839 	dma_cntl_poolp = (p_nxge_dma_pool_t)
1840 				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
1841 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1842 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1843 
1844 	num_chunks = (uint32_t *)KMEM_ZALLOC(
1845 			sizeof (uint32_t) * ndmas, KM_SLEEP);
1846 
1847 	/*
1848 	 * Assume that each DMA channel will be configured with default
1849 	 * block size.
	 * RBR block counts must be a multiple of the post batch count (16).
1851 	 */
1852 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
1853 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
1854 
1855 	if (!nxge_port_rbr_size) {
1856 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
1857 	}
1858 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
1859 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
1860 			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
1861 	}
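
	/*
	 * Illustrative example (value assumed, batch count of 16 as
	 * noted above): a requested rbr_size of 1000 rounds up to
	 * 16 * (1000 / 16 + 1) = 16 * 63 = 1008 blocks.
	 */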
1862 
1863 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
1864 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
1865 
1866 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
1867 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
1868 			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
1869 	}
1870 
1871 	/*
	 * N2/NIU limits the descriptor sizes: contiguous memory
	 * allocations for data buffers are limited to 4M
	 * (contig_mem_alloc), and control buffers must be little
	 * endian (and must use the ddi/dki memory allocation functions).
1876 	 */
1877 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1878 	if (nxgep->niu_type == N2_NIU) {
1879 		nxge_port_rbr_spare_size = 0;
1880 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
1881 				(!ISP2(nxge_port_rbr_size))) {
1882 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
1883 		}
1884 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
1885 				(!ISP2(nxge_port_rcr_size))) {
1886 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
1887 		}
1888 	}
1889 #endif
1890 
1891 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
1892 		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
1893 
1894 	/*
1895 	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must all be cache-aligned (64 bytes).
1897 	 */
1898 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
1899 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1900 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
1901 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
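
	/*
	 * That is, per channel:
	 *
	 *	rx_cntl_alloc_size =
	 *	    (rbr + spare) * sizeof (rx_desc_t) +
	 *	    rcr * sizeof (rcr_entry_t) +
	 *	    sizeof (rxdma_mailbox_t)
	 */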
1902 
1903 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
1904 		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
1905 		"nxge_port_rcr_size = %d "
1906 		"rx_cntl_alloc_size = %d",
1907 		nxge_port_rbr_size, nxge_port_rbr_spare_size,
1908 		nxge_port_rcr_size,
1909 		rx_cntl_alloc_size));
1910 
1911 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1912 	if (nxgep->niu_type == N2_NIU) {
1913 		if (!ISP2(rx_buf_alloc_size)) {
1914 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1915 				"==> nxge_alloc_rx_mem_pool: "
				" buffer size must be a power of 2"));
1917 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1918 			goto nxge_alloc_rx_mem_pool_exit;
1919 		}
1920 
1921 		if (rx_buf_alloc_size > (1 << 22)) {
1922 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1923 				"==> nxge_alloc_rx_mem_pool: "
				" buffer size is limited to 4M"));
1925 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1926 			goto nxge_alloc_rx_mem_pool_exit;
1927 		}
1928 
1929 		if (rx_cntl_alloc_size < 0x2000) {
1930 			rx_cntl_alloc_size = 0x2000;
1931 		}
1932 	}
1933 #endif
1934 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
1935 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
1936 
1937 	/*
1938 	 * Allocate memory for receive buffers and descriptor rings.
1939 	 * Replace allocation functions with interface functions provided
1940 	 * by the partition manager when it is available.
1941 	 */
1942 	/*
1943 	 * Allocate memory for the receive buffer blocks.
1944 	 */
1945 	for (i = 0; i < ndmas; i++) {
1946 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1947 			" nxge_alloc_rx_mem_pool to alloc mem: "
1948 			" dma %d dma_buf_p %llx &dma_buf_p %llx",
1949 			i, dma_buf_p[i], &dma_buf_p[i]));
1950 		num_chunks[i] = 0;
1951 		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
1952 				rx_buf_alloc_size,
1953 				nxgep->rx_default_block_size, &num_chunks[i]);
1954 		if (status != NXGE_OK) {
1955 			break;
1956 		}
1957 		st_rdc++;
1958 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1959 			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
1960 			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1961 			dma_buf_p[i], &dma_buf_p[i]));
1962 	}
1963 	if (i < ndmas) {
1964 		goto nxge_alloc_rx_mem_fail1;
1965 	}
1966 	/*
1967 	 * Allocate memory for descriptor rings and mailbox.
1968 	 */
1969 	st_rdc = p_cfgp->start_rdc;
1970 	for (j = 0; j < ndmas; j++) {
1971 		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
1972 					rx_cntl_alloc_size);
1973 		if (status != NXGE_OK) {
1974 			break;
1975 		}
1976 		st_rdc++;
1977 	}
1978 	if (j < ndmas) {
1979 		goto nxge_alloc_rx_mem_fail2;
1980 	}
1981 
1982 	dma_poolp->ndmas = ndmas;
1983 	dma_poolp->num_chunks = num_chunks;
1984 	dma_poolp->buf_allocated = B_TRUE;
1985 	nxgep->rx_buf_pool_p = dma_poolp;
1986 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1987 
1988 	dma_cntl_poolp->ndmas = ndmas;
1989 	dma_cntl_poolp->buf_allocated = B_TRUE;
1990 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
1991 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
1992 
1993 	goto nxge_alloc_rx_mem_pool_exit;
1994 
1995 nxge_alloc_rx_mem_fail2:
1996 	/* Free control buffers */
1997 	j--;
1998 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1999 		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
2000 	for (; j >= 0; j--) {
2001 		nxge_free_rx_cntl_dma(nxgep,
			(p_nxge_dma_common_t)dma_cntl_p[j]);
2003 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2004 			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
2005 			j));
2006 	}
2007 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2008 		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
2009 
2010 nxge_alloc_rx_mem_fail1:
2011 	/* Free data buffers */
2012 	i--;
2013 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2014 		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
2015 	for (; i >= 0; i--) {
2016 		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2017 			num_chunks[i]);
2018 	}
2019 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2020 		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
2021 
2022 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2023 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2024 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2025 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2026 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2027 
2028 nxge_alloc_rx_mem_pool_exit:
2029 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2030 		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2031 
2032 	return (status);
2033 }
2034 
2035 static void
2036 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2037 {
2038 	uint32_t		i, ndmas;
2039 	p_nxge_dma_pool_t	dma_poolp;
2040 	p_nxge_dma_common_t	*dma_buf_p;
2041 	p_nxge_dma_pool_t	dma_cntl_poolp;
2042 	p_nxge_dma_common_t	*dma_cntl_p;
2043 	uint32_t 		*num_chunks;
2044 
2045 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2046 
2047 	dma_poolp = nxgep->rx_buf_pool_p;
2048 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2049 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2050 			"<== nxge_free_rx_mem_pool "
			"(null rx buf pool or buf not allocated)"));
2052 		return;
2053 	}
2054 
2055 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
2056 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2057 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2058 			"<== nxge_free_rx_mem_pool "
			"(null rx cntl buf pool or cntl buf not allocated)"));
2060 		return;
2061 	}
2062 
2063 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2064 	num_chunks = dma_poolp->num_chunks;
2065 
2066 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2067 	ndmas = dma_cntl_poolp->ndmas;
2068 
2069 	for (i = 0; i < ndmas; i++) {
2070 		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2071 	}
2072 
2073 	for (i = 0; i < ndmas; i++) {
2074 		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
2075 	}
2076 
2077 	for (i = 0; i < ndmas; i++) {
2078 		KMEM_FREE(dma_buf_p[i],
2079 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2080 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2081 	}
2082 
2083 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2084 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2085 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2086 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2087 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2088 
2089 	nxgep->rx_buf_pool_p = NULL;
2090 	nxgep->rx_cntl_pool_p = NULL;
2091 
2092 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2093 }
2094 
2096 static nxge_status_t
2097 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2098 	p_nxge_dma_common_t *dmap,
2099 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2100 {
2101 	p_nxge_dma_common_t 	rx_dmap;
2102 	nxge_status_t		status = NXGE_OK;
2103 	size_t			total_alloc_size;
2104 	size_t			allocated = 0;
2105 	int			i, size_index, array_size;
2106 
2107 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2108 
2109 	rx_dmap = (p_nxge_dma_common_t)
2110 			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2111 			KM_SLEEP);
2112 
2113 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2114 		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2115 		dma_channel, alloc_size, block_size, dmap));
2116 
2117 	total_alloc_size = alloc_size;
2118 
2119 #if defined(RX_USE_RECLAIM_POST)
2120 	total_alloc_size = alloc_size + alloc_size/4;
2121 #endif
2122 
2123 	i = 0;
2124 	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
			(alloc_sizes[size_index] < alloc_size))
			size_index++;
2129 	if (size_index >= array_size) {
2130 		size_index = array_size - 1;
2131 	}
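
	/*
	 * Allocation strategy (a sketch; alloc_sizes[] is assumed to be
	 * an ascending table of chunk sizes): start with the smallest
	 * entry that covers alloc_size, then on each allocation failure
	 * fall back to the next smaller entry, accumulating chunks until
	 * total_alloc_size bytes are covered, NXGE_DMA_BLOCK chunks have
	 * been allocated, or the size table is exhausted.
	 */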
2132 
2133 	while ((allocated < total_alloc_size) &&
2134 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2135 		rx_dmap[i].dma_chunk_index = i;
2136 		rx_dmap[i].block_size = block_size;
2137 		rx_dmap[i].alength = alloc_sizes[size_index];
2138 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2139 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2140 		rx_dmap[i].dma_channel = dma_channel;
2141 		rx_dmap[i].contig_alloc_type = B_FALSE;
2142 
2143 		/*
2144 		 * N2/NIU: data buffers must be contiguous as the driver
2145 		 *	   needs to call Hypervisor api to set up
2146 		 *	   logical pages.
2147 		 */
2148 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2149 			rx_dmap[i].contig_alloc_type = B_TRUE;
2150 		}
2151 
2152 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2153 			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2154 			"i %d nblocks %d alength %d",
2155 			dma_channel, i, &rx_dmap[i], block_size,
2156 			i, rx_dmap[i].nblocks,
2157 			rx_dmap[i].alength));
2158 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2159 			&nxge_rx_dma_attr,
2160 			rx_dmap[i].alength,
2161 			&nxge_dev_buf_dma_acc_attr,
2162 			DDI_DMA_READ | DDI_DMA_STREAMING,
2163 			(p_nxge_dma_common_t)(&rx_dmap[i]));
2164 		if (status != NXGE_OK) {
2165 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2166 				" nxge_alloc_rx_buf_dma: Alloc Failed "));
2167 			size_index--;
2168 		} else {
2169 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2170 				" alloc_rx_buf_dma allocated rdc %d "
2171 				"chunk %d size %x dvma %x bufp %llx ",
2172 				dma_channel, i, rx_dmap[i].alength,
2173 				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
2174 			i++;
2175 			allocated += alloc_sizes[size_index];
2176 		}
2177 	}
2178 
2180 	if (allocated < total_alloc_size) {
2181 		goto nxge_alloc_rx_mem_fail1;
2182 	}
2183 
2184 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2185 		" alloc_rx_buf_dma rdc %d allocated %d chunks",
2186 		dma_channel, i));
2187 	*num_chunks = i;
2188 	*dmap = rx_dmap;
2189 
2190 	goto nxge_alloc_rx_mem_exit;
2191 
2192 nxge_alloc_rx_mem_fail1:
2193 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2194 
2195 nxge_alloc_rx_mem_exit:
2196 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2197 		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2198 
2199 	return (status);
2200 }
2201 
2202 /*ARGSUSED*/
2203 static void
2204 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2205     uint32_t num_chunks)
2206 {
2207 	int		i;
2208 
2209 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2210 		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2211 
2212 	for (i = 0; i < num_chunks; i++) {
2213 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2214 			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2215 				i, dmap));
2216 		nxge_dma_mem_free(dmap++);
2217 	}
2218 
2219 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
2220 }
2221 
2222 /*ARGSUSED*/
2223 static nxge_status_t
2224 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2225     p_nxge_dma_common_t *dmap, size_t size)
2226 {
2227 	p_nxge_dma_common_t 	rx_dmap;
2228 	nxge_status_t		status = NXGE_OK;
2229 
2230 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2231 
2232 	rx_dmap = (p_nxge_dma_common_t)
2233 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2234 
2235 	rx_dmap->contig_alloc_type = B_FALSE;
2236 
2237 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2238 			&nxge_desc_dma_attr,
2239 			size,
2240 			&nxge_dev_desc_dma_acc_attr,
2241 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2242 			rx_dmap);
2243 	if (status != NXGE_OK) {
2244 		goto nxge_alloc_rx_cntl_dma_fail1;
2245 	}
2246 
2247 	*dmap = rx_dmap;
2248 	goto nxge_alloc_rx_cntl_dma_exit;
2249 
2250 nxge_alloc_rx_cntl_dma_fail1:
2251 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2252 
2253 nxge_alloc_rx_cntl_dma_exit:
2254 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2255 		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2256 
2257 	return (status);
2258 }
2259 
2260 /*ARGSUSED*/
2261 static void
2262 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2263 {
2264 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2265 
2266 	nxge_dma_mem_free(dmap);
2267 
2268 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2269 }
2270 
2271 static nxge_status_t
2272 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2273 {
2274 	nxge_status_t		status = NXGE_OK;
2275 	int			i, j;
2276 	uint32_t		ndmas, st_tdc;
2277 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2278 	p_nxge_hw_pt_cfg_t	p_cfgp;
2279 	p_nxge_dma_pool_t	dma_poolp;
2280 	p_nxge_dma_common_t	*dma_buf_p;
2281 	p_nxge_dma_pool_t	dma_cntl_poolp;
2282 	p_nxge_dma_common_t	*dma_cntl_p;
2283 	size_t			tx_buf_alloc_size;
2284 	size_t			tx_cntl_alloc_size;
2285 	uint32_t		*num_chunks; /* per dma */
2286 	uint32_t		bcopy_thresh;
2287 
2288 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2289 
2290 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2291 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2292 	st_tdc = p_cfgp->start_tdc;
2293 	ndmas = p_cfgp->max_tdcs;
2294 
2295 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2296 		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2297 		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2298 	/*
2299 	 * Allocate memory for each transmit DMA channel.
2300 	 */
2301 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2302 			KM_SLEEP);
2303 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2304 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2305 
2306 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2307 			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2308 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2309 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2310 
2311 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2312 	/*
	 * N2/NIU limits the descriptor sizes: contiguous memory
	 * allocations for data buffers are limited to 4M
	 * (contig_mem_alloc), and control buffers must be little
	 * endian (and must use the ddi/dki memory allocation
	 * functions). The transmit ring is limited to 8K (including
	 * the mailbox).
2318 	 */
2319 	if (nxgep->niu_type == N2_NIU) {
2320 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2321 			(!ISP2(nxge_tx_ring_size))) {
2322 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2323 		}
2324 	}
2325 #endif
2326 
2327 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2328 
2329 	/*
2330 	 * Assume that each DMA channel will be configured with default
	 * transmit buffer size for copying transmit data.
2332 	 * (For packet payload over this limit, packets will not be
2333 	 *  copied.)
2334 	 */
2335 	if (nxgep->niu_type == N2_NIU) {
2336 		bcopy_thresh = TX_BCOPY_SIZE;
2337 	} else {
2338 		bcopy_thresh = nxge_bcopy_thresh;
2339 	}
2340 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
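
	/*
	 * For example (values assumed): a 2048-byte bcopy threshold and
	 * a 1024-entry transmit ring yield a 2M allocation, which also
	 * satisfies the N2/NIU power-of-2 and 4M checks below.
	 */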
2341 
2342 	/*
2343 	 * Addresses of transmit descriptor ring and the
	 * mailbox must all be cache-aligned (64 bytes).
2345 	 */
2346 	tx_cntl_alloc_size = nxge_tx_ring_size;
2347 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2348 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
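
	/*
	 * That is, per channel:
	 *
	 *	tx_cntl_alloc_size =
	 *	    ring_size * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t)
	 */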
2349 
2350 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2351 	if (nxgep->niu_type == N2_NIU) {
2352 		if (!ISP2(tx_buf_alloc_size)) {
2353 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2354 				"==> nxge_alloc_tx_mem_pool: "
				" buffer size must be a power of 2"));
2356 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2357 			goto nxge_alloc_tx_mem_pool_exit;
2358 		}
2359 
2360 		if (tx_buf_alloc_size > (1 << 22)) {
2361 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2362 				"==> nxge_alloc_tx_mem_pool: "
				" buffer size is limited to 4M"));
2364 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2365 			goto nxge_alloc_tx_mem_pool_exit;
2366 		}
2367 
2368 		if (tx_cntl_alloc_size < 0x2000) {
2369 			tx_cntl_alloc_size = 0x2000;
2370 		}
2371 	}
2372 #endif
2373 
2374 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2375 			sizeof (uint32_t) * ndmas, KM_SLEEP);
2376 
2377 	/*
2378 	 * Allocate memory for transmit buffers and descriptor rings.
2379 	 * Replace allocation functions with interface functions provided
2380 	 * by the partition manager when it is available.
2381 	 *
2382 	 * Allocate memory for the transmit buffer pool.
2383 	 */
2384 	for (i = 0; i < ndmas; i++) {
2385 		num_chunks[i] = 0;
2386 		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
2387 					tx_buf_alloc_size,
2388 					bcopy_thresh, &num_chunks[i]);
2389 		if (status != NXGE_OK) {
2390 			break;
2391 		}
2392 		st_tdc++;
2393 	}
2394 	if (i < ndmas) {
2395 		goto nxge_alloc_tx_mem_pool_fail1;
2396 	}
2397 
2398 	st_tdc = p_cfgp->start_tdc;
2399 	/*
2400 	 * Allocate memory for descriptor rings and mailbox.
2401 	 */
2402 	for (j = 0; j < ndmas; j++) {
2403 		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
2404 					tx_cntl_alloc_size);
2405 		if (status != NXGE_OK) {
2406 			break;
2407 		}
2408 		st_tdc++;
2409 	}
2410 	if (j < ndmas) {
2411 		goto nxge_alloc_tx_mem_pool_fail2;
2412 	}
2413 
2414 	dma_poolp->ndmas = ndmas;
2415 	dma_poolp->num_chunks = num_chunks;
2416 	dma_poolp->buf_allocated = B_TRUE;
2417 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2418 	nxgep->tx_buf_pool_p = dma_poolp;
2419 
2420 	dma_cntl_poolp->ndmas = ndmas;
2421 	dma_cntl_poolp->buf_allocated = B_TRUE;
2422 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2423 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2424 
2425 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2426 		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
2427 		"ndmas %d poolp->ndmas %d",
2428 		st_tdc, ndmas, dma_poolp->ndmas));
2429 
2430 	goto nxge_alloc_tx_mem_pool_exit;
2431 
2432 nxge_alloc_tx_mem_pool_fail2:
2433 	/* Free control buffers */
2434 	j--;
2435 	for (; j >= 0; j--) {
2436 		nxge_free_tx_cntl_dma(nxgep,
			(p_nxge_dma_common_t)dma_cntl_p[j]);
2438 	}
2439 
2440 nxge_alloc_tx_mem_pool_fail1:
2441 	/* Free data buffers */
2442 	i--;
2443 	for (; i >= 0; i--) {
2444 		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2445 			num_chunks[i]);
2446 	}
2447 
2448 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2449 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2450 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2451 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2452 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2453 
2454 nxge_alloc_tx_mem_pool_exit:
2455 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2456 		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
2457 
2458 	return (status);
2459 }
2460 
2461 static nxge_status_t
2462 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2463     p_nxge_dma_common_t *dmap, size_t alloc_size,
2464     size_t block_size, uint32_t *num_chunks)
2465 {
2466 	p_nxge_dma_common_t 	tx_dmap;
2467 	nxge_status_t		status = NXGE_OK;
2468 	size_t			total_alloc_size;
2469 	size_t			allocated = 0;
2470 	int			i, size_index, array_size;
2471 
2472 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
2473 
2474 	tx_dmap = (p_nxge_dma_common_t)
2475 		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2476 			KM_SLEEP);
2477 
2478 	total_alloc_size = alloc_size;
2479 	i = 0;
2480 	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
		(alloc_sizes[size_index] < alloc_size))
		size_index++;
2485 	if (size_index >= array_size) {
2486 		size_index = array_size - 1;
2487 	}
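
	/* Same descending-chunk strategy as nxge_alloc_rx_buf_dma() above. */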
2488 
2489 	while ((allocated < total_alloc_size) &&
2490 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2491 
2492 		tx_dmap[i].dma_chunk_index = i;
2493 		tx_dmap[i].block_size = block_size;
2494 		tx_dmap[i].alength = alloc_sizes[size_index];
2495 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2496 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2497 		tx_dmap[i].dma_channel = dma_channel;
2498 		tx_dmap[i].contig_alloc_type = B_FALSE;
2499 
2500 		/*
2501 		 * N2/NIU: data buffers must be contiguous as the driver
2502 		 *	   needs to call Hypervisor api to set up
2503 		 *	   logical pages.
2504 		 */
2505 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2506 			tx_dmap[i].contig_alloc_type = B_TRUE;
2507 		}
2508 
2509 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2510 			&nxge_tx_dma_attr,
2511 			tx_dmap[i].alength,
2512 			&nxge_dev_buf_dma_acc_attr,
2513 			DDI_DMA_WRITE | DDI_DMA_STREAMING,
2514 			(p_nxge_dma_common_t)(&tx_dmap[i]));
2515 		if (status != NXGE_OK) {
2516 			size_index--;
2517 		} else {
2518 			i++;
2519 			allocated += alloc_sizes[size_index];
2520 		}
2521 	}
2522 
2523 	if (allocated < total_alloc_size) {
2524 		goto nxge_alloc_tx_mem_fail1;
2525 	}
2526 
2527 	*num_chunks = i;
2528 	*dmap = tx_dmap;
2529 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2530 		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2531 		*dmap, i));
2532 	goto nxge_alloc_tx_mem_exit;
2533 
2534 nxge_alloc_tx_mem_fail1:
2535 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2536 
2537 nxge_alloc_tx_mem_exit:
2538 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2539 		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
2540 
2541 	return (status);
2542 }
2543 
2544 /*ARGSUSED*/
2545 static void
2546 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2547     uint32_t num_chunks)
2548 {
2549 	int		i;
2550 
2551 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
2552 
2553 	for (i = 0; i < num_chunks; i++) {
2554 		nxge_dma_mem_free(dmap++);
2555 	}
2556 
2557 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
2558 }
2559 
2560 /*ARGSUSED*/
2561 static nxge_status_t
2562 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2563     p_nxge_dma_common_t *dmap, size_t size)
2564 {
2565 	p_nxge_dma_common_t 	tx_dmap;
2566 	nxge_status_t		status = NXGE_OK;
2567 
2568 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
2569 	tx_dmap = (p_nxge_dma_common_t)
2570 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2571 
2572 	tx_dmap->contig_alloc_type = B_FALSE;
2573 
2574 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2575 			&nxge_desc_dma_attr,
2576 			size,
2577 			&nxge_dev_desc_dma_acc_attr,
2578 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2579 			tx_dmap);
2580 	if (status != NXGE_OK) {
2581 		goto nxge_alloc_tx_cntl_dma_fail1;
2582 	}
2583 
2584 	*dmap = tx_dmap;
2585 	goto nxge_alloc_tx_cntl_dma_exit;
2586 
2587 nxge_alloc_tx_cntl_dma_fail1:
2588 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2589 
2590 nxge_alloc_tx_cntl_dma_exit:
2591 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2592 		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2593 
2594 	return (status);
2595 }
2596 
2597 /*ARGSUSED*/
2598 static void
2599 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2600 {
2601 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2602 
2603 	nxge_dma_mem_free(dmap);
2604 
2605 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2606 }
2607 
2608 static void
2609 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2610 {
2611 	uint32_t		i, ndmas;
2612 	p_nxge_dma_pool_t	dma_poolp;
2613 	p_nxge_dma_common_t	*dma_buf_p;
2614 	p_nxge_dma_pool_t	dma_cntl_poolp;
2615 	p_nxge_dma_common_t	*dma_cntl_p;
2616 	uint32_t 		*num_chunks;
2617 
2618 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2619 
2620 	dma_poolp = nxgep->tx_buf_pool_p;
2621 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2622 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2623 			"<== nxge_free_tx_mem_pool "
			"(null tx buf pool or buf not allocated)"));
2625 		return;
2626 	}
2627 
2628 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2629 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2630 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2631 			"<== nxge_free_tx_mem_pool "
			"(null tx cntl buf pool or cntl buf not allocated)"));
2633 		return;
2634 	}
2635 
2636 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2637 	num_chunks = dma_poolp->num_chunks;
2638 
2639 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2640 	ndmas = dma_cntl_poolp->ndmas;
2641 
2642 	for (i = 0; i < ndmas; i++) {
2643 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2644 	}
2645 
2646 	for (i = 0; i < ndmas; i++) {
2647 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2648 	}
2649 
2650 	for (i = 0; i < ndmas; i++) {
2651 		KMEM_FREE(dma_buf_p[i],
2652 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2653 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2654 	}
2655 
2656 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2657 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2658 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2659 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2660 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2661 
2662 	nxgep->tx_buf_pool_p = NULL;
2663 	nxgep->tx_cntl_pool_p = NULL;
2664 
2665 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2666 }
2667 
2668 /*ARGSUSED*/
2669 static nxge_status_t
2670 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2671 	struct ddi_dma_attr *dma_attrp,
2672 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2673 	p_nxge_dma_common_t dma_p)
2674 {
2675 	caddr_t 		kaddrp;
2676 	int			ddi_status = DDI_SUCCESS;
2677 	boolean_t		contig_alloc_type;
2678 
2679 	contig_alloc_type = dma_p->contig_alloc_type;
2680 
2681 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2682 		/*
2683 		 * contig_alloc_type for contiguous memory only allowed
2684 		 * for N2/NIU.
2685 		 */
2686 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
2688 			dma_p->contig_alloc_type));
2689 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2690 	}
2691 
2692 	dma_p->dma_handle = NULL;
2693 	dma_p->acc_handle = NULL;
2694 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2695 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2696 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2697 		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2698 	if (ddi_status != DDI_SUCCESS) {
2699 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2700 			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2701 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2702 	}
2703 
2704 	switch (contig_alloc_type) {
2705 	case B_FALSE:
2706 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2707 			acc_attr_p,
2708 			xfer_flags,
2709 			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2710 			&dma_p->acc_handle);
2711 		if (ddi_status != DDI_SUCCESS) {
2712 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2713 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2714 			ddi_dma_free_handle(&dma_p->dma_handle);
2715 			dma_p->dma_handle = NULL;
2716 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2717 		}
2718 		if (dma_p->alength < length) {
2719 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2720 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2721 				"< length."));
2722 			ddi_dma_mem_free(&dma_p->acc_handle);
2723 			ddi_dma_free_handle(&dma_p->dma_handle);
2724 			dma_p->acc_handle = NULL;
2725 			dma_p->dma_handle = NULL;
2726 			return (NXGE_ERROR);
2727 		}
2728 
2729 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2730 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2731 			&dma_p->dma_cookie, &dma_p->ncookies);
2732 		if (ddi_status != DDI_DMA_MAPPED) {
2733 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
				"(status 0x%x ncookies %d.)", ddi_status,
2736 				dma_p->ncookies));
2737 			if (dma_p->acc_handle) {
2738 				ddi_dma_mem_free(&dma_p->acc_handle);
2739 				dma_p->acc_handle = NULL;
2740 			}
2741 			ddi_dma_free_handle(&dma_p->dma_handle);
2742 			dma_p->dma_handle = NULL;
2743 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2744 		}
2745 
2746 		if (dma_p->ncookies != 1) {
2747 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2748 				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
2749 				"> 1 cookie"
				"(status 0x%x ncookies %d.)", ddi_status,
2751 				dma_p->ncookies));
2752 			if (dma_p->acc_handle) {
2753 				ddi_dma_mem_free(&dma_p->acc_handle);
2754 				dma_p->acc_handle = NULL;
2755 			}
2756 			ddi_dma_free_handle(&dma_p->dma_handle);
2757 			dma_p->dma_handle = NULL;
2758 			return (NXGE_ERROR);
2759 		}
2760 		break;
2761 
2762 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2763 	case B_TRUE:
2764 		kaddrp = (caddr_t)contig_mem_alloc(length);
2765 		if (kaddrp == NULL) {
2766 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2767 				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
2768 			ddi_dma_free_handle(&dma_p->dma_handle);
2769 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2770 		}
2771 
2772 		dma_p->alength = length;
2773 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2774 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2775 			&dma_p->dma_cookie, &dma_p->ncookies);
2776 		if (ddi_status != DDI_DMA_MAPPED) {
2777 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2779 				"(status 0x%x ncookies %d.)", ddi_status,
2780 				dma_p->ncookies));
2781 
2782 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2783 				"==> nxge_dma_mem_alloc: (not mapped)"
2784 				"length %lu (0x%x) "
2785 				"free contig kaddrp $%p "
2786 				"va_to_pa $%p",
2787 				length, length,
2788 				kaddrp,
2789 				va_to_pa(kaddrp)));
2790 
2792 			contig_mem_free((void *)kaddrp, length);
2793 			ddi_dma_free_handle(&dma_p->dma_handle);
2794 
2795 			dma_p->dma_handle = NULL;
2796 			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
2798 			dma_p->kaddrp = NULL;
2799 
2800 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2801 		}
2802 
2803 		if (dma_p->ncookies != 1 ||
			(dma_p->dma_cookie.dmac_laddress == 0)) {
2805 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
2807 				"cookie or "
2808 				"dmac_laddress is NULL $%p size %d "
2809 				" (status 0x%x ncookies %d.)",
2810 				ddi_status,
2811 				dma_p->dma_cookie.dmac_laddress,
2812 				dma_p->dma_cookie.dmac_size,
2813 				dma_p->ncookies));
2814 
2815 			contig_mem_free((void *)kaddrp, length);
2816 			ddi_dma_free_handle(&dma_p->dma_handle);
2817 
2818 			dma_p->alength = 0;
2819 			dma_p->dma_handle = NULL;
2820 			dma_p->acc_handle = NULL;
2821 			dma_p->kaddrp = NULL;
2822 
2823 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2824 		}
2825 		break;
2826 
2827 #else
2828 	case B_TRUE:
2829 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2830 			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2831 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2832 #endif
2833 	}
2834 
2835 	dma_p->kaddrp = kaddrp;
2836 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
2837 			dma_p->alength - RXBUF_64B_ALIGNED;
2838 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2839 	dma_p->last_ioaddr_pp =
2840 		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
2841 				dma_p->alength - RXBUF_64B_ALIGNED;
2842 
2843 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2844 
2845 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2846 	dma_p->orig_ioaddr_pp =
2847 		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
2848 	dma_p->orig_alength = length;
2849 	dma_p->orig_kaddrp = kaddrp;
2850 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2851 #endif
2852 
2853 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2854 		"dma buffer allocated: dma_p $%p "
		"return dmac_laddress from cookie $%p cookie dmac_size %d "
2856 		"dma_p->ioaddr_p $%p "
2857 		"dma_p->orig_ioaddr_p $%p "
2858 		"orig_vatopa $%p "
2859 		"alength %d (0x%x) "
2860 		"kaddrp $%p "
2861 		"length %d (0x%x)",
2862 		dma_p,
2863 		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2864 		dma_p->ioaddr_pp,
2865 		dma_p->orig_ioaddr_pp,
2866 		dma_p->orig_vatopa,
2867 		dma_p->alength, dma_p->alength,
2868 		kaddrp,
2869 		length, length));
2870 
2871 	return (NXGE_OK);
2872 }
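
/*
 * On success, the nxge_dma_common_t is fully populated: dma_handle (and,
 * for non-contiguous allocations, acc_handle) is valid, kaddrp and
 * last_kaddrp bracket the kernel mapping, ioaddr_pp and last_ioaddr_pp
 * bracket the mapped DMA cookie (exactly one cookie is accepted), and
 * alength holds the usable length.  nxge_dma_mem_free() below releases
 * all of these resources.
 */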
2873 
2874 static void
2875 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2876 {
2877 	if (dma_p->dma_handle != NULL) {
2878 		if (dma_p->ncookies) {
2879 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2880 			dma_p->ncookies = 0;
2881 		}
2882 		ddi_dma_free_handle(&dma_p->dma_handle);
2883 		dma_p->dma_handle = NULL;
2884 	}
2885 
2886 	if (dma_p->acc_handle != NULL) {
2887 		ddi_dma_mem_free(&dma_p->acc_handle);
2888 		dma_p->acc_handle = NULL;
2889 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2890 	}
2891 
2892 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2893 	if (dma_p->contig_alloc_type &&
2894 			dma_p->orig_kaddrp && dma_p->orig_alength) {
2895 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
2896 			"kaddrp $%p (orig_kaddrp $%p)"
			"mem type %d "
2898 			"orig_alength %d "
2899 			"alength 0x%x (%d)",
2900 			dma_p->kaddrp,
2901 			dma_p->orig_kaddrp,
2902 			dma_p->contig_alloc_type,
2903 			dma_p->orig_alength,
2904 			dma_p->alength, dma_p->alength));
2905 
2906 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
2908 		dma_p->orig_kaddrp = NULL;
2909 		dma_p->contig_alloc_type = B_FALSE;
2910 	}
2911 #endif
2912 	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
2914 }
2915 
2916 /*
2917  *	nxge_m_start() -- start transmitting and receiving.
2918  *
2919  *	This function is called by the MAC layer when the first
 *	stream is opened, to prepare the hardware for sending
 *	and receiving packets.
2922  */
2923 static int
2924 nxge_m_start(void *arg)
2925 {
2926 	p_nxge_t 	nxgep = (p_nxge_t)arg;
2927 
2928 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
2929 
2930 	MUTEX_ENTER(nxgep->genlock);
2931 	if (nxge_init(nxgep) != NXGE_OK) {
2932 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2933 			"<== nxge_m_start: initialization failed"));
2934 		MUTEX_EXIT(nxgep->genlock);
2935 		return (EIO);
2936 	}
2937 
2938 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
2939 		goto nxge_m_start_exit;
2940 	/*
2941 	 * Start timer to check the system error and tx hangs
2942 	 */
2943 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
2944 		NXGE_CHECK_TIMER);
2945 
2946 	nxgep->link_notify = B_TRUE;
2947 
2948 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
2949 
2950 nxge_m_start_exit:
2951 	MUTEX_EXIT(nxgep->genlock);
2952 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
2953 	return (0);
2954 }
2955 
2956 /*
2957  *	nxge_m_stop(): stop transmitting and receiving.
2958  */
2959 static void
2960 nxge_m_stop(void *arg)
2961 {
2962 	p_nxge_t 	nxgep = (p_nxge_t)arg;
2963 
2964 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
2965 
2966 	if (nxgep->nxge_timerid) {
2967 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
2968 		nxgep->nxge_timerid = 0;
2969 	}
2970 
2971 	MUTEX_ENTER(nxgep->genlock);
2972 	nxge_uninit(nxgep);
2973 
2974 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
2975 
2976 	MUTEX_EXIT(nxgep->genlock);
2977 
2978 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
2979 }
2980 
2981 static int
2982 nxge_m_unicst(void *arg, const uint8_t *macaddr)
2983 {
2984 	p_nxge_t 	nxgep = (p_nxge_t)arg;
2985 	struct 		ether_addr addrp;
2986 
2987 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
2988 
2989 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2990 	if (nxge_set_mac_addr(nxgep, &addrp)) {
2991 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_m_unicst: set unicast failed"));
2993 		return (EINVAL);
2994 	}
2995 
2996 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
2997 
2998 	return (0);
2999 }
3000 
3001 static int
3002 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3003 {
3004 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3005 	struct 		ether_addr addrp;
3006 
3007 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3008 		"==> nxge_m_multicst: add %d", add));
3009 
3010 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3011 	if (add) {
3012 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3013 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3014 				"<== nxge_m_multicst: add multicast failed"));
3015 			return (EINVAL);
3016 		}
3017 	} else {
3018 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3019 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3020 				"<== nxge_m_multicst: del multicast failed"));
3021 			return (EINVAL);
3022 		}
3023 	}
3024 
3025 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3026 
3027 	return (0);
3028 }
3029 
3030 static int
3031 nxge_m_promisc(void *arg, boolean_t on)
3032 {
3033 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3034 
3035 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3036 		"==> nxge_m_promisc: on %d", on));
3037 
3038 	if (nxge_set_promisc(nxgep, on)) {
3039 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3040 			"<== nxge_m_promisc: set promisc failed"));
3041 		return (EINVAL);
3042 	}
3043 
3044 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3045 		"<== nxge_m_promisc: on %d", on));
3046 
3047 	return (0);
3048 }
3049 
3050 static void
3051 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3052 {
3053 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3054 	struct 		iocblk *iocp = (struct iocblk *)mp->b_rptr;
3055 	boolean_t 	need_privilege;
3056 	int 		err;
3057 	int 		cmd;
3058 
3059 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3060 
3062 	iocp->ioc_error = 0;
3063 	need_privilege = B_TRUE;
3064 	cmd = iocp->ioc_cmd;
3065 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3066 	switch (cmd) {
3067 	default:
3068 		miocnak(wq, mp, 0, EINVAL);
3069 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3070 		return;
3071 
3072 	case LB_GET_INFO_SIZE:
3073 	case LB_GET_INFO:
3074 	case LB_GET_MODE:
3075 		need_privilege = B_FALSE;
3076 		break;
3077 	case LB_SET_MODE:
3078 		break;
3079 
3080 	case ND_GET:
3081 		need_privilege = B_FALSE;
3082 		break;
3083 	case ND_SET:
3084 		break;
3085 
3086 	case NXGE_GET_MII:
3087 	case NXGE_PUT_MII:
3088 	case NXGE_GET64:
3089 	case NXGE_PUT64:
3090 	case NXGE_GET_TX_RING_SZ:
3091 	case NXGE_GET_TX_DESC:
3092 	case NXGE_TX_SIDE_RESET:
3093 	case NXGE_RX_SIDE_RESET:
3094 	case NXGE_GLOBAL_RESET:
3095 	case NXGE_RESET_MAC:
3096 	case NXGE_TX_REGS_DUMP:
3097 	case NXGE_RX_REGS_DUMP:
3098 	case NXGE_INT_REGS_DUMP:
3099 	case NXGE_VIR_INT_REGS_DUMP:
3100 	case NXGE_PUT_TCAM:
3101 	case NXGE_GET_TCAM:
3102 	case NXGE_RTRACE:
3103 	case NXGE_RDUMP:
3104 
3105 		need_privilege = B_FALSE;
3106 		break;
3107 	case NXGE_INJECT_ERR:
3108 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3109 		nxge_err_inject(nxgep, wq, mp);
3110 		break;
3111 	}
3112 
3113 	if (need_privilege) {
3114 		if (secpolicy_net_config != NULL)
3115 			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3116 		else
3117 			err = drv_priv(iocp->ioc_cr);
3118 		if (err != 0) {
3119 			miocnak(wq, mp, 0, err);
3120 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3121 				"<== nxge_m_ioctl: no priv"));
3122 			return;
3123 		}
3124 	}
3125 
3126 	switch (cmd) {
3127 	case ND_GET:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
		/* FALLTHROUGH */
3129 	case ND_SET:
3130 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
3131 		nxge_param_ioctl(nxgep, wq, mp, iocp);
3132 		break;
3133 
3134 	case LB_GET_MODE:
3135 	case LB_SET_MODE:
3136 	case LB_GET_INFO_SIZE:
3137 	case LB_GET_INFO:
3138 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3139 		break;
3140 
3141 	case NXGE_GET_MII:
3142 	case NXGE_PUT_MII:
3143 	case NXGE_PUT_TCAM:
3144 	case NXGE_GET_TCAM:
3145 	case NXGE_GET64:
3146 	case NXGE_PUT64:
3147 	case NXGE_GET_TX_RING_SZ:
3148 	case NXGE_GET_TX_DESC:
3149 	case NXGE_TX_SIDE_RESET:
3150 	case NXGE_RX_SIDE_RESET:
3151 	case NXGE_GLOBAL_RESET:
3152 	case NXGE_RESET_MAC:
3153 	case NXGE_TX_REGS_DUMP:
3154 	case NXGE_RX_REGS_DUMP:
3155 	case NXGE_INT_REGS_DUMP:
3156 	case NXGE_VIR_INT_REGS_DUMP:
3157 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3158 			"==> nxge_m_ioctl: cmd 0x%x", cmd));
3159 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3160 		break;
3161 	}
3162 
3163 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3164 }
3165 
3166 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3167 
3168 static void
3169 nxge_m_resources(void *arg)
3170 {
3171 	p_nxge_t		nxgep = arg;
3172 	mac_rx_fifo_t 		mrf;
3173 	p_rx_rcr_rings_t	rcr_rings;
3174 	p_rx_rcr_ring_t		*rcr_p;
3175 	uint32_t		i, ndmas;
3176 	nxge_status_t		status;
3177 
3178 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3179 
3180 	MUTEX_ENTER(nxgep->genlock);
3181 
3182 	/*
3183 	 * CR 6492541 Check to see if the drv_state has been initialized,
	 * if not, call nxge_init().
3185 	 */
3186 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3187 		status = nxge_init(nxgep);
3188 		if (status != NXGE_OK)
3189 			goto nxge_m_resources_exit;
3190 	}
3191 
3192 	mrf.mrf_type = MAC_RX_FIFO;
3193 	mrf.mrf_blank = nxge_rx_hw_blank;
3194 	mrf.mrf_arg = (void *)nxgep;
3195 
3196 	mrf.mrf_normal_blank_time = 128;
3197 	mrf.mrf_normal_pkt_count = 8;
3198 	rcr_rings = nxgep->rx_rcr_rings;
3199 	rcr_p = rcr_rings->rcr_rings;
3200 	ndmas = rcr_rings->ndmas;
3201 
3202 	/*
3203 	 * Export our receive resources to the MAC layer.
3204 	 */
3205 	for (i = 0; i < ndmas; i++) {
3206 		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
3207 				mac_resource_add(nxgep->mach,
3208 				    (mac_resource_t *)&mrf);
3209 
3210 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3211 			"==> nxge_m_resources: vdma %d dma %d "
3212 			"rcrptr 0x%016llx mac_handle 0x%016llx",
3213 			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
3214 			rcr_p[i],
3215 			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
3216 	}
3217 
3218 nxge_m_resources_exit:
3219 	MUTEX_EXIT(nxgep->genlock);
3220 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3221 }
3222 
3223 static void
3224 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3225 {
3226 	p_nxge_mmac_stats_t mmac_stats;
3227 	int i;
3228 	nxge_mmac_t *mmac_info;
3229 
3230 	mmac_info = &nxgep->nxge_mmac_info;
3231 
3232 	mmac_stats = &nxgep->statsp->mmac_stats;
3233 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3234 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3235 
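	/*
	 * The MAC pools and the kstat's ether_addr_octet[] store the
	 * address bytes in opposite orders, hence the (ETHERADDRL - 1) - i
	 * reversal in the copies below.
	 */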
3236 	for (i = 0; i < ETHERADDRL; i++) {
3237 		if (factory) {
3238 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3239 			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
3240 		} else {
3241 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3242 			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
3243 		}
3244 	}
3245 }
3246 
3247 /*
3248  * nxge_altmac_set() -- Set an alternate MAC address
3249  */
3250 static int
3251 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
3252 {
3253 	uint8_t addrn;
3254 	uint8_t portn;
3255 	npi_mac_addr_t altmac;
3256 
3257 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
3258 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
3259 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
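
	/*
	 * Example: a MAC address of 00:14:4f:a8:12:34 packs as
	 * w2 = 0x0014, w1 = 0x4fa8, w0 = 0x1234.
	 */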
3260 
3261 	portn = nxgep->mac.portnum;
3262 	addrn = (uint8_t)slot - 1;
3263 
3264 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
3265 		addrn, &altmac) != NPI_SUCCESS)
3266 		return (EIO);
3267 	/*
3268 	 * Enable comparison with the alternate MAC address.
	 * On the BMAC the first alternate address is enabled by bit 1 of
	 * register BMAC_ALTAD_CMPEN, while on the XMAC it is enabled by
	 * bit 0 of register XMAC_ADDR_CMPEN, so slot needs to be
	 * converted to addrn accordingly before calling
	 * npi_mac_altaddr_enable.
3273 	 */
3274 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3275 		addrn = (uint8_t)slot - 1;
3276 	else
3277 		addrn = (uint8_t)slot;
3278 
3279 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3280 		!= NPI_SUCCESS)
3281 		return (EIO);
3282 
3283 	return (0);
3284 }
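
/*
 * Slot-to-addrn mapping used by nxge_altmac_set() above:
 *
 *			entry (OP_SET)	enable
 *	XMAC port	slot - 1	slot - 1
 *	BMAC port	slot - 1	slot
 */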
3285 
3286 /*
 * nxge_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, and enable the port to start filtering
 * on the new MAC address.  Returns 0 on success.
3290  */
3291 static int
3292 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3293 {
3294 	p_nxge_t nxgep = arg;
3295 	mac_addr_slot_t slot;
3296 	nxge_mmac_t *mmac_info;
3297 	int err;
3298 	nxge_status_t status;
3299 
3300 	mutex_enter(nxgep->genlock);
3301 
3302 	/*
3303 	 * Make sure that nxge is initialized, if _start() has
3304 	 * not been called.
3305 	 */
3306 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3307 		status = nxge_init(nxgep);
3308 		if (status != NXGE_OK) {
3309 			mutex_exit(nxgep->genlock);
3310 			return (ENXIO);
3311 		}
3312 	}
3313 
3314 	mmac_info = &nxgep->nxge_mmac_info;
3315 	if (mmac_info->naddrfree == 0) {
3316 		mutex_exit(nxgep->genlock);
3317 		return (ENOSPC);
3318 	}
3319 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3320 		maddr->mma_addrlen)) {
3321 		mutex_exit(nxgep->genlock);
3322 		return (EINVAL);
3323 	}
3324 	/*
3325 	 * 	Search for the first available slot. Because naddrfree
3326 	 * is not zero, we are guaranteed to find one.
3327 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
3328 	 * MAC slot is slot 1.
3329 	 *	Each of the first two ports of Neptune has 16 alternate
3330 	 * MAC slots but only the first 7 (of 15) slots have assigned factory
3331 	 * MAC addresses. We first search among the slots without bundled
3332 	 * factory MACs. If we fail to find one in that range, then we
3333 	 * search the slots with bundled factory MACs.  A factory MAC
3334 	 * will be wasted while the slot is used with a user MAC address.
3335 	 * But the slot could be used by factory MAC again after calling
3336 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3337 	 */
3338 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3339 		for (slot = mmac_info->num_factory_mmac + 1;
3340 			slot <= mmac_info->num_mmac; slot++) {
3341 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3342 				break;
3343 		}
3344 		if (slot > mmac_info->num_mmac) {
3345 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3346 				slot++) {
3347 				if (!(mmac_info->mac_pool[slot].flags
3348 					& MMAC_SLOT_USED))
3349 					break;
3350 			}
3351 		}
3352 	} else {
3353 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3354 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3355 				break;
3356 		}
3357 	}
3358 	ASSERT(slot <= mmac_info->num_mmac);
3359 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3360 		mutex_exit(nxgep->genlock);
3361 		return (err);
3362 	}
3363 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3364 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3365 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3366 	mmac_info->naddrfree--;
3367 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3368 
3369 	maddr->mma_slot = slot;
3370 
3371 	mutex_exit(nxgep->genlock);
3372 	return (0);
3373 }
3374 
3375 /*
3376  * This function reserves an unused slot and programs the slot and the HW
3377  * with a factory mac address.
3378  */
3379 static int
3380 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
3381 {
3382 	p_nxge_t nxgep = arg;
3383 	mac_addr_slot_t slot;
3384 	nxge_mmac_t *mmac_info;
3385 	int err;
3386 	nxge_status_t status;
3387 
3388 	mutex_enter(nxgep->genlock);
3389 
3390 	/*
3391 	 * Make sure that nxge is initialized, if _start() has
3392 	 * not been called.
3393 	 */
3394 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3395 		status = nxge_init(nxgep);
3396 		if (status != NXGE_OK) {
3397 			mutex_exit(nxgep->genlock);
3398 			return (ENXIO);
3399 		}
3400 	}
3401 
3402 	mmac_info = &nxgep->nxge_mmac_info;
3403 	if (mmac_info->naddrfree == 0) {
3404 		mutex_exit(nxgep->genlock);
3405 		return (ENOSPC);
3406 	}
3407 
3408 	slot = maddr->mma_slot;
3409 	if (slot == -1) {  /* -1: Take the first available slot */
3410 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
3411 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3412 				break;
3413 		}
3414 		if (slot > mmac_info->num_factory_mmac) {
3415 			mutex_exit(nxgep->genlock);
3416 			return (ENOSPC);
3417 		}
3418 	}
3419 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
3420 		/*
3421 		 * Do not support factory MAC at a slot greater than
3422 		 * num_factory_mmac even when there are available factory
3423 		 * MAC addresses because the alternate MACs are bundled with
3424 		 * slot[1] through slot[num_factory_mmac]
3425 		 */
3426 		mutex_exit(nxgep->genlock);
3427 		return (EINVAL);
3428 	}
3429 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3430 		mutex_exit(nxgep->genlock);
3431 		return (EBUSY);
3432 	}
3433 	/* Verify the address to be reserved */
3434 	if (!mac_unicst_verify(nxgep->mach,
3435 		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
3436 		mutex_exit(nxgep->genlock);
3437 		return (EINVAL);
3438 	}
	if ((err = nxge_altmac_set(nxgep,
		mmac_info->factory_mac_pool[slot], slot)) != 0) {
3441 		mutex_exit(nxgep->genlock);
3442 		return (err);
3443 	}
3444 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
3445 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3446 	mmac_info->naddrfree--;
3447 
3448 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
3449 	mutex_exit(nxgep->genlock);
3450 
3451 	/* Pass info back to the caller */
3452 	maddr->mma_slot = slot;
3453 	maddr->mma_addrlen = ETHERADDRL;
3454 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3455 
3456 	return (0);
3457 }
3458 
3459 /*
3460  * Remove the specified mac address and update the HW not to filter
3461  * the mac address anymore.
3462  */
3463 static int
3464 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
3465 {
3466 	p_nxge_t nxgep = arg;
3467 	nxge_mmac_t *mmac_info;
3468 	uint8_t addrn;
3469 	uint8_t portn;
3470 	int err = 0;
3471 	nxge_status_t status;
3472 
3473 	mutex_enter(nxgep->genlock);
3474 
3475 	/*
3476 	 * Make sure that nxge is initialized, if _start() has
3477 	 * not been called.
3478 	 */
3479 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3480 		status = nxge_init(nxgep);
3481 		if (status != NXGE_OK) {
3482 			mutex_exit(nxgep->genlock);
3483 			return (ENXIO);
3484 		}
3485 	}
3486 
3487 	mmac_info = &nxgep->nxge_mmac_info;
3488 	if (slot < 1 || slot > mmac_info->num_mmac) {
3489 		mutex_exit(nxgep->genlock);
3490 		return (EINVAL);
3491 	}
3492 
3493 	portn = nxgep->mac.portnum;
3494 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3495 		addrn = (uint8_t)slot - 1;
3496 	else
3497 		addrn = (uint8_t)slot;
3498 
3499 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3500 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3501 				== NPI_SUCCESS) {
3502 			mmac_info->naddrfree++;
3503 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3504 			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set
3507 			 * the MMAC_VENDOR_ADDR flag if this slot has an
3508 			 * associated factory MAC to indicate that a factory
3509 			 * MAC is available.
3510 			 */
3511 			if (slot <= mmac_info->num_factory_mmac) {
3512 				mmac_info->mac_pool[slot].flags
3513 					|= MMAC_VENDOR_ADDR;
3514 			}
3515 			/*
3516 			 * Clear mac_pool[slot].addr so that kstat shows 0
3517 			 * alternate MAC address if the slot is not used.
3518 			 * (But nxge_m_mmac_get returns the factory MAC even
3519 			 * when the slot is not used!)
3520 			 */
3521 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3522 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3523 		} else {
3524 			err = EIO;
3525 		}
3526 	} else {
3527 		err = EINVAL;
3528 	}
3529 
3530 	mutex_exit(nxgep->genlock);
3531 	return (err);
3532 }
3533 
3535 /*
3536  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
3537  */
3538 static int
3539 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3540 {
3541 	p_nxge_t nxgep = arg;
3542 	mac_addr_slot_t slot;
3543 	nxge_mmac_t *mmac_info;
3544 	int err = 0;
3545 	nxge_status_t status;
3546 
3547 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3548 			maddr->mma_addrlen))
3549 		return (EINVAL);
3550 
3551 	slot = maddr->mma_slot;
3552 
3553 	mutex_enter(nxgep->genlock);
3554 
3555 	/*
3556 	 * Make sure that nxge is initialized, if _start() has
3557 	 * not been called.
3558 	 */
3559 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3560 		status = nxge_init(nxgep);
3561 		if (status != NXGE_OK) {
3562 			mutex_exit(nxgep->genlock);
3563 			return (ENXIO);
3564 		}
3565 	}
3566 
3567 	mmac_info = &nxgep->nxge_mmac_info;
3568 	if (slot < 1 || slot > mmac_info->num_mmac) {
3569 		mutex_exit(nxgep->genlock);
3570 		return (EINVAL);
3571 	}
3572 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3573 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
			== 0) {
3575 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3576 				ETHERADDRL);
3577 			/*
3578 			 * Assume that the MAC passed down from the caller
3579 			 * is not a factory MAC address (The user should
3580 			 * call mmac_remove followed by mmac_reserve if
3581 			 * he wants to use the factory MAC for this slot).
3582 			 */
3583 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3584 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3585 		}
3586 	} else {
3587 		err = EINVAL;
3588 	}
3589 	mutex_exit(nxgep->genlock);
3590 	return (err);
3591 }
3592 
3593 /*
3594  * nxge_m_mmac_get() - Get the MAC address and other information
3595  * related to the slot.  mma_flags should be set to 0 in the call.
3596  * Note: although kstat shows MAC address as zero when a slot is
3597  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
3598  * to the caller as long as the slot is not using a user MAC address.
3599  * The following table shows the rules,
3600  *
3601  *				   USED    VENDOR    mma_addr
3602  * ------------------------------------------------------------
3603  * (1) Slot uses a user MAC:        yes      no     user MAC
3604  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
3605  * (3) Slot is not used but is
3606  *     factory MAC capable:         no       yes    factory MAC
3607  * (4) Slot is not used and is
3608  *     not factory MAC capable:     no       no        0
3609  * ------------------------------------------------------------
3610  */
3611 static int
3612 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3613 {
3614 	nxge_t *nxgep = arg;
3615 	mac_addr_slot_t slot;
3616 	nxge_mmac_t *mmac_info;
3617 	nxge_status_t status;
3618 
3619 	slot = maddr->mma_slot;
3620 
3621 	mutex_enter(nxgep->genlock);
3622 
3623 	/*
3624 	 * Make sure that nxge is initialized, if _start() has
3625 	 * not been called.
3626 	 */
3627 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3628 		status = nxge_init(nxgep);
3629 		if (status != NXGE_OK) {
3630 			mutex_exit(nxgep->genlock);
3631 			return (ENXIO);
3632 		}
3633 	}
3634 
3635 	mmac_info = &nxgep->nxge_mmac_info;
3636 
3637 	if (slot < 1 || slot > mmac_info->num_mmac) {
3638 		mutex_exit(nxgep->genlock);
3639 		return (EINVAL);
3640 	}
3641 	maddr->mma_flags = 0;
3642 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3643 		maddr->mma_flags |= MMAC_SLOT_USED;
3644 
3645 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3646 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3647 		bcopy(mmac_info->factory_mac_pool[slot],
3648 			maddr->mma_addr, ETHERADDRL);
3649 		maddr->mma_addrlen = ETHERADDRL;
3650 	} else {
3651 		if (maddr->mma_flags & MMAC_SLOT_USED) {
3652 			bcopy(mmac_info->mac_pool[slot].addr,
3653 				maddr->mma_addr, ETHERADDRL);
3654 			maddr->mma_addrlen = ETHERADDRL;
3655 		} else {
3656 			bzero(maddr->mma_addr, ETHERADDRL);
3657 			maddr->mma_addrlen = 0;
3658 		}
3659 	}
3660 	mutex_exit(nxgep->genlock);
3661 	return (0);
3662 }
3663 
3664 
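/*
 * GLDv3 capability query.  The meaning of cap_data depends on cap:
 * a uint32_t of checksum flags for MAC_CAPAB_HCKSUM, unused for
 * MAC_CAPAB_POLL, and a multiaddress_capab_t to be filled in for
 * MAC_CAPAB_MULTIADDRESS.
 */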
3665 static boolean_t
3666 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3667 {
3668 	nxge_t *nxgep = arg;
3669 	uint32_t *txflags = cap_data;
3670 	multiaddress_capab_t *mmacp = cap_data;
3671 
3672 	switch (cap) {
3673 	case MAC_CAPAB_HCKSUM:
3674 		*txflags = HCKSUM_INET_PARTIAL;
3675 		break;
3676 	case MAC_CAPAB_POLL:
3677 		/*
		 * There's nothing for us to fill in; simply returning
3679 		 * B_TRUE stating that we support polling is sufficient.
3680 		 */
3681 		break;
3682 
3683 	case MAC_CAPAB_MULTIADDRESS:
3684 		mutex_enter(nxgep->genlock);
3685 
3686 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3687 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */
3689 		/*
3690 		 * maddr_handle is driver's private data, passed back to
3691 		 * entry point functions as arg.
3692 		 */
3693 		mmacp->maddr_handle	= nxgep;
3694 		mmacp->maddr_add	= nxge_m_mmac_add;
3695 		mmacp->maddr_remove	= nxge_m_mmac_remove;
3696 		mmacp->maddr_modify	= nxge_m_mmac_modify;
3697 		mmacp->maddr_get	= nxge_m_mmac_get;
3698 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
3699 
3700 		mutex_exit(nxgep->genlock);
3701 		break;
3702 	default:
3703 		return (B_FALSE);
3704 	}
3705 	return (B_TRUE);
3706 }
3707 
3708 /*
3709  * Module loading and removing entry points.
3710  */
3711 
3712 static	struct cb_ops 	nxge_cb_ops = {
3713 	nodev,			/* cb_open */
3714 	nodev,			/* cb_close */
3715 	nodev,			/* cb_strategy */
3716 	nodev,			/* cb_print */
3717 	nodev,			/* cb_dump */
3718 	nodev,			/* cb_read */
3719 	nodev,			/* cb_write */
3720 	nodev,			/* cb_ioctl */
3721 	nodev,			/* cb_devmap */
3722 	nodev,			/* cb_mmap */
3723 	nodev,			/* cb_segmap */
3724 	nochpoll,		/* cb_chpoll */
3725 	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
3727 	D_MP, 			/* cb_flag */
3728 	CB_REV,			/* rev */
3729 	nodev,			/* int (*cb_aread)() */
3730 	nodev			/* int (*cb_awrite)() */
3731 };
3732 
3733 static struct dev_ops nxge_dev_ops = {
3734 	DEVO_REV,		/* devo_rev */
3735 	0,			/* devo_refcnt */
	nulldev,		/* devo_getinfo */
3737 	nulldev,		/* devo_identify */
3738 	nulldev,		/* devo_probe */
3739 	nxge_attach,		/* devo_attach */
3740 	nxge_detach,		/* devo_detach */
3741 	nodev,			/* devo_reset */
3742 	&nxge_cb_ops,		/* devo_cb_ops */
3743 	(struct bus_ops *)NULL, /* devo_bus_ops	*/
3744 	ddi_power		/* devo_power */
3745 };
3746 
3747 extern	struct	mod_ops	mod_driverops;
3748 
3749 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet %I%"
3750 
3751 /*
3752  * Module linkage information for the kernel.
3753  */
3754 static struct modldrv 	nxge_modldrv = {
3755 	&mod_driverops,
3756 	NXGE_DESC_VER,
3757 	&nxge_dev_ops
3758 };
3759 
3760 static struct modlinkage modlinkage = {
3761 	MODREV_1, (void *) &nxge_modldrv, NULL
3762 };
3763 
3764 int
3765 _init(void)
3766 {
3767 	int		status;
3768 
3769 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3770 	mac_init_ops(&nxge_dev_ops, "nxge");
3771 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
3772 	if (status != 0) {
3773 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
3774 			"failed to init device soft state"));
3775 		goto _init_exit;
3776 	}
3777 
3778 	status = mod_install(&modlinkage);
3779 	if (status != 0) {
3780 		ddi_soft_state_fini(&nxge_list);
3781 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
3782 		goto _init_exit;
3783 	}
3784 
3785 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3786 
3787 _init_exit:
3788 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3789 
3790 	return (status);
3791 }
3792 
3793 int
3794 _fini(void)
3795 {
3796 	int		status;
3797 
3798 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3799 
3800 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3801 
3802 	if (nxge_mblks_pending)
3803 		return (EBUSY);
3804 
3805 	status = mod_remove(&modlinkage);
3806 	if (status != DDI_SUCCESS) {
3807 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
3808 			    "Module removal failed 0x%08x",
3809 			    status));
3810 		goto _fini_exit;
3811 	}
3812 
3813 	mac_fini_ops(&nxge_dev_ops);
3814 
3815 	ddi_soft_state_fini(&nxge_list);
3816 
3817 	MUTEX_DESTROY(&nxge_common_lock);
3818 _fini_exit:
3819 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3820 
3821 	return (status);
3822 }
3823 
3824 int
3825 _info(struct modinfo *modinfop)
3826 {
3827 	int		status;
3828 
3829 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3830 	status = mod_info(&modlinkage, modinfop);
3831 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3832 
3833 	return (status);
3834 }
3835 
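/*
 * Pick an interrupt type that the platform supports and that the
 * nxge_msi_enable tunable allows, then register the handlers through
 * the advanced DDI interrupt interfaces (nxge_add_intrs_adv).
 */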
3836 /*ARGSUSED*/
3837 static nxge_status_t
3838 nxge_add_intrs(p_nxge_t nxgep)
3839 {
3841 	int		intr_types;
3842 	int		type = 0;
3843 	int		ddi_status = DDI_SUCCESS;
3844 	nxge_status_t	status = NXGE_OK;
3845 
3846 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
3847 
3848 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
3849 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
3850 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
3851 	nxgep->nxge_intr_type.intr_added = 0;
3852 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
3853 	nxgep->nxge_intr_type.intr_type = 0;
3854 
3855 	if (nxgep->niu_type == N2_NIU) {
3856 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3857 	} else if (nxge_msi_enable) {
3858 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3859 	}
3860 
3861 	/* Get the supported interrupt types */
3862 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
3863 			!= DDI_SUCCESS) {
3864 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
3865 			"ddi_intr_get_supported_types failed: status 0x%08x",
3866 			ddi_status));
3867 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3868 	}
3869 	nxgep->nxge_intr_type.intr_types = intr_types;
3870 
3871 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3872 		"ddi_intr_get_supported_types: 0x%08x", intr_types));
3873 
3874 	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
3876 	 * nxge_msi_enable (1):
3877 	 *	1 - MSI		2 - MSI-X	others - FIXED
3878 	 */
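	/*
	 * For example, MSI-X could be requested from /etc/system
	 * (taking effect once MSI-X support arrives):
	 *
	 *	set nxge:nxge_msi_enable = 2
	 */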
3879 	switch (nxge_msi_enable) {
3880 	default:
3881 		type = DDI_INTR_TYPE_FIXED;
3882 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3883 			"use fixed (intx emulation) type %08x",
3884 			type));
3885 		break;
3886 
3887 	case 2:
3888 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3889 			"ddi_intr_get_supported_types: 0x%08x", intr_types));
3890 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3891 			type = DDI_INTR_TYPE_MSIX;
3892 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3893 				"ddi_intr_get_supported_types: MSIX 0x%08x",
3894 				type));
3895 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3896 			type = DDI_INTR_TYPE_MSI;
3897 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3898 				"ddi_intr_get_supported_types: MSI 0x%08x",
3899 				type));
3900 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3901 			type = DDI_INTR_TYPE_FIXED;
3902 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
				"ddi_intr_get_supported_types: FIXED 0x%08x",
3904 				type));
3905 		}
3906 		break;
3907 
3908 	case 1:
3909 		if (intr_types & DDI_INTR_TYPE_MSI) {
3910 			type = DDI_INTR_TYPE_MSI;
3911 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3912 				"ddi_intr_get_supported_types: MSI 0x%08x",
3913 				type));
3914 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3915 			type = DDI_INTR_TYPE_MSIX;
3916 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3917 				"ddi_intr_get_supported_types: MSIX 0x%08x",
3918 				type));
3919 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3920 			type = DDI_INTR_TYPE_FIXED;
3921 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
				"ddi_intr_get_supported_types: FIXED 0x%08x",
3923 				type));
3924 		}
3925 	}
3926 
3927 	nxgep->nxge_intr_type.intr_type = type;
3928 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3929 		type == DDI_INTR_TYPE_FIXED) &&
3930 			nxgep->nxge_intr_type.niu_msi_enable) {
3931 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
3932 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3933 				    " nxge_add_intrs: "
3934 				    " nxge_add_intrs_adv failed: status 0x%08x",
3935 				    status));
3936 			return (status);
3937 		} else {
3938 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3939 			"interrupts registered : type %d", type));
3940 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
3941 
3942 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
3943 				"\nAdded advanced nxge add_intr_adv "
3944 					"intr type 0x%x\n", type));
3945 
3946 			return (status);
3947 		}
3948 	}
3949 
3950 	if (!nxgep->nxge_intr_type.intr_registered) {
3951 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
3952 			"failed to register interrupts"));
3953 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3954 	}
3955 
3956 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
3957 	return (status);
3958 }
3959 
3960 /*ARGSUSED*/
3961 static nxge_status_t
3962 nxge_add_soft_intrs(p_nxge_t nxgep)
3963 {
3965 	int		ddi_status = DDI_SUCCESS;
3966 	nxge_status_t	status = NXGE_OK;
3967 
3968 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
3969 
3970 	nxgep->resched_id = NULL;
3971 	nxgep->resched_running = B_FALSE;
3972 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
3973 			&nxgep->resched_id,
3974 		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
3975 	if (ddi_status != DDI_SUCCESS) {
3976 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
			"ddi_add_softintr failed: status 0x%08x",
3978 			ddi_status));
3979 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3980 	}
3981 
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
3983 
3984 	return (status);
3985 }
3986 
3987 static nxge_status_t
3988 nxge_add_intrs_adv(p_nxge_t nxgep)
3989 {
3990 	int		intr_type;
3991 	p_nxge_intr_t	intrp;
3992 
3993 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
3994 
3995 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
3996 	intr_type = intrp->intr_type;
3997 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
3998 		intr_type));
3999 
4000 	switch (intr_type) {
4001 	case DDI_INTR_TYPE_MSI: /* 0x2 */
4002 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
4003 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
4004 
4005 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
4006 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
4007 
4008 	default:
4009 		return (NXGE_ERROR);
4010 	}
4011 }
4012 
4013 
4014 /*ARGSUSED*/
4015 static nxge_status_t
4016 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
4017 {
4018 	dev_info_t		*dip = nxgep->dip;
4019 	p_nxge_ldg_t		ldgp;
4020 	p_nxge_intr_t		intrp;
4021 	uint_t			*inthandler;
4022 	void			*arg1, *arg2;
4023 	int			behavior;
4024 	int			nintrs, navail;
4025 	int			nactual, nrequired;
4026 	int			inum = 0;
4027 	int			x, y;
4028 	int			ddi_status = DDI_SUCCESS;
4029 	nxge_status_t		status = NXGE_OK;
4030 
4031 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
4032 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4033 	intrp->start_inum = 0;
4034 
4035 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4036 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4037 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"ddi_intr_get_nintrs() failed, status: 0x%x, "
			    "nintrs: %d", ddi_status, nintrs));
4040 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4041 	}
4042 
4043 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4044 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4045 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"ddi_intr_get_navail() failed, status: 0x%x, "
			    "navail: %d", ddi_status, navail));
4048 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4049 	}
4050 
4051 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
4052 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
4053 		    nintrs, navail));
4054 
4055 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * The MSI count must be a power of two; round navail
		 * down to the nearest power of two (e.g. 12 becomes 8).
		 */
4057 		if ((navail & 16) == 16) {
4058 			navail = 16;
4059 		} else if ((navail & 8) == 8) {
4060 			navail = 8;
4061 		} else if ((navail & 4) == 4) {
4062 			navail = 4;
4063 		} else if ((navail & 2) == 2) {
4064 			navail = 2;
4065 		} else {
4066 			navail = 1;
4067 		}
4068 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4069 			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
4070 			"navail %d", nintrs, navail));
4071 	}
4072 
4073 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4074 			DDI_INTR_ALLOC_NORMAL);
4075 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4076 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4077 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4078 		    navail, &nactual, behavior);
4079 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4080 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4081 				    " ddi_intr_alloc() failed: %d",
4082 				    ddi_status));
4083 		kmem_free(intrp->htable, intrp->intr_size);
4084 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4085 	}
4086 
4087 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4088 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4089 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4090 				    " ddi_intr_get_pri() failed: %d",
4091 				    ddi_status));
4092 		/* Free already allocated interrupts */
4093 		for (y = 0; y < nactual; y++) {
4094 			(void) ddi_intr_free(intrp->htable[y]);
4095 		}
4096 
4097 		kmem_free(intrp->htable, intrp->intr_size);
4098 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4099 	}
4100 
4101 	nrequired = 0;
4102 	switch (nxgep->niu_type) {
4103 	case NEPTUNE:
4104 	case NEPTUNE_2:
4105 	default:
4106 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4107 		break;
4108 
4109 	case N2_NIU:
4110 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4111 		break;
4112 	}
4113 
4114 	if (status != NXGE_OK) {
4115 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_add_intrs_adv_type: nxge_ldgv_init "
4117 			"failed: 0x%x", status));
4118 		/* Free already allocated interrupts */
4119 		for (y = 0; y < nactual; y++) {
4120 			(void) ddi_intr_free(intrp->htable[y]);
4121 		}
4122 
4123 		kmem_free(intrp->htable, intrp->intr_size);
4124 		return (status);
4125 	}
4126 
4127 	ldgp = nxgep->ldgvp->ldgp;
4128 	for (x = 0; x < nrequired; x++, ldgp++) {
4129 		ldgp->vector = (uint8_t)x;
4130 		ldgp->intdata = SID_DATA(ldgp->func, x);
4131 		arg1 = ldgp->ldvp;
4132 		arg2 = nxgep;
4133 		if (ldgp->nldvs == 1) {
4134 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4135 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4136 				"nxge_add_intrs_adv_type: "
4137 				"arg1 0x%x arg2 0x%x: "
4138 				"1-1 int handler (entry %d intdata 0x%x)\n",
4139 				arg1, arg2,
4140 				x, ldgp->intdata));
4141 		} else if (ldgp->nldvs > 1) {
4142 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4143 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4144 				"nxge_add_intrs_adv_type: "
4145 				"arg1 0x%x arg2 0x%x: "
				"nldvs %d int handler "
4147 				"(entry %d intdata 0x%x)\n",
4148 				arg1, arg2,
4149 				ldgp->nldvs, x, ldgp->intdata));
4150 		}
4151 
4152 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4153 			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
4154 			"htable 0x%llx", x, intrp->htable[x]));
4155 
4156 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4157 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
4158 				!= DDI_SUCCESS) {
4159 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4160 				"==> nxge_add_intrs_adv_type: failed #%d "
4161 				"status 0x%x", x, ddi_status));
4162 			for (y = 0; y < intrp->intr_added; y++) {
4163 				(void) ddi_intr_remove_handler(
4164 						intrp->htable[y]);
4165 			}
4166 			/* Free already allocated intr */
4167 			for (y = 0; y < nactual; y++) {
4168 				(void) ddi_intr_free(intrp->htable[y]);
4169 			}
4170 			kmem_free(intrp->htable, intrp->intr_size);
4171 
4172 			(void) nxge_ldgv_uninit(nxgep);
4173 
4174 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4175 		}
4176 		intrp->intr_added++;
4177 	}
4178 
4179 	intrp->msi_intx_cnt = nactual;
4180 
4181 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4182 		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
4183 		navail, nactual,
4184 		intrp->msi_intx_cnt,
4185 		intrp->intr_added));
4186 
4187 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4188 
4189 	(void) nxge_intr_ldgv_init(nxgep);
4190 
4191 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
4192 
4193 	return (status);
4194 }
4195 
4196 /*ARGSUSED*/
4197 static nxge_status_t
4198 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
4199 {
4200 	dev_info_t		*dip = nxgep->dip;
4201 	p_nxge_ldg_t		ldgp;
4202 	p_nxge_intr_t		intrp;
4203 	uint_t			*inthandler;
4204 	void			*arg1, *arg2;
4205 	int			behavior;
4206 	int			nintrs, navail;
4207 	int			nactual, nrequired;
4208 	int			inum = 0;
4209 	int			x, y;
4210 	int			ddi_status = DDI_SUCCESS;
4211 	nxge_status_t		status = NXGE_OK;
4212 
4213 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
4214 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4215 	intrp->start_inum = 0;
4216 
4217 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4218 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4219 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"ddi_intr_get_nintrs() failed, status: 0x%x, "
			    "nintrs: %d", ddi_status, nintrs));
4222 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4223 	}
4224 
4225 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4226 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4227 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"ddi_intr_get_navail() failed, status: 0x%x, "
			    "navail: %d", ddi_status, navail));
4230 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4231 	}
4232 
4233 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
4235 		    nintrs, navail));
4236 
4237 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4238 			DDI_INTR_ALLOC_NORMAL);
4239 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4240 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4241 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4242 		    navail, &nactual, behavior);
4243 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4244 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4245 			    " ddi_intr_alloc() failed: %d",
4246 			    ddi_status));
4247 		kmem_free(intrp->htable, intrp->intr_size);
4248 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4249 	}
4250 
4251 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4252 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4253 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4254 				    " ddi_intr_get_pri() failed: %d",
4255 				    ddi_status));
4256 		/* Free already allocated interrupts */
4257 		for (y = 0; y < nactual; y++) {
4258 			(void) ddi_intr_free(intrp->htable[y]);
4259 		}
4260 
4261 		kmem_free(intrp->htable, intrp->intr_size);
4262 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4263 	}
4264 
4265 	nrequired = 0;
4266 	switch (nxgep->niu_type) {
4267 	case NEPTUNE:
4268 	case NEPTUNE_2:
4269 	default:
4270 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4271 		break;
4272 
4273 	case N2_NIU:
4274 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4275 		break;
4276 	}
4277 
4278 	if (status != NXGE_OK) {
4279 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4280 			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
4281 			"failed: 0x%x", status));
4282 		/* Free already allocated interrupts */
4283 		for (y = 0; y < nactual; y++) {
4284 			(void) ddi_intr_free(intrp->htable[y]);
4285 		}
4286 
4287 		kmem_free(intrp->htable, intrp->intr_size);
4288 		return (status);
4289 	}
4290 
4291 	ldgp = nxgep->ldgvp->ldgp;
4292 	for (x = 0; x < nrequired; x++, ldgp++) {
4293 		ldgp->vector = (uint8_t)x;
4294 		if (nxgep->niu_type != N2_NIU) {
4295 			ldgp->intdata = SID_DATA(ldgp->func, x);
4296 		}
4297 
4298 		arg1 = ldgp->ldvp;
4299 		arg2 = nxgep;
4300 		if (ldgp->nldvs == 1) {
4301 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4302 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4303 				"nxge_add_intrs_adv_type_fix: "
4304 				"1-1 int handler(%d) ldg %d ldv %d "
4305 				"arg1 $%p arg2 $%p\n",
4306 				x, ldgp->ldg, ldgp->ldvp->ldv,
4307 				arg1, arg2));
4308 		} else if (ldgp->nldvs > 1) {
4309 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4310 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4311 				"nxge_add_intrs_adv_type_fix: "
				"shared int handler(%d) nldvs %d ldg %d ldv %d "
4313 				"arg1 0x%016llx arg2 0x%016llx\n",
4314 				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4315 				arg1, arg2));
4316 		}
4317 
4318 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4319 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
4320 				!= DDI_SUCCESS) {
4321 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4322 				"==> nxge_add_intrs_adv_type_fix: failed #%d "
4323 				"status 0x%x", x, ddi_status));
4324 			for (y = 0; y < intrp->intr_added; y++) {
4325 				(void) ddi_intr_remove_handler(
4326 						intrp->htable[y]);
4327 			}
4328 			for (y = 0; y < nactual; y++) {
4329 				(void) ddi_intr_free(intrp->htable[y]);
4330 			}
4331 			/* Free already allocated intr */
4332 			kmem_free(intrp->htable, intrp->intr_size);
4333 
4334 			(void) nxge_ldgv_uninit(nxgep);
4335 
4336 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4337 		}
4338 		intrp->intr_added++;
4339 	}
4340 
4341 	intrp->msi_intx_cnt = nactual;
4342 
4343 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4344 
4345 	status = nxge_intr_ldgv_init(nxgep);
4346 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
4347 
4348 	return (status);
4349 }
4350 
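/*
 * Undo nxge_add_intrs(): disable the interrupts, remove the handlers,
 * free the allocated vectors and the handle table, and uninitialize
 * the logical device groups.
 */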
4351 static void
4352 nxge_remove_intrs(p_nxge_t nxgep)
4353 {
4354 	int		i, inum;
4355 	p_nxge_intr_t	intrp;
4356 
4357 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
4358 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4359 	if (!intrp->intr_registered) {
4360 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4361 			"<== nxge_remove_intrs: interrupts not registered"));
4362 		return;
4363 	}
4364 
4365 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
4366 
4367 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4368 		(void) ddi_intr_block_disable(intrp->htable,
4369 			intrp->intr_added);
4370 	} else {
4371 		for (i = 0; i < intrp->intr_added; i++) {
4372 			(void) ddi_intr_disable(intrp->htable[i]);
4373 		}
4374 	}
4375 
4376 	for (inum = 0; inum < intrp->intr_added; inum++) {
4377 		if (intrp->htable[inum]) {
4378 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4379 		}
4380 	}
4381 
4382 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4383 		if (intrp->htable[inum]) {
4384 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4385 				"nxge_remove_intrs: ddi_intr_free inum %d "
4386 				"msi_intx_cnt %d intr_added %d",
4387 				inum,
4388 				intrp->msi_intx_cnt,
4389 				intrp->intr_added));
4390 
4391 			(void) ddi_intr_free(intrp->htable[inum]);
4392 		}
4393 	}
4394 
4395 	kmem_free(intrp->htable, intrp->intr_size);
4396 	intrp->intr_registered = B_FALSE;
4397 	intrp->intr_enabled = B_FALSE;
4398 	intrp->msi_intx_cnt = 0;
4399 	intrp->intr_added = 0;
4400 
4401 	(void) nxge_ldgv_uninit(nxgep);
4402 
4403 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
4404 }
4405 
4406 /*ARGSUSED*/
4407 static void
4408 nxge_remove_soft_intrs(p_nxge_t nxgep)
4409 {
4410 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
4411 	if (nxgep->resched_id) {
4412 		ddi_remove_softintr(nxgep->resched_id);
4413 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4414 			"==> nxge_remove_soft_intrs: removed"));
4415 		nxgep->resched_id = NULL;
4416 	}
4417 
4418 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
4419 }
4420 
4421 /*ARGSUSED*/
4422 static void
4423 nxge_intrs_enable(p_nxge_t nxgep)
4424 {
4425 	p_nxge_intr_t	intrp;
4426 	int		i;
4427 	int		status;
4428 
4429 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
4430 
4431 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4432 
4433 	if (!intrp->intr_registered) {
4434 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
4435 			"interrupts are not registered"));
4436 		return;
4437 	}
4438 
4439 	if (intrp->intr_enabled) {
4440 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4441 			"<== nxge_intrs_enable: already enabled"));
4442 		return;
4443 	}
4444 
4445 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4446 		status = ddi_intr_block_enable(intrp->htable,
4447 			intrp->intr_added);
4448 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
4449 			"block enable - status 0x%x total inums #%d\n",
4450 			status, intrp->intr_added));
4451 	} else {
4452 		for (i = 0; i < intrp->intr_added; i++) {
4453 			status = ddi_intr_enable(intrp->htable[i]);
4454 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
4455 				"ddi_intr_enable:enable - status 0x%x "
4456 				"total inums %d enable inum #%d\n",
4457 				status, intrp->intr_added, i));
4458 			if (status == DDI_SUCCESS) {
4459 				intrp->intr_enabled = B_TRUE;
4460 			}
4461 		}
4462 	}
4463 
4464 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
4465 }
4466 
4467 /*ARGSUSED*/
4468 static void
4469 nxge_intrs_disable(p_nxge_t nxgep)
4470 {
4471 	p_nxge_intr_t	intrp;
4472 	int		i;
4473 
4474 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
4475 
4476 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4477 
4478 	if (!intrp->intr_registered) {
4479 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
4480 			"interrupts are not registered"));
4481 		return;
4482 	}
4483 
4484 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4485 		(void) ddi_intr_block_disable(intrp->htable,
4486 			intrp->intr_added);
4487 	} else {
4488 		for (i = 0; i < intrp->intr_added; i++) {
4489 			(void) ddi_intr_disable(intrp->htable[i]);
4490 		}
4491 	}
4492 
4493 	intrp->intr_enabled = B_FALSE;
4494 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
4495 }
4496 
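/*
 * Register this instance with the GLDv3 MAC layer.  The maximum SDU
 * leaves room within the configured maximum frame size for the
 * Ethernet header, the FCS and (presumably) a 4-byte VLAN tag.
 */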
4497 static nxge_status_t
4498 nxge_mac_register(p_nxge_t nxgep)
4499 {
4500 	mac_register_t *macp;
4501 	int		status;
4502 
4503 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
4504 
4505 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4506 		return (NXGE_ERROR);
4507 
4508 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4509 	macp->m_driver = nxgep;
4510 	macp->m_dip = nxgep->dip;
4511 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
4512 	macp->m_callbacks = &nxge_m_callbacks;
4513 	macp->m_min_sdu = 0;
4514 	macp->m_max_sdu = nxgep->mac.maxframesize -
4515 		sizeof (struct ether_header) - ETHERFCSL - 4;
4516 
4517 	status = mac_register(macp, &nxgep->mach);
4518 	mac_free(macp);
4519 
4520 	if (status != 0) {
4521 		cmn_err(CE_WARN,
4522 			"!nxge_mac_register failed (status %d instance %d)",
4523 			status, nxgep->instance);
4524 		return (NXGE_ERROR);
4525 	}
4526 
4527 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
4528 		"(instance %d)", nxgep->instance));
4529 
4530 	return (NXGE_OK);
4531 }
4532 
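/*
 * Fault-injection ioctl handler.  mp->b_cont is expected to carry an
 * err_inject_t naming the target block, the error and the channel.
 * A sketch of the payload a test tool might build (values here are
 * placeholders):
 *
 *	err_inject_t ei;
 *
 *	ei.blk_id = TXDMA_BLK_ID;	block to inject into
 *	ei.err_id = <error id>;		block-specific error code
 *	ei.chan = <dma channel>;	DMA channel, where applicable
 */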
4533 void
4534 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
4535 {
4536 	ssize_t		size;
4537 	mblk_t		*nmp;
4538 	uint8_t		blk_id;
4539 	uint8_t		chan;
4540 	uint32_t	err_id;
4541 	err_inject_t	*eip;
4542 
4543 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
4544 
4545 	size = 1024;
4546 	nmp = mp->b_cont;
4547 	eip = (err_inject_t *)nmp->b_rptr;
4548 	blk_id = eip->blk_id;
4549 	err_id = eip->err_id;
4550 	chan = eip->chan;
4551 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
4552 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
4553 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
4554 	switch (blk_id) {
4555 	case MAC_BLK_ID:
4556 		break;
4557 	case TXMAC_BLK_ID:
4558 		break;
4559 	case RXMAC_BLK_ID:
4560 		break;
4561 	case MIF_BLK_ID:
4562 		break;
4563 	case IPP_BLK_ID:
4564 		nxge_ipp_inject_err(nxgep, err_id);
4565 		break;
4566 	case TXC_BLK_ID:
4567 		nxge_txc_inject_err(nxgep, err_id);
4568 		break;
4569 	case TXDMA_BLK_ID:
4570 		nxge_txdma_inject_err(nxgep, err_id, chan);
4571 		break;
4572 	case RXDMA_BLK_ID:
4573 		nxge_rxdma_inject_err(nxgep, err_id, chan);
4574 		break;
4575 	case ZCP_BLK_ID:
4576 		nxge_zcp_inject_err(nxgep, err_id);
4577 		break;
4578 	case ESPC_BLK_ID:
4579 		break;
4580 	case FFLP_BLK_ID:
4581 		break;
4582 	case PHY_BLK_ID:
4583 		break;
4584 	case ETHER_SERDES_BLK_ID:
4585 		break;
4586 	case PCIE_SERDES_BLK_ID:
4587 		break;
4588 	case VIR_BLK_ID:
4589 		break;
4590 	}
4591 
4592 	nmp->b_wptr = nmp->b_rptr + size;
4593 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
4594 
4595 	miocack(wq, mp, (int)size, 0);
4596 }
4597 
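/*
 * Attach this instance to the software state shared by all functions
 * of one Neptune device: search nxge_hw_list for an entry whose
 * parent_devp matches this instance's parent dip, and create a new
 * entry (with its per-chip locks) at the head of the list if none
 * is found.
 */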
4598 static int
4599 nxge_init_common_dev(p_nxge_t nxgep)
4600 {
4601 	p_nxge_hw_list_t	hw_p;
4602 	dev_info_t 		*p_dip;
4603 
4604 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
4605 
4606 	p_dip = nxgep->p_dip;
4607 	MUTEX_ENTER(&nxge_common_lock);
4608 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4609 		"==> nxge_init_common_dev:func # %d",
4610 			nxgep->function_num));
4611 	/*
	 * Loop through the existing per-Neptune hardware list.
4613 	 */
4614 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
4615 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4616 			"==> nxge_init_common_device:func # %d "
4617 			"hw_p $%p parent dip $%p",
4618 			nxgep->function_num,
4619 			hw_p,
4620 			p_dip));
4621 		if (hw_p->parent_devp == p_dip) {
4622 			nxgep->nxge_hw_p = hw_p;
4623 			hw_p->ndevs++;
4624 			hw_p->nxge_p[nxgep->function_num] = nxgep;
4625 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4626 				"==> nxge_init_common_device:func # %d "
4627 				"hw_p $%p parent dip $%p "
4628 				"ndevs %d (found)",
4629 				nxgep->function_num,
4630 				hw_p,
4631 				p_dip,
4632 				hw_p->ndevs));
4633 			break;
4634 		}
4635 	}
4636 
4637 	if (hw_p == NULL) {
4638 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4639 			"==> nxge_init_common_device:func # %d "
4640 			"parent dip $%p (new)",
4641 			nxgep->function_num,
4642 			p_dip));
4643 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
4644 		hw_p->parent_devp = p_dip;
4645 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
4646 		nxgep->nxge_hw_p = hw_p;
4647 		hw_p->ndevs++;
4648 		hw_p->nxge_p[nxgep->function_num] = nxgep;
4649 		hw_p->next = nxge_hw_list;
4650 
4651 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4652 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4653 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4654 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
4655 		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
4656 
4657 		nxge_hw_list = hw_p;
4658 	}
4659 
4660 	MUTEX_EXIT(&nxge_common_lock);
4661 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4662 		"==> nxge_init_common_device (nxge_hw_list) $%p",
4663 		nxge_hw_list));
4664 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
4665 
4666 	return (NXGE_OK);
4667 }
4668 
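/*
 * Detach this instance from the shared per-Neptune state.  The last
 * function to detach destroys the shared locks, unlinks the entry
 * from nxge_hw_list and frees it.
 */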
4669 static void
4670 nxge_uninit_common_dev(p_nxge_t nxgep)
4671 {
4672 	p_nxge_hw_list_t	hw_p, h_hw_p;
4673 	dev_info_t 		*p_dip;
4674 
4675 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
4676 	if (nxgep->nxge_hw_p == NULL) {
4677 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4678 			"<== nxge_uninit_common_device (no common)"));
4679 		return;
4680 	}
4681 
4682 	MUTEX_ENTER(&nxge_common_lock);
4683 	h_hw_p = nxge_hw_list;
4684 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
4685 		p_dip = hw_p->parent_devp;
4686 		if (nxgep->nxge_hw_p == hw_p &&
4687 			p_dip == nxgep->p_dip &&
4688 			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
4689 			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
4690 
4691 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4692 				"==> nxge_uninit_common_device:func # %d "
4693 				"hw_p $%p parent dip $%p "
4694 				"ndevs %d (found)",
4695 				nxgep->function_num,
4696 				hw_p,
4697 				p_dip,
4698 				hw_p->ndevs));
4699 
4700 			nxgep->nxge_hw_p = NULL;
4701 			if (hw_p->ndevs) {
4702 				hw_p->ndevs--;
4703 			}
4704 			hw_p->nxge_p[nxgep->function_num] = NULL;
4705 			if (!hw_p->ndevs) {
4706 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
4707 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
4708 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
4709 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
4710 				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
4711 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4712 					"==> nxge_uninit_common_device: "
4713 					"func # %d "
4714 					"hw_p $%p parent dip $%p "
4715 					"ndevs %d (last)",
4716 					nxgep->function_num,
4717 					hw_p,
4718 					p_dip,
4719 					hw_p->ndevs));
4720 
4721 				if (hw_p == nxge_hw_list) {
4722 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4723 						"==> nxge_uninit_common_device:"
4724 						"remove head func # %d "
4725 						"hw_p $%p parent dip $%p "
4726 						"ndevs %d (head)",
4727 						nxgep->function_num,
4728 						hw_p,
4729 						p_dip,
4730 						hw_p->ndevs));
4731 					nxge_hw_list = hw_p->next;
4732 				} else {
4733 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4734 						"==> nxge_uninit_common_device:"
4735 						"remove middle func # %d "
4736 						"hw_p $%p parent dip $%p "
4737 						"ndevs %d (middle)",
4738 						nxgep->function_num,
4739 						hw_p,
4740 						p_dip,
4741 						hw_p->ndevs));
4742 					h_hw_p->next = hw_p->next;
4743 				}
4744 
4745 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
4746 			}
4747 			break;
4748 		} else {
4749 			h_hw_p = hw_p;
4750 		}
4751 	}
4752 
4753 	MUTEX_EXIT(&nxge_common_lock);
4754 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4755 		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
4756 		nxge_hw_list));
4757 
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
4759 }
4760