xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_main.c (revision 5da9ad7bc7967714b6c6e02dcfe8e6f7cc2d6098)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
30  */
31 #include	<sys/nxge/nxge_impl.h>
32 #include	<sys/pcie.h>
33 
34 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
uint32_t 	nxge_dma_obp_props_only = 1;	/* use OBP-published props */
uint32_t 	nxge_use_rdc_intr = 1;		/* debug flag: assign RDC intrs */
37 /*
38  * PSARC/2007/453 MSI-X interrupt limit override
39  * (This PSARC case is limited to MSI-X vectors
40  *  and SPARC platforms only).
41  */
42 #if defined(_BIG_ENDIAN)
43 uint32_t	nxge_msi_enable = 2;
44 #else
45 uint32_t	nxge_msi_enable = 1;
46 #endif
47 
48 /*
49  * Globals: tunable parameters (/etc/system or adb)
50  *
51  */
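/*
 * For example, a line like the following in /etc/system overrides the
 * default receive block ring size at the next boot (an illustrative
 * sketch; the value shown is hypothetical):
 *
 *	set nxge:nxge_rbr_size = 2048
 */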
52 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
53 uint32_t 	nxge_rbr_spare_size = 0;
54 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
55 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
56 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
57 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
58 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
59 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
60 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
61 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
62 boolean_t	nxge_jumbo_enable = B_FALSE;
63 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
64 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
65 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
66 
67 /* MAX LSO size */
68 #define		NXGE_LSO_MAXLEN	65535
69 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
70 
71 /*
72  * Debugging flags:
 *		nxge_no_tx_lb:	   nonzero disables transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP/UDP port (default)
 *				   3 - DEST MAC
76  */
77 uint32_t 	nxge_no_tx_lb = 0;
78 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
79 
80 /*
 * Tunable to limit the number of packets processed per interrupt,
 * reducing the time the ISR spends doing Rx processing.
83  */
84 uint32_t nxge_max_rx_pkts = 1024;
85 
86 /*
87  * Tunables to manage the receive buffer blocks.
88  *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only buffers up to the tunable block size type.
92  */
93 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
94 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
95 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
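
/*
 * Like the tunables above, these can be patched on a live system with
 * mdb(1); for example (an illustrative sketch with a hypothetical
 * value):
 *
 *	# echo 'nxge_rx_threshold_hi/W 4' | mdb -kw
 */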
96 
97 rtrace_t npi_rtracebuf;
98 
99 #if	defined(sun4v)
100 /*
101  * Hypervisor N2/NIU services information.
102  */
103 static hsvc_info_t niu_hsvc = {
104 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
105 	NIU_MINOR_VER, "nxge"
106 };
107 #endif
108 
109 /*
110  * Function Prototypes
111  */
112 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
113 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
114 static void nxge_unattach(p_nxge_t);
115 
116 #if NXGE_PROPERTY
117 static void nxge_remove_hard_properties(p_nxge_t);
118 #endif
119 
120 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
121 
122 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
123 static void nxge_destroy_mutexes(p_nxge_t);
124 
125 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
126 static void nxge_unmap_regs(p_nxge_t nxgep);
127 #ifdef	NXGE_DEBUG
128 static void nxge_test_map_regs(p_nxge_t nxgep);
129 #endif
130 
131 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
132 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
133 static void nxge_remove_intrs(p_nxge_t nxgep);
134 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
135 
136 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
137 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
138 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
139 static void nxge_intrs_enable(p_nxge_t nxgep);
140 static void nxge_intrs_disable(p_nxge_t nxgep);
141 
142 static void nxge_suspend(p_nxge_t);
143 static nxge_status_t nxge_resume(p_nxge_t);
144 
145 static nxge_status_t nxge_setup_dev(p_nxge_t);
146 static void nxge_destroy_dev(p_nxge_t);
147 
148 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
149 static void nxge_free_mem_pool(p_nxge_t);
150 
151 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
152 static void nxge_free_rx_mem_pool(p_nxge_t);
153 
154 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
155 static void nxge_free_tx_mem_pool(p_nxge_t);
156 
157 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
158 	struct ddi_dma_attr *,
159 	size_t, ddi_device_acc_attr_t *, uint_t,
160 	p_nxge_dma_common_t);
161 
162 static void nxge_dma_mem_free(p_nxge_dma_common_t);
163 
164 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
165 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
166 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
167 
168 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
169 	p_nxge_dma_common_t *, size_t);
170 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
171 
172 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
173 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
174 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
175 
176 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
177 	p_nxge_dma_common_t *,
178 	size_t);
179 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
180 
181 static int nxge_init_common_dev(p_nxge_t);
182 static void nxge_uninit_common_dev(p_nxge_t);
183 
184 /*
185  * The next declarations are for the GLDv3 interface.
186  */
187 static int nxge_m_start(void *);
188 static void nxge_m_stop(void *);
189 static int nxge_m_unicst(void *, const uint8_t *);
190 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
191 static int nxge_m_promisc(void *, boolean_t);
192 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
193 static void nxge_m_resources(void *);
194 mblk_t *nxge_m_tx(void *arg, mblk_t *);
195 static nxge_status_t nxge_mac_register(p_nxge_t);
196 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
197 	mac_addr_slot_t slot);
198 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
199 	boolean_t factory);
200 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
201 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
202 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
203 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
204 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
205 static	boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
206 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
207     uint_t, const void *);
208 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
209     uint_t, void *);
210 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
211     const void *);
212 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t,
213     void *);
214 
219 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
220 #define	MAX_DUMP_SZ 256
221 
222 #define	NXGE_M_CALLBACK_FLAGS	\
223 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
224 
225 static mac_callbacks_t nxge_m_callbacks = {
226 	NXGE_M_CALLBACK_FLAGS,
227 	nxge_m_stat,
228 	nxge_m_start,
229 	nxge_m_stop,
230 	nxge_m_promisc,
231 	nxge_m_multicst,
232 	nxge_m_unicst,
233 	nxge_m_tx,
234 	nxge_m_resources,
235 	nxge_m_ioctl,
236 	nxge_m_getcapab,
237 	NULL,
238 	NULL,
239 	nxge_m_setprop,
240 	nxge_m_getprop
241 };
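
/*
 * These callbacks are handed to the GLDv3 framework by
 * nxge_mac_register(), defined later in this file.  A minimal sketch
 * of that registration sequence (illustrative only; error handling
 * and the SDU/source-address setup are elided):
 *
 *	mac_register_t	*macp = mac_alloc(MAC_VERSION);
 *
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = nxgep;
 *	macp->m_dip = nxgep->dip;
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	status = mac_register(macp, &nxgep->mach);
 *	mac_free(macp);
 */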
242 
void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
245 
246 /* PSARC/2007/453 MSI-X interrupt limit override. */
247 #define	NXGE_MSIX_REQUEST_10G	8
248 #define	NXGE_MSIX_REQUEST_1G	2
249 static int nxge_create_msi_property(p_nxge_t);
250 
251 /*
252  * These global variables control the message
253  * output.
254  */
255 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
256 uint64_t nxge_debug_level = 0;
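
/*
 * Both can be set in /etc/system or patched on a live system with
 * mdb(1); for example, to enable all debug messages (illustrative):
 *
 *	# echo 'nxge_debug_level/Z 0xffffffffffffffff' | mdb -kw
 */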
257 
258 /*
259  * This list contains the instance structures for the Neptune
260  * devices present in the system. The lock exists to guarantee
261  * mutually exclusive access to the list.
262  */
263 void 			*nxge_list = NULL;
264 
265 void			*nxge_hw_list = NULL;
266 nxge_os_mutex_t 	nxge_common_lock;
267 
268 extern uint64_t 	npi_debug_level;
269 
270 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
271 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
272 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
273 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
274 extern void		nxge_fm_init(p_nxge_t,
275 					ddi_device_acc_attr_t *,
276 					ddi_device_acc_attr_t *,
277 					ddi_dma_attr_t *);
278 extern void		nxge_fm_fini(p_nxge_t);
279 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
280 
281 /*
282  * Count used to maintain the number of buffers being used
283  * by Neptune instances and loaned up to the upper layers.
284  */
285 uint32_t nxge_mblks_pending = 0;
286 
287 /*
288  * Device register access attributes for PIO.
289  */
290 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
291 	DDI_DEVICE_ATTR_V0,
292 	DDI_STRUCTURE_LE_ACC,
293 	DDI_STRICTORDER_ACC,
294 };
295 
296 /*
297  * Device descriptor access attributes for DMA.
298  */
299 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
300 	DDI_DEVICE_ATTR_V0,
301 	DDI_STRUCTURE_LE_ACC,
302 	DDI_STRICTORDER_ACC
303 };
304 
305 /*
306  * Device buffer access attributes for DMA.
307  */
308 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
309 	DDI_DEVICE_ATTR_V0,
310 	DDI_STRUCTURE_BE_ACC,
311 	DDI_STRICTORDER_ACC
312 };
313 
314 ddi_dma_attr_t nxge_desc_dma_attr = {
315 	DMA_ATTR_V0,		/* version number. */
316 	0,			/* low address */
317 	0xffffffffffffffff,	/* high address */
318 	0xffffffffffffffff,	/* address counter max */
319 #ifndef NIU_PA_WORKAROUND
320 	0x100000,		/* alignment */
321 #else
322 	0x2000,
323 #endif
324 	0xfc00fc,		/* dlim_burstsizes */
325 	0x1,			/* minimum transfer size */
326 	0xffffffffffffffff,	/* maximum transfer size */
327 	0xffffffffffffffff,	/* maximum segment size */
328 	1,			/* scatter/gather list length */
329 	(unsigned int) 1,	/* granularity */
330 	0			/* attribute flags */
331 };
332 
333 ddi_dma_attr_t nxge_tx_dma_attr = {
334 	DMA_ATTR_V0,		/* version number. */
335 	0,			/* low address */
336 	0xffffffffffffffff,	/* high address */
337 	0xffffffffffffffff,	/* address counter max */
338 #if defined(_BIG_ENDIAN)
339 	0x2000,			/* alignment */
340 #else
341 	0x1000,			/* alignment */
342 #endif
343 	0xfc00fc,		/* dlim_burstsizes */
344 	0x1,			/* minimum transfer size */
345 	0xffffffffffffffff,	/* maximum transfer size */
346 	0xffffffffffffffff,	/* maximum segment size */
347 	5,			/* scatter/gather list length */
348 	(unsigned int) 1,	/* granularity */
349 	0			/* attribute flags */
350 };
351 
352 ddi_dma_attr_t nxge_rx_dma_attr = {
353 	DMA_ATTR_V0,		/* version number. */
354 	0,			/* low address */
355 	0xffffffffffffffff,	/* high address */
356 	0xffffffffffffffff,	/* address counter max */
357 	0x2000,			/* alignment */
358 	0xfc00fc,		/* dlim_burstsizes */
359 	0x1,			/* minimum transfer size */
360 	0xffffffffffffffff,	/* maximum transfer size */
361 	0xffffffffffffffff,	/* maximum segment size */
362 	1,			/* scatter/gather list length */
363 	(unsigned int) 1,	/* granularity */
364 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
365 };
366 
367 ddi_dma_lim_t nxge_dma_limits = {
368 	(uint_t)0,		/* dlim_addr_lo */
369 	(uint_t)0xffffffff,	/* dlim_addr_hi */
370 	(uint_t)0xffffffff,	/* dlim_cntr_max */
371 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
372 	0x1,			/* dlim_minxfer */
373 	1024			/* dlim_speed */
374 };
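
/*
 * A typical consumer of the attribute structures above (a minimal
 * sketch; the driver's real allocation path is nxge_dma_mem_alloc(),
 * later in this file):
 *
 *	ddi_dma_handle_t dmah;
 *
 *	if (ddi_dma_alloc_handle(nxgep->dip, &nxge_rx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &dmah) != DDI_SUCCESS)
 *		return (NXGE_ERROR);
 */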
375 
376 dma_method_t nxge_force_dma = DVMA;
377 
378 /*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
383  */
384 #ifdef NIU_PA_WORKAROUND
385 size_t alloc_sizes [] = {0x2000};
386 #else
387 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
388 		0x10000, 0x20000, 0x40000, 0x80000,
389 		0x100000, 0x200000, 0x400000, 0x800000,
390 		0x1000000, 0x2000000, 0x4000000};
391 #endif
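
/*
 * For example (illustrative arithmetic): with the default 4 KB (0x1000)
 * receive block size, a single 0x400000 (4 MB) chunk can back 1024
 * buffer blocks, so one chunk typically covers a large part of a ring.
 */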
392 
/*
 * nxge_attach - Driver attach(9E) entry point: map the device
 * registers, set up driver state and interrupts, and register
 * with the GLDv3 MAC layer.
 */
397 static int
398 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
399 {
400 	p_nxge_t	nxgep = NULL;
401 	int		instance;
402 	int		status = DDI_SUCCESS;
403 	uint8_t		portn;
404 	nxge_mmac_t	*mmac_info;
405 
406 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
407 
408 	/*
409 	 * Get the device instance since we'll need to setup
	 * Get the device instance since we'll need to set up
411 	 */
412 	instance = ddi_get_instance(dip);
413 
414 	switch (cmd) {
415 	case DDI_ATTACH:
416 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
417 		break;
418 
419 	case DDI_RESUME:
420 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
421 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
422 		if (nxgep == NULL) {
423 			status = DDI_FAILURE;
424 			break;
425 		}
426 		if (nxgep->dip != dip) {
427 			status = DDI_FAILURE;
428 			break;
429 		}
430 		if (nxgep->suspended == DDI_PM_SUSPEND) {
431 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
432 		} else {
433 			status = nxge_resume(nxgep);
434 		}
435 		goto nxge_attach_exit;
436 
437 	case DDI_PM_RESUME:
438 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
439 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
440 		if (nxgep == NULL) {
441 			status = DDI_FAILURE;
442 			break;
443 		}
444 		if (nxgep->dip != dip) {
445 			status = DDI_FAILURE;
446 			break;
447 		}
448 		status = nxge_resume(nxgep);
449 		goto nxge_attach_exit;
450 
451 	default:
452 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
453 		status = DDI_FAILURE;
454 		goto nxge_attach_exit;
455 	}
456 
458 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
459 		status = DDI_FAILURE;
460 		goto nxge_attach_exit;
461 	}
462 
463 	nxgep = ddi_get_soft_state(nxge_list, instance);
464 	if (nxgep == NULL) {
465 		status = NXGE_ERROR;
466 		goto nxge_attach_fail2;
467 	}
468 
469 	nxgep->nxge_magic = NXGE_MAGIC;
470 
471 	nxgep->drv_state = 0;
472 	nxgep->dip = dip;
473 	nxgep->instance = instance;
474 	nxgep->p_dip = ddi_get_parent(dip);
475 	nxgep->nxge_debug_level = nxge_debug_level;
476 	npi_debug_level = nxge_debug_level;
477 
478 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
479 				&nxge_rx_dma_attr);
480 
481 	status = nxge_map_regs(nxgep);
482 	if (status != NXGE_OK) {
483 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
484 		goto nxge_attach_fail3;
485 	}
486 
487 	status = nxge_init_common_dev(nxgep);
488 	if (status != NXGE_OK) {
489 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
490 			"nxge_init_common_dev failed"));
491 		goto nxge_attach_fail4;
492 	}
493 
494 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
495 		if (nxgep->function_num > 1) {
496 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
497 			    " function %d. Only functions 0 and 1 are "
498 			    "supported for this card.", nxgep->function_num));
499 			status = NXGE_ERROR;
500 			goto nxge_attach_fail4;
501 		}
502 	}
503 
504 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
505 	nxgep->mac.portnum = portn;
506 	if ((portn == 0) || (portn == 1))
507 		nxgep->mac.porttype = PORT_TYPE_XMAC;
508 	else
509 		nxgep->mac.porttype = PORT_TYPE_BMAC;
510 	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, and the remaining 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
514 	 */
515 	mmac_info = &nxgep->nxge_mmac_info;
516 	if (nxgep->function_num < 2) {
517 		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
518 		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
519 	} else {
520 		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
521 		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
522 	}
523 	/*
	 * Set up the NDD parameters for this instance.
525 	 */
526 	nxge_init_param(nxgep);
527 
528 	/*
	 * Set up the register tracing buffer.
530 	 */
531 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
532 
533 	/* init stats ptr */
534 	nxge_init_statsp(nxgep);
535 
536 	/*
	 * Read the VPD info from the EEPROM into a local data
	 * structure and check it for validity.
539 	 */
540 	nxge_vpd_info_get(nxgep);
541 
542 	status = nxge_xcvr_find(nxgep);
543 
544 	if (status != NXGE_OK) {
545 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
546 				    " Couldn't determine card type"
547 				    " .... exit "));
548 		goto nxge_attach_fail5;
549 	}
550 
551 	status = nxge_get_config_properties(nxgep);
552 
553 	if (status != NXGE_OK) {
554 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
555 		goto nxge_attach_fail;
556 	}
557 
558 	/*
	 * Set up the kstats for the driver.
560 	 */
561 	nxge_setup_kstats(nxgep);
562 
563 	nxge_setup_param(nxgep);
564 
565 	status = nxge_setup_system_dma_pages(nxgep);
566 	if (status != NXGE_OK) {
567 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
568 		goto nxge_attach_fail;
569 	}
570 
571 #if	defined(sun4v)
572 	if (nxgep->niu_type == N2_NIU) {
573 		nxgep->niu_hsvc_available = B_FALSE;
574 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
575 		if ((status =
576 			hsvc_register(&nxgep->niu_hsvc,
577 					&nxgep->niu_min_ver)) != 0) {
578 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
579 					"nxge_attach: "
580 					"%s: cannot negotiate "
581 					"hypervisor services "
582 					"revision %d "
583 					"group: 0x%lx "
584 					"major: 0x%lx minor: 0x%lx "
585 					"errno: %d",
586 					niu_hsvc.hsvc_modname,
587 					niu_hsvc.hsvc_rev,
588 					niu_hsvc.hsvc_group,
589 					niu_hsvc.hsvc_major,
590 					niu_hsvc.hsvc_minor,
591 					status));
592 				status = DDI_FAILURE;
593 				goto nxge_attach_fail;
594 		}
595 
596 		nxgep->niu_hsvc_available = B_TRUE;
597 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
598 			"NIU Hypervisor service enabled"));
599 	}
600 #endif
601 
602 	nxge_hw_id_init(nxgep);
603 	nxge_hw_init_niu_common(nxgep);
604 
605 	status = nxge_setup_mutexes(nxgep);
606 	if (status != NXGE_OK) {
607 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
608 		goto nxge_attach_fail;
609 	}
610 
611 	status = nxge_setup_dev(nxgep);
612 	if (status != DDI_SUCCESS) {
613 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
614 		goto nxge_attach_fail;
615 	}
616 
617 	status = nxge_add_intrs(nxgep);
618 	if (status != DDI_SUCCESS) {
619 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
620 		goto nxge_attach_fail;
621 	}
622 	status = nxge_add_soft_intrs(nxgep);
623 	if (status != DDI_SUCCESS) {
624 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
625 		goto nxge_attach_fail;
626 	}
627 
628 	/*
629 	 * Enable interrupts.
630 	 */
631 	nxge_intrs_enable(nxgep);
632 
633 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
634 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
635 			"unable to register to mac layer (%d)", status));
636 		goto nxge_attach_fail;
637 	}
638 
639 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
640 
641 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
642 		instance));
643 
644 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
645 
646 	goto nxge_attach_exit;
647 
648 nxge_attach_fail:
649 	nxge_unattach(nxgep);
650 	goto nxge_attach_fail1;
651 
652 nxge_attach_fail5:
653 	/*
654 	 * Tear down the ndd parameters setup.
655 	 */
656 	nxge_destroy_param(nxgep);
657 
658 	/*
659 	 * Tear down the kstat setup.
660 	 */
661 	nxge_destroy_kstats(nxgep);
662 
663 nxge_attach_fail4:
664 	if (nxgep->nxge_hw_p) {
665 		nxge_uninit_common_dev(nxgep);
666 		nxgep->nxge_hw_p = NULL;
667 	}
668 
669 nxge_attach_fail3:
670 	/*
671 	 * Unmap the register setup.
672 	 */
673 	nxge_unmap_regs(nxgep);
674 
675 	nxge_fm_fini(nxgep);
676 
677 nxge_attach_fail2:
678 	ddi_soft_state_free(nxge_list, nxgep->instance);
679 
680 nxge_attach_fail1:
681 	if (status != NXGE_OK)
682 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
683 	nxgep = NULL;
684 
685 nxge_attach_exit:
686 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
687 		status));
688 
689 	return (status);
690 }
691 
692 static int
693 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
694 {
695 	int 		status = DDI_SUCCESS;
696 	int 		instance;
697 	p_nxge_t 	nxgep = NULL;
698 
699 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
700 	instance = ddi_get_instance(dip);
701 	nxgep = ddi_get_soft_state(nxge_list, instance);
702 	if (nxgep == NULL) {
703 		status = DDI_FAILURE;
704 		goto nxge_detach_exit;
705 	}
706 
707 	switch (cmd) {
708 	case DDI_DETACH:
709 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
710 		break;
711 
712 	case DDI_PM_SUSPEND:
713 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
714 		nxgep->suspended = DDI_PM_SUSPEND;
715 		nxge_suspend(nxgep);
716 		break;
717 
718 	case DDI_SUSPEND:
719 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
720 		if (nxgep->suspended != DDI_PM_SUSPEND) {
721 			nxgep->suspended = DDI_SUSPEND;
722 			nxge_suspend(nxgep);
723 		}
724 		break;
725 
726 	default:
727 		status = DDI_FAILURE;
728 	}
729 
730 	if (cmd != DDI_DETACH)
731 		goto nxge_detach_exit;
732 
733 	/*
734 	 * Stop the xcvr polling.
735 	 */
736 	nxgep->suspended = cmd;
737 
738 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
739 
740 	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
741 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
742 			"<== nxge_detach status = 0x%08X", status));
743 		return (DDI_FAILURE);
744 	}
745 
746 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
747 		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
748 
749 	nxge_unattach(nxgep);
750 	nxgep = NULL;
751 
752 nxge_detach_exit:
753 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
754 		status));
755 
756 	return (status);
757 }
758 
759 static void
760 nxge_unattach(p_nxge_t nxgep)
761 {
762 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
763 
764 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
765 		return;
766 	}
767 
768 	nxgep->nxge_magic = 0;
769 
770 	if (nxgep->nxge_timerid) {
771 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
772 		nxgep->nxge_timerid = 0;
773 	}
774 
775 	if (nxgep->nxge_hw_p) {
776 		nxge_uninit_common_dev(nxgep);
777 		nxgep->nxge_hw_p = NULL;
778 	}
779 
780 #if	defined(sun4v)
781 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
782 		(void) hsvc_unregister(&nxgep->niu_hsvc);
783 		nxgep->niu_hsvc_available = B_FALSE;
784 	}
785 #endif
786 	/*
787 	 * Stop any further interrupts.
788 	 */
789 	nxge_remove_intrs(nxgep);
790 
	/* Remove soft interrupts. */
792 	nxge_remove_soft_intrs(nxgep);
793 
794 	/*
795 	 * Stop the device and free resources.
796 	 */
797 	nxge_destroy_dev(nxgep);
798 
799 	/*
800 	 * Tear down the ndd parameters setup.
801 	 */
802 	nxge_destroy_param(nxgep);
803 
804 	/*
805 	 * Tear down the kstat setup.
806 	 */
807 	nxge_destroy_kstats(nxgep);
808 
809 	/*
810 	 * Destroy all mutexes.
811 	 */
812 	nxge_destroy_mutexes(nxgep);
813 
814 	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
817 	 */
818 	if (nxgep->dip) {
819 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
820 				    " nxge_unattach: remove all properties"));
821 
822 		(void) ddi_prop_remove_all(nxgep->dip);
823 	}
824 
825 #if NXGE_PROPERTY
826 	nxge_remove_hard_properties(nxgep);
827 #endif
828 
829 	/*
830 	 * Unmap the register setup.
831 	 */
832 	nxge_unmap_regs(nxgep);
833 
834 	nxge_fm_fini(nxgep);
835 
836 	ddi_soft_state_free(nxge_list, nxgep->instance);
837 
838 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
839 }
840 
841 static char n2_siu_name[] = "niu";
842 
843 static nxge_status_t
844 nxge_map_regs(p_nxge_t nxgep)
845 {
846 	int		ddi_status = DDI_SUCCESS;
847 	p_dev_regs_t 	dev_regs;
848 	char		buf[MAXPATHLEN + 1];
849 	char 		*devname;
850 #ifdef	NXGE_DEBUG
851 	char 		*sysname;
852 #endif
853 	off_t		regsize;
854 	nxge_status_t	status = NXGE_OK;
855 #if !defined(_BIG_ENDIAN)
856 	off_t pci_offset;
857 	uint16_t pcie_devctl;
858 #endif
859 
860 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
861 	nxgep->dev_regs = NULL;
862 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
863 	dev_regs->nxge_regh = NULL;
864 	dev_regs->nxge_pciregh = NULL;
865 	dev_regs->nxge_msix_regh = NULL;
866 	dev_regs->nxge_vir_regh = NULL;
867 	dev_regs->nxge_vir2_regh = NULL;
868 	nxgep->niu_type = NIU_TYPE_NONE;
869 
870 	devname = ddi_pathname(nxgep->dip, buf);
871 	ASSERT(strlen(devname) > 0);
872 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
873 		"nxge_map_regs: pathname devname %s", devname));
874 
875 	if (strstr(devname, n2_siu_name)) {
876 		/* N2/NIU */
877 		nxgep->niu_type = N2_NIU;
878 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
879 			"nxge_map_regs: N2/NIU devname %s", devname));
880 		/* get function number */
881 		nxgep->function_num =
882 			(devname[strlen(devname) -1] == '1' ? 1 : 0);
883 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
884 			"nxge_map_regs: N2/NIU function number %d",
885 			nxgep->function_num));
886 	} else {
887 		int		*prop_val;
888 		uint_t 		prop_len;
889 		uint8_t 	func_num;
890 
891 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
892 				0, "reg",
893 				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
894 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
895 				"Reg property not found"));
896 			ddi_status = DDI_FAILURE;
897 			goto nxge_map_regs_fail0;
898 
899 		} else {
900 			func_num = (prop_val[0] >> 8) & 0x7;
901 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
902 				"Reg property found: fun # %d",
903 				func_num));
904 			nxgep->function_num = func_num;
905 			ddi_prop_free(prop_val);
906 		}
907 	}
908 
909 	switch (nxgep->niu_type) {
910 	default:
911 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
912 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
913 			"nxge_map_regs: pci config size 0x%x", regsize));
914 
915 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
916 			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
917 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
918 		if (ddi_status != DDI_SUCCESS) {
919 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
920 				"ddi_map_regs, nxge bus config regs failed"));
921 			goto nxge_map_regs_fail0;
922 		}
923 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
924 			"nxge_map_reg: PCI config addr 0x%0llx "
925 			" handle 0x%0llx", dev_regs->nxge_pciregp,
926 			dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in the
		 * hardware which ends up with no-snoop = yes, resulting
		 * in DMA not being synched properly.
		 */
933 #if !defined(_BIG_ENDIAN)
934 		/* workarounds for x86 systems */
935 		pci_offset = 0x80 + PCIE_DEVCTL;
936 		pcie_devctl = 0x0;
937 		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
938 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
939 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
940 				    pcie_devctl);
941 #endif
942 
943 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
944 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
945 			"nxge_map_regs: pio size 0x%x", regsize));
946 		/* set up the device mapped register */
947 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
948 			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
949 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
950 		if (ddi_status != DDI_SUCCESS) {
951 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
952 				"ddi_map_regs for Neptune global reg failed"));
953 			goto nxge_map_regs_fail1;
954 		}
955 
956 		/* set up the msi/msi-x mapped register */
957 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
958 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
959 			"nxge_map_regs: msix size 0x%x", regsize));
960 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
961 			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
962 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
963 		if (ddi_status != DDI_SUCCESS) {
964 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
965 				"ddi_map_regs for msi reg failed"));
966 			goto nxge_map_regs_fail2;
967 		}
968 
969 		/* set up the vio region mapped register */
970 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
971 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
972 			"nxge_map_regs: vio size 0x%x", regsize));
973 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
974 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
975 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
976 
977 		if (ddi_status != DDI_SUCCESS) {
978 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
979 				"ddi_map_regs for nxge vio reg failed"));
980 			goto nxge_map_regs_fail3;
981 		}
982 		nxgep->dev_regs = dev_regs;
983 
984 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
985 		NPI_PCI_ADD_HANDLE_SET(nxgep,
986 			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
987 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
988 		NPI_MSI_ADD_HANDLE_SET(nxgep,
989 			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
990 
991 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
992 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
993 
994 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
995 		NPI_REG_ADD_HANDLE_SET(nxgep,
996 			(npi_reg_ptr_t)dev_regs->nxge_regp);
997 
998 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
999 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1000 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1001 
1002 		break;
1003 
1004 	case N2_NIU:
1005 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1006 		/*
		 * Set up the device mapped registers (FWARC 2006/556).
		 * (Changed back to index 1: the "reg" property starts at 1.)
1009 		 */
1010 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1011 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1012 			"nxge_map_regs: dev size 0x%x", regsize));
1013 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1014 				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1015 				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1016 
1017 		if (ddi_status != DDI_SUCCESS) {
1018 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1019 				"ddi_map_regs for N2/NIU, global reg failed "));
1020 			goto nxge_map_regs_fail1;
1021 		}
1022 
1023 		/* set up the vio region mapped register */
1024 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1025 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1026 			"nxge_map_regs: vio (1) size 0x%x", regsize));
1027 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1028 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1029 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1030 
1031 		if (ddi_status != DDI_SUCCESS) {
1032 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1033 				"ddi_map_regs for nxge vio reg failed"));
1034 			goto nxge_map_regs_fail2;
1035 		}
		/* set up the second vio region mapped register */
1037 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1038 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1039 			"nxge_map_regs: vio (3) size 0x%x", regsize));
1040 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1041 			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1042 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1043 
1044 		if (ddi_status != DDI_SUCCESS) {
1045 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1046 				"ddi_map_regs for nxge vio2 reg failed"));
1047 			goto nxge_map_regs_fail3;
1048 		}
1049 		nxgep->dev_regs = dev_regs;
1050 
1051 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1052 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1053 
1054 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1055 		NPI_REG_ADD_HANDLE_SET(nxgep,
1056 			(npi_reg_ptr_t)dev_regs->nxge_regp);
1057 
1058 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1059 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1060 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1061 
1062 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1063 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
1064 			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1065 
1066 		break;
1067 	}
1068 
1069 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1070 		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1071 
1072 	goto nxge_map_regs_exit;
1073 nxge_map_regs_fail3:
1074 	if (dev_regs->nxge_msix_regh) {
1075 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1076 	}
1077 	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1079 	}
1080 nxge_map_regs_fail2:
1081 	if (dev_regs->nxge_regh) {
1082 		ddi_regs_map_free(&dev_regs->nxge_regh);
1083 	}
1084 nxge_map_regs_fail1:
1085 	if (dev_regs->nxge_pciregh) {
1086 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1087 	}
1088 nxge_map_regs_fail0:
1089 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1090 	kmem_free(dev_regs, sizeof (dev_regs_t));
1091 
1092 nxge_map_regs_exit:
1093 	if (ddi_status != DDI_SUCCESS)
1094 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1095 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1096 	return (status);
1097 }
1098 
1099 static void
1100 nxge_unmap_regs(p_nxge_t nxgep)
1101 {
1102 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1103 	if (nxgep->dev_regs) {
1104 		if (nxgep->dev_regs->nxge_pciregh) {
1105 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1106 				"==> nxge_unmap_regs: bus"));
1107 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1108 			nxgep->dev_regs->nxge_pciregh = NULL;
1109 		}
1110 		if (nxgep->dev_regs->nxge_regh) {
1111 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1112 				"==> nxge_unmap_regs: device registers"));
1113 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1114 			nxgep->dev_regs->nxge_regh = NULL;
1115 		}
1116 		if (nxgep->dev_regs->nxge_msix_regh) {
1117 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1118 				"==> nxge_unmap_regs: device interrupts"));
1119 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1120 			nxgep->dev_regs->nxge_msix_regh = NULL;
1121 		}
1122 		if (nxgep->dev_regs->nxge_vir_regh) {
1123 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1124 				"==> nxge_unmap_regs: vio region"));
1125 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1126 			nxgep->dev_regs->nxge_vir_regh = NULL;
1127 		}
1128 		if (nxgep->dev_regs->nxge_vir2_regh) {
1129 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1130 				"==> nxge_unmap_regs: vio2 region"));
1131 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1132 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1133 		}
1134 
1135 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1136 		nxgep->dev_regs = NULL;
1137 	}
1138 
1139 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1140 }
1141 
1142 static nxge_status_t
1143 nxge_setup_mutexes(p_nxge_t nxgep)
1144 {
1145 	int ddi_status = DDI_SUCCESS;
1146 	nxge_status_t status = NXGE_OK;
1147 	nxge_classify_t *classify_ptr;
1148 	int partition;
1149 
1150 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1151 
1152 	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
1155 	 */
1156 	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1157 					&nxgep->interrupt_cookie);
1158 	if (ddi_status != DDI_SUCCESS) {
1159 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1160 			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
1161 		goto nxge_setup_mutexes_exit;
1162 	}
1163 
1164 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1165 	MUTEX_INIT(&nxgep->poll_lock, NULL,
1166 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1167 
1168 	/*
1169 	 * Initialize mutexes for this device.
1170 	 */
1171 	MUTEX_INIT(nxgep->genlock, NULL,
1172 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1173 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1174 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1175 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1176 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1177 	RW_INIT(&nxgep->filter_lock, NULL,
1178 		RW_DRIVER, (void *)nxgep->interrupt_cookie);
1179 
1180 	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context, as FFLP
	 * operations can take a very long time to complete and hence
	 * are not suitable to invoke from interrupt handlers.
	 */
1187 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1188 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1189 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1190 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1191 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1192 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1193 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1194 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1195 		}
1196 	}
1197 
1198 nxge_setup_mutexes_exit:
1199 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1200 	    "<== nxge_setup_mutexes status = %x", status));
1201 
1202 	if (ddi_status != DDI_SUCCESS)
1203 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1204 
1205 	return (status);
1206 }
1207 
1208 static void
1209 nxge_destroy_mutexes(p_nxge_t nxgep)
1210 {
1211 	int partition;
1212 	nxge_classify_t *classify_ptr;
1213 
1214 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1215 	RW_DESTROY(&nxgep->filter_lock);
1216 	MUTEX_DESTROY(&nxgep->mif_lock);
1217 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1218 	MUTEX_DESTROY(nxgep->genlock);
1219 
1220 	classify_ptr = &nxgep->classifier;
1221 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1222 
1223 	/* Destroy all polling resources. */
1224 	MUTEX_DESTROY(&nxgep->poll_lock);
1225 	cv_destroy(&nxgep->poll_cv);
1226 
1227 	/* free data structures, based on HW type */
1228 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1229 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1230 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1231 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1232 		}
1233 	}
1234 
1235 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1236 }
1237 
1238 nxge_status_t
1239 nxge_init(p_nxge_t nxgep)
1240 {
1241 	nxge_status_t	status = NXGE_OK;
1242 
1243 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1244 
1245 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1246 		return (status);
1247 	}
1248 
1249 	/*
1250 	 * Allocate system memory for the receive/transmit buffer blocks
1251 	 * and receive/transmit descriptor rings.
1252 	 */
1253 	status = nxge_alloc_mem_pool(nxgep);
1254 	if (status != NXGE_OK) {
1255 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1256 		goto nxge_init_fail1;
1257 	}
1258 
1259 	/*
1260 	 * Initialize and enable TXC registers
1261 	 * (Globally enable TX controller,
1262 	 *  enable a port, configure dma channel bitmap,
1263 	 *  configure the max burst size).
1264 	 */
1265 	status = nxge_txc_init(nxgep);
1266 	if (status != NXGE_OK) {
1267 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
1268 		goto nxge_init_fail2;
1269 	}
1270 
1271 	/*
1272 	 * Initialize and enable TXDMA channels.
1273 	 */
1274 	status = nxge_init_txdma_channels(nxgep);
1275 	if (status != NXGE_OK) {
1276 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1277 		goto nxge_init_fail3;
1278 	}
1279 
1280 	/*
1281 	 * Initialize and enable RXDMA channels.
1282 	 */
1283 	status = nxge_init_rxdma_channels(nxgep);
1284 	if (status != NXGE_OK) {
1285 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1286 		goto nxge_init_fail4;
1287 	}
1288 
1289 	/*
1290 	 * Initialize TCAM and FCRAM (Neptune).
1291 	 */
1292 	status = nxge_classify_init(nxgep);
1293 	if (status != NXGE_OK) {
1294 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1295 		goto nxge_init_fail5;
1296 	}
1297 
1298 	/*
1299 	 * Initialize ZCP
1300 	 */
1301 	status = nxge_zcp_init(nxgep);
1302 	if (status != NXGE_OK) {
1303 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1304 		goto nxge_init_fail5;
1305 	}
1306 
1307 	/*
1308 	 * Initialize IPP.
1309 	 */
1310 	status = nxge_ipp_init(nxgep);
1311 	if (status != NXGE_OK) {
1312 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1313 		goto nxge_init_fail5;
1314 	}
1315 
1316 	/*
1317 	 * Initialize the MAC block.
1318 	 */
1319 	status = nxge_mac_init(nxgep);
1320 	if (status != NXGE_OK) {
1321 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1322 		goto nxge_init_fail5;
1323 	}
1324 
1325 	nxge_intrs_enable(nxgep);
1326 
1327 	/*
1328 	 * Enable hardware interrupts.
1329 	 */
1330 	nxge_intr_hw_enable(nxgep);
1331 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1332 
1333 	goto nxge_init_exit;
1334 
1335 nxge_init_fail5:
1336 	nxge_uninit_rxdma_channels(nxgep);
1337 nxge_init_fail4:
1338 	nxge_uninit_txdma_channels(nxgep);
1339 nxge_init_fail3:
1340 	(void) nxge_txc_uninit(nxgep);
1341 nxge_init_fail2:
1342 	nxge_free_mem_pool(nxgep);
1343 nxge_init_fail1:
1344 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1345 		"<== nxge_init status (failed) = 0x%08x", status));
1346 	return (status);
1347 
1348 nxge_init_exit:
1349 
1350 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1351 		status));
1352 	return (status);
1353 }
1354 
1356 timeout_id_t
1357 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1358 {
1359 	if ((nxgep->suspended == 0) ||
1360 			(nxgep->suspended == DDI_RESUME)) {
1361 		return (timeout(func, (caddr_t)nxgep,
1362 			drv_usectohz(1000 * msec)));
1363 	}
1364 	return (NULL);
1365 }
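
/*
 * Example use of nxge_start_timer() (an illustrative sketch;
 * nxge_my_callback and the 1000 msec period are hypothetical):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_my_callback,
 *	    1000);
 */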
1366 
1367 /*ARGSUSED*/
1368 void
1369 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1370 {
1371 	if (timerid) {
1372 		(void) untimeout(timerid);
1373 	}
1374 }
1375 
1376 void
1377 nxge_uninit(p_nxge_t nxgep)
1378 {
1379 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1380 
1381 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1382 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1383 			"==> nxge_uninit: not initialized"));
1384 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1385 			"<== nxge_uninit"));
1386 		return;
1387 	}
1388 
1389 	/* stop timer */
1390 	if (nxgep->nxge_timerid) {
1391 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1392 		nxgep->nxge_timerid = 0;
1393 	}
1394 
1395 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1396 	(void) nxge_intr_hw_disable(nxgep);
1397 
1398 	/*
1399 	 * Reset the receive MAC side.
1400 	 */
1401 	(void) nxge_rx_mac_disable(nxgep);
1402 
1403 	/* Disable and soft reset the IPP */
1404 	(void) nxge_ipp_disable(nxgep);
1405 
1406 	/* Free classification resources */
1407 	(void) nxge_classify_uninit(nxgep);
1408 
1409 	/*
1410 	 * Reset the transmit/receive DMA side.
1411 	 */
1412 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1413 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1414 
1415 	nxge_uninit_txdma_channels(nxgep);
1416 	nxge_uninit_rxdma_channels(nxgep);
1417 
1418 	/*
1419 	 * Reset the transmit MAC side.
1420 	 */
1421 	(void) nxge_tx_mac_disable(nxgep);
1422 
1423 	nxge_free_mem_pool(nxgep);
1424 
1425 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1426 
1427 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1428 
1429 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1430 		"nxge_mblks_pending %d", nxge_mblks_pending));
1431 }
1432 
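/*
 * nxge_get64() and nxge_put64() service register peek/poke ioctls:
 * the first 8 bytes of the message block carry a register offset
 * (nxge_put64() expects a second 8-byte word holding the value to
 * write), and nxge_get64() copies the value read back into the
 * message block in place.
 */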
1433 void
1434 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1435 {
1436 #if defined(__i386)
1437 	size_t		reg;
1438 #else
1439 	uint64_t	reg;
1440 #endif
	uint64_t	regdata;
	uint64_t	buf;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&buf, sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf;
#else
	reg = buf;
#endif
1445 	regdata = 0;
1446 	retry = 1;
1447 
1448 	for (i = 0; i < retry; i++) {
1449 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1450 	}
1451 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1452 }
1453 
1454 void
1455 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1456 {
1457 #if defined(__i386)
1458 	size_t		reg;
1459 #else
1460 	uint64_t	reg;
1461 #endif
1462 	uint64_t	buf[2];
1463 
1464 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1465 #if defined(__i386)
1466 	reg = (size_t)buf[0];
1467 #else
1468 	reg = buf[0];
1469 #endif
1470 
1471 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1472 }
1473 
1475 nxge_os_mutex_t nxgedebuglock;
1476 int nxge_debug_init = 0;
1477 
1478 /*ARGSUSED*/
1479 /*VARARGS*/
1480 void
1481 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1482 {
1483 	char msg_buffer[1048];
1484 	char prefix_buffer[32];
1485 	int instance;
1486 	uint64_t debug_level;
1487 	int cmn_level = CE_CONT;
1488 	va_list ap;
1489 
1490 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1491 		nxgep->nxge_debug_level;
1492 
1493 	if ((level & debug_level) ||
1494 		(level == NXGE_NOTE) ||
1495 		(level == NXGE_ERR_CTL)) {
1496 		/* do the msg processing */
1497 		if (nxge_debug_init == 0) {
1498 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1499 			nxge_debug_init = 1;
1500 		}
1501 
1502 		MUTEX_ENTER(&nxgedebuglock);
1503 
1504 		if ((level & NXGE_NOTE)) {
1505 			cmn_level = CE_NOTE;
1506 		}
1507 
1508 		if (level & NXGE_ERR_CTL) {
1509 			cmn_level = CE_WARN;
1510 		}
1511 
1512 		va_start(ap, fmt);
1513 		(void) vsprintf(msg_buffer, fmt, ap);
1514 		va_end(ap);
1515 		if (nxgep == NULL) {
1516 			instance = -1;
1517 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1518 		} else {
1519 			instance = nxgep->instance;
1520 			(void) sprintf(prefix_buffer,
1521 						    "%s%d :", "nxge", instance);
1522 		}
1523 
1524 		MUTEX_EXIT(&nxgedebuglock);
1525 		cmn_err(cmn_level, "!%s %s\n",
1526 				prefix_buffer, msg_buffer);
1527 
1528 	}
1529 }
1530 
1531 char *
1532 nxge_dump_packet(char *addr, int size)
1533 {
1534 	uchar_t *ap = (uchar_t *)addr;
1535 	int i;
1536 	static char etherbuf[1024];
1537 	char *cp = etherbuf;
1538 	char digits[] = "0123456789abcdef";
1539 
1540 	if (!size)
1541 		size = 60;
1542 
1543 	if (size > MAX_DUMP_SZ) {
1544 		/* Dump the leading bytes */
1545 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1546 			if (*ap > 0x0f)
1547 				*cp++ = digits[*ap >> 4];
1548 			*cp++ = digits[*ap++ & 0xf];
1549 			*cp++ = ':';
1550 		}
1551 		for (i = 0; i < 20; i++)
1552 			*cp++ = '.';
1553 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1554 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1555 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1556 			if (*ap > 0x0f)
1557 				*cp++ = digits[*ap >> 4];
1558 			*cp++ = digits[*ap++ & 0xf];
1559 			*cp++ = ':';
1560 		}
1561 	} else {
1562 		for (i = 0; i < size; i++) {
1563 			if (*ap > 0x0f)
1564 				*cp++ = digits[*ap >> 4];
1565 			*cp++ = digits[*ap++ & 0xf];
1566 			*cp++ = ':';
1567 		}
1568 	}
1569 	*--cp = 0;
1570 	return (etherbuf);
1571 }
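
/*
 * nxge_dump_packet() formats into a single static buffer, so it is
 * not reentrant and each call overwrites the previous result.  A
 * typical (illustrative) use from a receive-path debug message:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rx pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 */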
1572 
1573 #ifdef	NXGE_DEBUG
1574 static void
1575 nxge_test_map_regs(p_nxge_t nxgep)
1576 {
1577 	ddi_acc_handle_t cfg_handle;
1578 	p_pci_cfg_t	cfg_ptr;
1579 	ddi_acc_handle_t dev_handle;
1580 	char		*dev_ptr;
1581 	ddi_acc_handle_t pci_config_handle;
1582 	uint32_t	regval;
1583 	int		i;
1584 
1585 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1586 
1587 	dev_handle = nxgep->dev_regs->nxge_regh;
1588 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1589 
1590 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1591 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1592 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1593 
1594 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1595 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1596 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1597 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1598 		    &cfg_ptr->vendorid));
1599 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1600 		    "\tvendorid 0x%x devid 0x%x",
1601 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1602 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1603 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1604 		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1605 		    "bar1c 0x%x",
1606 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1607 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1608 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1609 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1610 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1611 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1612 		    "base 28 0x%x bar2c 0x%x\n",
1613 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1614 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1615 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1616 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1617 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1618 		    "\nNeptune PCI BAR: base30 0x%x\n",
1619 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1620 
1621 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1622 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1624 		    "first  0x%llx second 0x%llx third 0x%llx "
1625 		    "last 0x%llx ",
1626 		    NXGE_PIO_READ64(dev_handle,
1627 		    (uint64_t *)(dev_ptr + 0),  0),
1628 		    NXGE_PIO_READ64(dev_handle,
1629 		    (uint64_t *)(dev_ptr + 8),  0),
1630 		    NXGE_PIO_READ64(dev_handle,
1631 		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
1633 		    (uint64_t *)(dev_ptr + 24), 0)));
1634 	}
1635 }
1636 
1637 #endif
1638 
1639 static void
1640 nxge_suspend(p_nxge_t nxgep)
1641 {
1642 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1643 
1644 	nxge_intrs_disable(nxgep);
1645 	nxge_destroy_dev(nxgep);
1646 
1647 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1648 }
1649 
1650 static nxge_status_t
1651 nxge_resume(p_nxge_t nxgep)
1652 {
1653 	nxge_status_t status = NXGE_OK;
1654 
1655 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1656 
1657 	nxgep->suspended = DDI_RESUME;
1658 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1659 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1660 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1661 	(void) nxge_rx_mac_enable(nxgep);
1662 	(void) nxge_tx_mac_enable(nxgep);
1663 	nxge_intrs_enable(nxgep);
1664 	nxgep->suspended = 0;
1665 
1666 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1667 			"<== nxge_resume status = 0x%x", status));
1668 	return (status);
1669 }
1670 
1671 static nxge_status_t
1672 nxge_setup_dev(p_nxge_t nxgep)
1673 {
1674 	nxge_status_t	status = NXGE_OK;
1675 
1676 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1677 	    nxgep->mac.portnum));
1678 
1679 	status = nxge_link_init(nxgep);
1680 
1681 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1682 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1683 			"port%d Bad register acc handle", nxgep->mac.portnum));
1684 		status = NXGE_ERROR;
1685 	}
1686 
1687 	if (status != NXGE_OK) {
1688 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1689 			    " nxge_setup_dev status "
1690 			    "(xcvr init 0x%08x)", status));
1691 		goto nxge_setup_dev_exit;
1692 	}
1693 
1694 nxge_setup_dev_exit:
1695 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1696 		"<== nxge_setup_dev port %d status = 0x%08x",
1697 		nxgep->mac.portnum, status));
1698 
1699 	return (status);
1700 }
1701 
1702 static void
1703 nxge_destroy_dev(p_nxge_t nxgep)
1704 {
1705 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
1706 
1707 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1708 
1709 	(void) nxge_hw_stop(nxgep);
1710 
1711 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
1712 }
1713 
1714 static nxge_status_t
1715 nxge_setup_system_dma_pages(p_nxge_t nxgep)
1716 {
1717 	int 			ddi_status = DDI_SUCCESS;
1718 	uint_t 			count;
1719 	ddi_dma_cookie_t 	cookie;
1720 	uint_t 			iommu_pagesize;
1721 	nxge_status_t		status = NXGE_OK;
1722 
1723 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
1724 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
1725 	if (nxgep->niu_type != N2_NIU) {
1726 		iommu_pagesize = dvma_pagesize(nxgep->dip);
1727 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1728 			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1729 			" default_block_size %d iommu_pagesize %d",
1730 			nxgep->sys_page_sz,
1731 			ddi_ptob(nxgep->dip, (ulong_t)1),
1732 			nxgep->rx_default_block_size,
1733 			iommu_pagesize));
1734 
1735 		if (iommu_pagesize != 0) {
1736 			if (nxgep->sys_page_sz == iommu_pagesize) {
1737 				if (iommu_pagesize > 0x4000)
1738 					nxgep->sys_page_sz = 0x4000;
1739 			} else {
1740 				if (nxgep->sys_page_sz > iommu_pagesize)
1741 					nxgep->sys_page_sz = iommu_pagesize;
1742 			}
1743 		}
1744 	}
1745 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1746 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1747 		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1748 		"default_block_size %d page mask %d",
1749 		nxgep->sys_page_sz,
1750 		ddi_ptob(nxgep->dip, (ulong_t)1),
1751 		nxgep->rx_default_block_size,
1752 		nxgep->sys_page_mask));
1753 
1755 	switch (nxgep->sys_page_sz) {
1756 	default:
1757 		nxgep->sys_page_sz = 0x1000;
1758 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1759 		nxgep->rx_default_block_size = 0x1000;
1760 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1761 		break;
1762 	case 0x1000:
1763 		nxgep->rx_default_block_size = 0x1000;
1764 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1765 		break;
1766 	case 0x2000:
1767 		nxgep->rx_default_block_size = 0x2000;
1768 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1769 		break;
1770 	case 0x4000:
1771 		nxgep->rx_default_block_size = 0x4000;
1772 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
1773 		break;
1774 	case 0x8000:
1775 		nxgep->rx_default_block_size = 0x8000;
1776 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
1777 		break;
1778 	}
1779 
1780 #ifndef USE_RX_BIG_BUF
1781 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
1782 #else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1785 #endif
1786 	/*
1787 	 * Get the system DMA burst size.
1788 	 */
1789 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
1790 			DDI_DMA_DONTWAIT, 0,
1791 			&nxgep->dmasparehandle);
1792 	if (ddi_status != DDI_SUCCESS) {
1793 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1794 			"ddi_dma_alloc_handle: failed "
1795 			" status 0x%x", ddi_status));
1796 		goto nxge_get_soft_properties_exit;
1797 	}
1798 
1799 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
1800 				(caddr_t)nxgep->dmasparehandle,
1801 				sizeof (nxgep->dmasparehandle),
1802 				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1803 				DDI_DMA_DONTWAIT, 0,
1804 				&cookie, &count);
1805 	if (ddi_status != DDI_DMA_MAPPED) {
1806 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1807 			"Binding spare handle to find system"
1808 			" burstsize failed."));
1809 		ddi_status = DDI_FAILURE;
1810 		goto nxge_get_soft_properties_fail1;
1811 	}
1812 
1813 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
1814 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
1815 
1816 nxge_get_soft_properties_fail1:
1817 	ddi_dma_free_handle(&nxgep->dmasparehandle);
1818 
1819 nxge_get_soft_properties_exit:
1820 
1821 	if (ddi_status != DDI_SUCCESS)
1822 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1823 
1824 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1825 		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
1826 	return (status);
1827 }
1828 
1829 static nxge_status_t
1830 nxge_alloc_mem_pool(p_nxge_t nxgep)
1831 {
1832 	nxge_status_t	status = NXGE_OK;
1833 
1834 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
1835 
1836 	status = nxge_alloc_rx_mem_pool(nxgep);
1837 	if (status != NXGE_OK) {
1838 		return (NXGE_ERROR);
1839 	}
1840 
1841 	status = nxge_alloc_tx_mem_pool(nxgep);
1842 	if (status != NXGE_OK) {
1843 		nxge_free_rx_mem_pool(nxgep);
1844 		return (NXGE_ERROR);
1845 	}
1846 
1847 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
1848 	return (NXGE_OK);
1849 }
1850 
1851 static void
1852 nxge_free_mem_pool(p_nxge_t nxgep)
1853 {
1854 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
1855 
1856 	nxge_free_rx_mem_pool(nxgep);
1857 	nxge_free_tx_mem_pool(nxgep);
1858 
1859 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
1860 }
1861 
1862 static nxge_status_t
1863 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
1864 {
1865 	int			i, j;
1866 	uint32_t		ndmas, st_rdc;
1867 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
1868 	p_nxge_hw_pt_cfg_t	p_cfgp;
1869 	p_nxge_dma_pool_t	dma_poolp;
1870 	p_nxge_dma_common_t	*dma_buf_p;
1871 	p_nxge_dma_pool_t	dma_cntl_poolp;
1872 	p_nxge_dma_common_t	*dma_cntl_p;
1873 	size_t			rx_buf_alloc_size;
1874 	size_t			rx_cntl_alloc_size;
1875 	uint32_t 		*num_chunks; /* per dma */
1876 	nxge_status_t		status = NXGE_OK;
1877 
1878 	uint32_t		nxge_port_rbr_size;
1879 	uint32_t		nxge_port_rbr_spare_size;
1880 	uint32_t		nxge_port_rcr_size;
1881 
1882 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
1883 
1884 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1885 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1886 	st_rdc = p_cfgp->start_rdc;
1887 	ndmas = p_cfgp->max_rdcs;
1888 
1889 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1890 		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1891 
1892 	/*
1893 	 * Allocate memory for each receive DMA channel.
1894 	 */
1895 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
1896 			KM_SLEEP);
1897 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1898 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1899 
1900 	dma_cntl_poolp = (p_nxge_dma_pool_t)
1901 				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
1902 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1903 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1904 
1905 	num_chunks = (uint32_t *)KMEM_ZALLOC(
1906 			sizeof (uint32_t) * ndmas, KM_SLEEP);
1907 
1908 	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * RBR block counts must be a multiple of the post batch count (16).
1912 	 */
1913 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
1914 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
1915 
1916 	if (!nxge_port_rbr_size) {
1917 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
1918 	}
1919 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
1920 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
1921 			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
1922 	}
1923 
1924 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
1925 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
1926 
1927 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
1928 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
1929 			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
1930 	}
1931 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
1932 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1933 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
1934 		    "set to default %d",
1935 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
1936 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
1937 	}
1938 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
1939 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1940 		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
1941 		    "set to default %d",
1942 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
1943 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
1944 	}
1945 
1946 	/*
	 * N2/NIU limits the descriptor sizes: contiguous memory
	 * allocation for data buffers is capped at 4M (contig_mem_alloc),
	 * and control buffers must be little endian (allocated with the
	 * ddi/dki mem alloc functions).
1951 	 */
1952 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1953 	if (nxgep->niu_type == N2_NIU) {
1954 		nxge_port_rbr_spare_size = 0;
1955 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
1956 				(!ISP2(nxge_port_rbr_size))) {
1957 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
1958 		}
1959 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
1960 				(!ISP2(nxge_port_rcr_size))) {
1961 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
1962 		}
1963 	}
1964 #endif
1965 
1966 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
1967 		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
1968 
1969 	/*
1970 	 * Addresses of receive block ring, receive completion ring and the
1971 	 * mailbox must be all cache-aligned (64 bytes).
1972 	 */
1973 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
1974 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1975 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
1976 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
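	/*
	 * Illustrative sizing only (the descriptor sizes below are
	 * examples, not guaranteed): with rbr_size 4096, no spare
	 * blocks, rcr_size 8192, a 4-byte rx_desc_t and an 8-byte
	 * rcr_entry_t, this is 4096*4 + 8192*8 = 80 KB plus the
	 * mailbox.
	 */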
1977 
1978 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
1979 		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
1980 		"nxge_port_rcr_size = %d "
1981 		"rx_cntl_alloc_size = %d",
1982 		nxge_port_rbr_size, nxge_port_rbr_spare_size,
1983 		nxge_port_rcr_size,
1984 		rx_cntl_alloc_size));
1985 
1986 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1987 	if (nxgep->niu_type == N2_NIU) {
1988 		if (!ISP2(rx_buf_alloc_size)) {
1989 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1990 				"==> nxge_alloc_rx_mem_pool: "
1991 				" must be power of 2"));
1992 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1993 			goto nxge_alloc_rx_mem_pool_exit;
1994 		}
1995 
1996 		if (rx_buf_alloc_size > (1 << 22)) {
1997 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1998 				"==> nxge_alloc_rx_mem_pool: "
1999 				" limit size to 4M"));
2000 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2001 			goto nxge_alloc_rx_mem_pool_exit;
2002 		}
2003 
2004 		if (rx_cntl_alloc_size < 0x2000) {
2005 			rx_cntl_alloc_size = 0x2000;
2006 		}
2007 	}
2008 #endif
2009 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2010 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2011 
2012 	/*
2013 	 * Allocate memory for receive buffers and descriptor rings.
2014 	 * Replace allocation functions with interface functions provided
2015 	 * by the partition manager when it is available.
2016 	 */
2017 	/*
2018 	 * Allocate memory for the receive buffer blocks.
2019 	 */
2020 	for (i = 0; i < ndmas; i++) {
2021 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2022 			" nxge_alloc_rx_mem_pool to alloc mem: "
2023 			" dma %d dma_buf_p %llx &dma_buf_p %llx",
2024 			i, dma_buf_p[i], &dma_buf_p[i]));
2025 		num_chunks[i] = 0;
2026 		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
2027 				rx_buf_alloc_size,
2028 				nxgep->rx_default_block_size, &num_chunks[i]);
2029 		if (status != NXGE_OK) {
2030 			break;
2031 		}
2032 		st_rdc++;
2033 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2034 			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
2035 			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
2036 			dma_buf_p[i], &dma_buf_p[i]));
2037 	}
2038 	if (i < ndmas) {
2039 		goto nxge_alloc_rx_mem_fail1;
2040 	}
2041 	/*
2042 	 * Allocate memory for descriptor rings and mailbox.
2043 	 */
2044 	st_rdc = p_cfgp->start_rdc;
2045 	for (j = 0; j < ndmas; j++) {
2046 		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
2047 					rx_cntl_alloc_size);
2048 		if (status != NXGE_OK) {
2049 			break;
2050 		}
2051 		st_rdc++;
2052 	}
2053 	if (j < ndmas) {
2054 		goto nxge_alloc_rx_mem_fail2;
2055 	}
2056 
2057 	dma_poolp->ndmas = ndmas;
2058 	dma_poolp->num_chunks = num_chunks;
2059 	dma_poolp->buf_allocated = B_TRUE;
2060 	nxgep->rx_buf_pool_p = dma_poolp;
2061 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2062 
2063 	dma_cntl_poolp->ndmas = ndmas;
2064 	dma_cntl_poolp->buf_allocated = B_TRUE;
2065 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2066 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2067 
2068 	goto nxge_alloc_rx_mem_pool_exit;
2069 
2070 nxge_alloc_rx_mem_fail2:
2071 	/* Free control buffers */
2072 	j--;
2073 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2074 		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
2075 	for (; j >= 0; j--) {
2076 		nxge_free_rx_cntl_dma(nxgep,
2077 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2078 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2079 			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
2080 			j));
2081 	}
2082 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2083 		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
2084 
2085 nxge_alloc_rx_mem_fail1:
2086 	/* Free data buffers */
2087 	i--;
2088 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2089 		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
2090 	for (; i >= 0; i--) {
2091 		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2092 			num_chunks[i]);
2093 	}
2094 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2095 		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
2096 
2097 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2098 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2099 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2100 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2101 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2102 
2103 nxge_alloc_rx_mem_pool_exit:
2104 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2105 		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2106 
2107 	return (status);
2108 }
2109 
2110 static void
2111 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2112 {
2113 	uint32_t		i, ndmas;
2114 	p_nxge_dma_pool_t	dma_poolp;
2115 	p_nxge_dma_common_t	*dma_buf_p;
2116 	p_nxge_dma_pool_t	dma_cntl_poolp;
2117 	p_nxge_dma_common_t	*dma_cntl_p;
2118 	uint32_t 		*num_chunks;
2119 
2120 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2121 
2122 	dma_poolp = nxgep->rx_buf_pool_p;
2123 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2124 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2125 			"<== nxge_free_rx_mem_pool "
2126 			"(null rx buf pool or buf not allocated"));
2127 		return;
2128 	}
2129 
2130 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
2131 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2132 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2133 			"<== nxge_free_rx_mem_pool "
2134 			"(null rx cntl buf pool or cntl buf not allocated"));
2135 		return;
2136 	}
2137 
2138 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2139 	num_chunks = dma_poolp->num_chunks;
2140 
2141 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2142 	ndmas = dma_cntl_poolp->ndmas;
2143 
2144 	for (i = 0; i < ndmas; i++) {
2145 		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2146 	}
2147 
2148 	for (i = 0; i < ndmas; i++) {
2149 		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
2150 	}
2151 
2152 	for (i = 0; i < ndmas; i++) {
2153 		KMEM_FREE(dma_buf_p[i],
2154 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2155 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2156 	}
2157 
2158 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2159 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2160 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2161 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2162 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2163 
2164 	nxgep->rx_buf_pool_p = NULL;
2165 	nxgep->rx_cntl_pool_p = NULL;
2166 
2167 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2168 }
2169 
2170 
2171 static nxge_status_t
2172 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2173 	p_nxge_dma_common_t *dmap,
2174 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2175 {
2176 	p_nxge_dma_common_t 	rx_dmap;
2177 	nxge_status_t		status = NXGE_OK;
2178 	size_t			total_alloc_size;
2179 	size_t			allocated = 0;
2180 	int			i, size_index, array_size;
2181 
2182 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2183 
2184 	rx_dmap = (p_nxge_dma_common_t)
2185 			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2186 			KM_SLEEP);
2187 
2188 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2189 		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2190 		dma_channel, alloc_size, block_size, dmap));
2191 
2192 	total_alloc_size = alloc_size;
2193 
2194 #if defined(RX_USE_RECLAIM_POST)
2195 	total_alloc_size = alloc_size + alloc_size/4;
2196 #endif
2197 
2198 	i = 0;
2199 	size_index = 0;
2200 	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
	while ((size_index < array_size) &&
			(alloc_sizes[size_index] < alloc_size))
			size_index++;
2204 	if (size_index >= array_size) {
2205 		size_index = array_size - 1;
2206 	}
2207 
2208 	while ((allocated < total_alloc_size) &&
2209 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2210 		rx_dmap[i].dma_chunk_index = i;
2211 		rx_dmap[i].block_size = block_size;
2212 		rx_dmap[i].alength = alloc_sizes[size_index];
2213 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2214 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2215 		rx_dmap[i].dma_channel = dma_channel;
2216 		rx_dmap[i].contig_alloc_type = B_FALSE;
2217 
2218 		/*
2219 		 * N2/NIU: data buffers must be contiguous as the driver
2220 		 *	   needs to call Hypervisor api to set up
2221 		 *	   logical pages.
2222 		 */
2223 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2224 			rx_dmap[i].contig_alloc_type = B_TRUE;
2225 		}
2226 
2227 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2228 			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2229 			"i %d nblocks %d alength %d",
2230 			dma_channel, i, &rx_dmap[i], block_size,
2231 			i, rx_dmap[i].nblocks,
2232 			rx_dmap[i].alength));
2233 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2234 			&nxge_rx_dma_attr,
2235 			rx_dmap[i].alength,
2236 			&nxge_dev_buf_dma_acc_attr,
2237 			DDI_DMA_READ | DDI_DMA_STREAMING,
2238 			(p_nxge_dma_common_t)(&rx_dmap[i]));
2239 		if (status != NXGE_OK) {
2240 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2241 				" nxge_alloc_rx_buf_dma: Alloc Failed "));
2242 			size_index--;
2243 		} else {
2244 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2245 				" alloc_rx_buf_dma allocated rdc %d "
2246 				"chunk %d size %x dvma %x bufp %llx ",
2247 				dma_channel, i, rx_dmap[i].alength,
2248 				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
2249 			i++;
2250 			allocated += alloc_sizes[size_index];
2251 		}
2252 	}
2253 
2254 
2255 	if (allocated < total_alloc_size) {
2256 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2257 		    "==> nxge_alloc_rx_buf_dma: not enough for channe %d "
2258 		    "allocated 0x%x requested 0x%x",
2259 		    dma_channel,
2260 		    allocated, total_alloc_size));
2261 		status = NXGE_ERROR;
2262 		goto nxge_alloc_rx_mem_fail1;
2263 	}
2264 
2265 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2266 	    "==> nxge_alloc_rx_buf_dma: Allocated for channe %d "
2267 	    "allocated 0x%x requested 0x%x",
2268 	    dma_channel,
2269 	    allocated, total_alloc_size));
2270 
2271 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2272 		" alloc_rx_buf_dma rdc %d allocated %d chunks",
2273 		dma_channel, i));
2274 	*num_chunks = i;
2275 	*dmap = rx_dmap;
2276 
2277 	goto nxge_alloc_rx_mem_exit;
2278 
2279 nxge_alloc_rx_mem_fail1:
2280 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2281 
2282 nxge_alloc_rx_mem_exit:
2283 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2284 		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2285 
2286 	return (status);
2287 }
2288 
2289 /*ARGSUSED*/
2290 static void
2291 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2292     uint32_t num_chunks)
2293 {
2294 	int		i;
2295 
2296 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2297 		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2298 
2299 	for (i = 0; i < num_chunks; i++) {
2300 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2301 			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2302 				i, dmap));
2303 		nxge_dma_mem_free(dmap++);
2304 	}
2305 
2306 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
2307 }
2308 
2309 /*ARGSUSED*/
2310 static nxge_status_t
2311 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2312     p_nxge_dma_common_t *dmap, size_t size)
2313 {
2314 	p_nxge_dma_common_t 	rx_dmap;
2315 	nxge_status_t		status = NXGE_OK;
2316 
2317 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2318 
2319 	rx_dmap = (p_nxge_dma_common_t)
2320 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2321 
2322 	rx_dmap->contig_alloc_type = B_FALSE;
2323 
2324 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2325 			&nxge_desc_dma_attr,
2326 			size,
2327 			&nxge_dev_desc_dma_acc_attr,
2328 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2329 			rx_dmap);
2330 	if (status != NXGE_OK) {
2331 		goto nxge_alloc_rx_cntl_dma_fail1;
2332 	}
2333 
2334 	*dmap = rx_dmap;
2335 	goto nxge_alloc_rx_cntl_dma_exit;
2336 
2337 nxge_alloc_rx_cntl_dma_fail1:
2338 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2339 
2340 nxge_alloc_rx_cntl_dma_exit:
2341 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2342 		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2343 
2344 	return (status);
2345 }
2346 
2347 /*ARGSUSED*/
2348 static void
2349 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2350 {
2351 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2352 
2353 	nxge_dma_mem_free(dmap);
2354 
2355 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2356 }
2357 
2358 static nxge_status_t
2359 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2360 {
2361 	nxge_status_t		status = NXGE_OK;
2362 	int			i, j;
2363 	uint32_t		ndmas, st_tdc;
2364 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2365 	p_nxge_hw_pt_cfg_t	p_cfgp;
2366 	p_nxge_dma_pool_t	dma_poolp;
2367 	p_nxge_dma_common_t	*dma_buf_p;
2368 	p_nxge_dma_pool_t	dma_cntl_poolp;
2369 	p_nxge_dma_common_t	*dma_cntl_p;
2370 	size_t			tx_buf_alloc_size;
2371 	size_t			tx_cntl_alloc_size;
2372 	uint32_t		*num_chunks; /* per dma */
2373 	uint32_t		bcopy_thresh;
2374 
2375 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2376 
2377 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2378 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2379 	st_tdc = p_cfgp->start_tdc;
2380 	ndmas = p_cfgp->max_tdcs;
2381 
2382 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2383 		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2384 		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2385 	/*
2386 	 * Allocate memory for each transmit DMA channel.
2387 	 */
2388 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2389 			KM_SLEEP);
2390 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2391 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2392 
2393 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2394 			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2395 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2396 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2397 
2398 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2399 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2400 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2401 		    "set to default %d",
2402 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2403 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2404 	}
2405 
2406 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2407 	/*
2408 	 * N2/NIU has limitation on the descriptor sizes (contiguous
2409 	 * memory allocation on data buffers to 4M (contig_mem_alloc)
2410 	 * and little endian for control buffers (must use the ddi/dki mem alloc
2411 	 * function). The transmit ring is limited to 8K (includes the
2412 	 * mailbox).
2413 	 */
2414 	if (nxgep->niu_type == N2_NIU) {
2415 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2416 			(!ISP2(nxge_tx_ring_size))) {
2417 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2418 		}
2419 	}
2420 #endif
2421 
2422 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2423 
2424 	/*
	 * Assume that each DMA channel will be configured with the
	 * default transmit buffer size for copying transmit data.
	 * (Packets with payloads over this limit will not be copied.)
2429 	 */
2430 	if (nxgep->niu_type == N2_NIU) {
2431 		bcopy_thresh = TX_BCOPY_SIZE;
2432 	} else {
2433 		bcopy_thresh = nxge_bcopy_thresh;
2434 	}
2435 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2436 
2437 	/*
2438 	 * Addresses of transmit descriptor ring and the
2439 	 * mailbox must be all cache-aligned (64 bytes).
2440 	 */
2441 	tx_cntl_alloc_size = nxge_tx_ring_size;
2442 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2443 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
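	/*
	 * Illustrative sizing only (the descriptor size is an example,
	 * not guaranteed): a 1024-entry ring with an 8-byte tx_desc_t
	 * needs 8 KB of descriptors plus the mailbox.
	 */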
2444 
2445 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2446 	if (nxgep->niu_type == N2_NIU) {
2447 		if (!ISP2(tx_buf_alloc_size)) {
2448 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2449 				"==> nxge_alloc_tx_mem_pool: "
2450 				" must be power of 2"));
2451 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2452 			goto nxge_alloc_tx_mem_pool_exit;
2453 		}
2454 
2455 		if (tx_buf_alloc_size > (1 << 22)) {
2456 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2457 				"==> nxge_alloc_tx_mem_pool: "
2458 				" limit size to 4M"));
2459 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2460 			goto nxge_alloc_tx_mem_pool_exit;
2461 		}
2462 
2463 		if (tx_cntl_alloc_size < 0x2000) {
2464 			tx_cntl_alloc_size = 0x2000;
2465 		}
2466 	}
2467 #endif
2468 
2469 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2470 			sizeof (uint32_t) * ndmas, KM_SLEEP);
2471 
2472 	/*
2473 	 * Allocate memory for transmit buffers and descriptor rings.
2474 	 * Replace allocation functions with interface functions provided
2475 	 * by the partition manager when it is available.
2476 	 *
2477 	 * Allocate memory for the transmit buffer pool.
2478 	 */
2479 	for (i = 0; i < ndmas; i++) {
2480 		num_chunks[i] = 0;
2481 		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
2482 					tx_buf_alloc_size,
2483 					bcopy_thresh, &num_chunks[i]);
2484 		if (status != NXGE_OK) {
2485 			break;
2486 		}
2487 		st_tdc++;
2488 	}
2489 	if (i < ndmas) {
2490 		goto nxge_alloc_tx_mem_pool_fail1;
2491 	}
2492 
2493 	st_tdc = p_cfgp->start_tdc;
2494 	/*
2495 	 * Allocate memory for descriptor rings and mailbox.
2496 	 */
2497 	for (j = 0; j < ndmas; j++) {
2498 		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
2499 					tx_cntl_alloc_size);
2500 		if (status != NXGE_OK) {
2501 			break;
2502 		}
2503 		st_tdc++;
2504 	}
2505 	if (j < ndmas) {
2506 		goto nxge_alloc_tx_mem_pool_fail2;
2507 	}
2508 
2509 	dma_poolp->ndmas = ndmas;
2510 	dma_poolp->num_chunks = num_chunks;
2511 	dma_poolp->buf_allocated = B_TRUE;
2512 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2513 	nxgep->tx_buf_pool_p = dma_poolp;
2514 
2515 	dma_cntl_poolp->ndmas = ndmas;
2516 	dma_cntl_poolp->buf_allocated = B_TRUE;
2517 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2518 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2519 
2520 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2521 		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
2522 		"ndmas %d poolp->ndmas %d",
2523 		st_tdc, ndmas, dma_poolp->ndmas));
2524 
2525 	goto nxge_alloc_tx_mem_pool_exit;
2526 
2527 nxge_alloc_tx_mem_pool_fail2:
2528 	/* Free control buffers */
2529 	j--;
2530 	for (; j >= 0; j--) {
2531 		nxge_free_tx_cntl_dma(nxgep,
2532 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2533 	}
2534 
2535 nxge_alloc_tx_mem_pool_fail1:
2536 	/* Free data buffers */
2537 	i--;
2538 	for (; i >= 0; i--) {
2539 		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2540 			num_chunks[i]);
2541 	}
2542 
2543 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2544 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2545 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2546 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2547 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2548 
2549 nxge_alloc_tx_mem_pool_exit:
2550 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2551 		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
2552 
2553 	return (status);
2554 }
2555 
2556 static nxge_status_t
2557 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2558     p_nxge_dma_common_t *dmap, size_t alloc_size,
2559     size_t block_size, uint32_t *num_chunks)
2560 {
2561 	p_nxge_dma_common_t 	tx_dmap;
2562 	nxge_status_t		status = NXGE_OK;
2563 	size_t			total_alloc_size;
2564 	size_t			allocated = 0;
2565 	int			i, size_index, array_size;
2566 
2567 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
2568 
2569 	tx_dmap = (p_nxge_dma_common_t)
2570 		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2571 			KM_SLEEP);
2572 
2573 	total_alloc_size = alloc_size;
2574 	i = 0;
2575 	size_index = 0;
2576 	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
	while ((size_index < array_size) &&
		(alloc_sizes[size_index] < alloc_size))
		size_index++;
2580 	if (size_index >= array_size) {
2581 		size_index = array_size - 1;
2582 	}
2583 
2584 	while ((allocated < total_alloc_size) &&
2585 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2586 
2587 		tx_dmap[i].dma_chunk_index = i;
2588 		tx_dmap[i].block_size = block_size;
2589 		tx_dmap[i].alength = alloc_sizes[size_index];
2590 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2591 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2592 		tx_dmap[i].dma_channel = dma_channel;
2593 		tx_dmap[i].contig_alloc_type = B_FALSE;
2594 
2595 		/*
2596 		 * N2/NIU: data buffers must be contiguous as the driver
2597 		 *	   needs to call Hypervisor api to set up
2598 		 *	   logical pages.
2599 		 */
2600 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2601 			tx_dmap[i].contig_alloc_type = B_TRUE;
2602 		}
2603 
2604 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2605 			&nxge_tx_dma_attr,
2606 			tx_dmap[i].alength,
2607 			&nxge_dev_buf_dma_acc_attr,
2608 			DDI_DMA_WRITE | DDI_DMA_STREAMING,
2609 			(p_nxge_dma_common_t)(&tx_dmap[i]));
2610 		if (status != NXGE_OK) {
2611 			size_index--;
2612 		} else {
2613 			i++;
2614 			allocated += alloc_sizes[size_index];
2615 		}
2616 	}
2617 
2618 	if (allocated < total_alloc_size) {
2619 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2620 		    "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
2621 		    "allocated 0x%x requested 0x%x",
2622 		    dma_channel,
2623 		    allocated, total_alloc_size));
2624 		status = NXGE_ERROR;
2625 		goto nxge_alloc_tx_mem_fail1;
2626 	}
2627 
2628 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2629 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
2630 	    "allocated 0x%x requested 0x%x",
2631 	    dma_channel,
2632 	    allocated, total_alloc_size));
2633 
2634 	*num_chunks = i;
2635 	*dmap = tx_dmap;
2636 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2637 		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2638 		*dmap, i));
2639 	goto nxge_alloc_tx_mem_exit;
2640 
2641 nxge_alloc_tx_mem_fail1:
2642 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2643 
2644 nxge_alloc_tx_mem_exit:
2645 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2646 		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
2647 
2648 	return (status);
2649 }
2650 
2651 /*ARGSUSED*/
2652 static void
2653 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2654     uint32_t num_chunks)
2655 {
2656 	int		i;
2657 
2658 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
2659 
2660 	for (i = 0; i < num_chunks; i++) {
2661 		nxge_dma_mem_free(dmap++);
2662 	}
2663 
2664 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
2665 }
2666 
2667 /*ARGSUSED*/
2668 static nxge_status_t
2669 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2670     p_nxge_dma_common_t *dmap, size_t size)
2671 {
2672 	p_nxge_dma_common_t 	tx_dmap;
2673 	nxge_status_t		status = NXGE_OK;
2674 
2675 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
2676 	tx_dmap = (p_nxge_dma_common_t)
2677 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2678 
2679 	tx_dmap->contig_alloc_type = B_FALSE;
2680 
2681 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2682 			&nxge_desc_dma_attr,
2683 			size,
2684 			&nxge_dev_desc_dma_acc_attr,
2685 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2686 			tx_dmap);
2687 	if (status != NXGE_OK) {
2688 		goto nxge_alloc_tx_cntl_dma_fail1;
2689 	}
2690 
2691 	*dmap = tx_dmap;
2692 	goto nxge_alloc_tx_cntl_dma_exit;
2693 
2694 nxge_alloc_tx_cntl_dma_fail1:
2695 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2696 
2697 nxge_alloc_tx_cntl_dma_exit:
2698 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2699 		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2700 
2701 	return (status);
2702 }
2703 
2704 /*ARGSUSED*/
2705 static void
2706 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2707 {
2708 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2709 
2710 	nxge_dma_mem_free(dmap);
2711 
2712 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2713 }
2714 
2715 static void
2716 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2717 {
2718 	uint32_t		i, ndmas;
2719 	p_nxge_dma_pool_t	dma_poolp;
2720 	p_nxge_dma_common_t	*dma_buf_p;
2721 	p_nxge_dma_pool_t	dma_cntl_poolp;
2722 	p_nxge_dma_common_t	*dma_cntl_p;
2723 	uint32_t 		*num_chunks;
2724 
2725 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2726 
2727 	dma_poolp = nxgep->tx_buf_pool_p;
2728 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2729 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2730 			"<== nxge_free_tx_mem_pool "
2731 			"(null rx buf pool or buf not allocated"));
2732 		return;
2733 	}
2734 
2735 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2736 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2737 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2738 			"<== nxge_free_tx_mem_pool "
2739 			"(null tx cntl buf pool or cntl buf not allocated"));
2740 		return;
2741 	}
2742 
2743 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2744 	num_chunks = dma_poolp->num_chunks;
2745 
2746 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2747 	ndmas = dma_cntl_poolp->ndmas;
2748 
2749 	for (i = 0; i < ndmas; i++) {
2750 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2751 	}
2752 
2753 	for (i = 0; i < ndmas; i++) {
2754 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2755 	}
2756 
2757 	for (i = 0; i < ndmas; i++) {
2758 		KMEM_FREE(dma_buf_p[i],
2759 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2760 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2761 	}
2762 
2763 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2764 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2765 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2766 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2767 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2768 
2769 	nxgep->tx_buf_pool_p = NULL;
2770 	nxgep->tx_cntl_pool_p = NULL;
2771 
2772 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2773 }
2774 
2775 /*ARGSUSED*/
2776 static nxge_status_t
2777 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2778 	struct ddi_dma_attr *dma_attrp,
2779 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2780 	p_nxge_dma_common_t dma_p)
2781 {
2782 	caddr_t 		kaddrp;
2783 	int			ddi_status = DDI_SUCCESS;
2784 	boolean_t		contig_alloc_type;
2785 
2786 	contig_alloc_type = dma_p->contig_alloc_type;
2787 
2788 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2789 		/*
		 * contig_alloc_type (contiguous memory) is only allowed
		 * for N2/NIU.
2792 		 */
2793 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2794 			"nxge_dma_mem_alloc: alloc type not allows (%d)",
2795 			dma_p->contig_alloc_type));
2796 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2797 	}
2798 
2799 	dma_p->dma_handle = NULL;
2800 	dma_p->acc_handle = NULL;
2801 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2802 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2803 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2804 		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2805 	if (ddi_status != DDI_SUCCESS) {
2806 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2807 			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2808 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2809 	}
2810 
2811 	switch (contig_alloc_type) {
2812 	case B_FALSE:
2813 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2814 			acc_attr_p,
2815 			xfer_flags,
2816 			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2817 			&dma_p->acc_handle);
2818 		if (ddi_status != DDI_SUCCESS) {
2819 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2820 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2821 			ddi_dma_free_handle(&dma_p->dma_handle);
2822 			dma_p->dma_handle = NULL;
2823 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2824 		}
2825 		if (dma_p->alength < length) {
2826 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2827 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2828 				"< length."));
2829 			ddi_dma_mem_free(&dma_p->acc_handle);
2830 			ddi_dma_free_handle(&dma_p->dma_handle);
2831 			dma_p->acc_handle = NULL;
2832 			dma_p->dma_handle = NULL;
2833 			return (NXGE_ERROR);
2834 		}
2835 
2836 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2837 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2838 			&dma_p->dma_cookie, &dma_p->ncookies);
2839 		if (ddi_status != DDI_DMA_MAPPED) {
2840 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2841 				"nxge_dma_mem_alloc:di_dma_addr_bind failed "
2842 				"(staus 0x%x ncookies %d.)", ddi_status,
2843 				dma_p->ncookies));
2844 			if (dma_p->acc_handle) {
2845 				ddi_dma_mem_free(&dma_p->acc_handle);
2846 				dma_p->acc_handle = NULL;
2847 			}
2848 			ddi_dma_free_handle(&dma_p->dma_handle);
2849 			dma_p->dma_handle = NULL;
2850 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2851 		}
2852 
2853 		if (dma_p->ncookies != 1) {
2854 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2855 				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
2856 				"> 1 cookie"
2857 				"(staus 0x%x ncookies %d.)", ddi_status,
2858 				dma_p->ncookies));
2859 			if (dma_p->acc_handle) {
2860 				ddi_dma_mem_free(&dma_p->acc_handle);
2861 				dma_p->acc_handle = NULL;
2862 			}
2863 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2864 			ddi_dma_free_handle(&dma_p->dma_handle);
2865 			dma_p->dma_handle = NULL;
2866 			return (NXGE_ERROR);
2867 		}
2868 		break;
2869 
2870 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2871 	case B_TRUE:
2872 		kaddrp = (caddr_t)contig_mem_alloc(length);
2873 		if (kaddrp == NULL) {
2874 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2875 				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
2876 			ddi_dma_free_handle(&dma_p->dma_handle);
2877 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2878 		}
2879 
2880 		dma_p->alength = length;
2881 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2882 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2883 			&dma_p->dma_cookie, &dma_p->ncookies);
2884 		if (ddi_status != DDI_DMA_MAPPED) {
2885 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2886 				"nxge_dma_mem_alloc:di_dma_addr_bind failed "
2887 				"(status 0x%x ncookies %d.)", ddi_status,
2888 				dma_p->ncookies));
2889 
2890 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2891 				"==> nxge_dma_mem_alloc: (not mapped)"
2892 				"length %lu (0x%x) "
2893 				"free contig kaddrp $%p "
2894 				"va_to_pa $%p",
2895 				length, length,
2896 				kaddrp,
2897 				va_to_pa(kaddrp)));
2898 
2899 
2900 			contig_mem_free((void *)kaddrp, length);
2901 			ddi_dma_free_handle(&dma_p->dma_handle);
2902 
2903 			dma_p->dma_handle = NULL;
2904 			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
2906 			dma_p->kaddrp = NULL;
2907 
2908 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2909 		}
2910 
2911 		if (dma_p->ncookies != 1 ||
			(dma_p->dma_cookie.dmac_laddress == 0)) {
2913 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2914 				"nxge_dma_mem_alloc:di_dma_addr_bind > 1 "
2915 				"cookie or "
2916 				"dmac_laddress is NULL $%p size %d "
2917 				" (status 0x%x ncookies %d.)",
2918 				ddi_status,
2919 				dma_p->dma_cookie.dmac_laddress,
2920 				dma_p->dma_cookie.dmac_size,
2921 				dma_p->ncookies));
2922 
2923 			contig_mem_free((void *)kaddrp, length);
2924 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2925 			ddi_dma_free_handle(&dma_p->dma_handle);
2926 
2927 			dma_p->alength = 0;
2928 			dma_p->dma_handle = NULL;
2929 			dma_p->acc_handle = NULL;
2930 			dma_p->kaddrp = NULL;
2931 
2932 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2933 		}
2934 		break;
2935 
2936 #else
2937 	case B_TRUE:
2938 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2939 			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2940 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2941 #endif
2942 	}
2943 
2944 	dma_p->kaddrp = kaddrp;
2945 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
2946 			dma_p->alength - RXBUF_64B_ALIGNED;
2947 #if defined(__i386)
2948 	dma_p->ioaddr_pp =
2949 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2950 #else
2951 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2952 #endif
2953 	dma_p->last_ioaddr_pp =
2954 #if defined(__i386)
2955 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
2956 #else
2957 		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
2958 #endif
2959 				dma_p->alength - RXBUF_64B_ALIGNED;
2960 
2961 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2962 
2963 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2964 	dma_p->orig_ioaddr_pp =
2965 		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
2966 	dma_p->orig_alength = length;
2967 	dma_p->orig_kaddrp = kaddrp;
2968 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2969 #endif
2970 
2971 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2972 		"dma buffer allocated: dma_p $%p "
2973 		"return dmac_ladress from cookie $%p cookie dmac_size %d "
2974 		"dma_p->ioaddr_p $%p "
2975 		"dma_p->orig_ioaddr_p $%p "
2976 		"orig_vatopa $%p "
2977 		"alength %d (0x%x) "
2978 		"kaddrp $%p "
2979 		"length %d (0x%x)",
2980 		dma_p,
2981 		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2982 		dma_p->ioaddr_pp,
2983 		dma_p->orig_ioaddr_pp,
2984 		dma_p->orig_vatopa,
2985 		dma_p->alength, dma_p->alength,
2986 		kaddrp,
2987 		length, length));
2988 
2989 	return (NXGE_OK);
2990 }
2991 
2992 static void
2993 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2994 {
2995 	if (dma_p->dma_handle != NULL) {
2996 		if (dma_p->ncookies) {
2997 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2998 			dma_p->ncookies = 0;
2999 		}
3000 		ddi_dma_free_handle(&dma_p->dma_handle);
3001 		dma_p->dma_handle = NULL;
3002 	}
3003 
3004 	if (dma_p->acc_handle != NULL) {
3005 		ddi_dma_mem_free(&dma_p->acc_handle);
3006 		dma_p->acc_handle = NULL;
3007 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3008 	}
3009 
3010 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3011 	if (dma_p->contig_alloc_type &&
3012 			dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
			"kaddrp $%p (orig_kaddrp $%p) "
			"mem type %d "
			"orig_alength %d "
			"alength 0x%x (%d)",
3018 			dma_p->kaddrp,
3019 			dma_p->orig_kaddrp,
3020 			dma_p->contig_alloc_type,
3021 			dma_p->orig_alength,
3022 			dma_p->alength, dma_p->alength));
3023 
3024 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
3026 		dma_p->orig_kaddrp = NULL;
3027 		dma_p->contig_alloc_type = B_FALSE;
3028 	}
3029 #endif
3030 	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
3032 }
3033 
3034 /*
3035  *	nxge_m_start() -- start transmitting and receiving.
3036  *
3037  *	This function is called by the MAC layer when the first
3038  *	stream is open to prepare the hardware ready for sending
3039  *	and transmitting packets.
3040  */
3041 static int
3042 nxge_m_start(void *arg)
3043 {
3044 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3045 
3046 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3047 
3048 	MUTEX_ENTER(nxgep->genlock);
3049 	if (nxge_init(nxgep) != NXGE_OK) {
3050 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3051 			"<== nxge_m_start: initialization failed"));
3052 		MUTEX_EXIT(nxgep->genlock);
3053 		return (EIO);
3054 	}
3055 
3056 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3057 		goto nxge_m_start_exit;
3058 	/*
3059 	 * Start timer to check the system error and tx hangs
3060 	 */
3061 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3062 		NXGE_CHECK_TIMER);
3063 
3064 	nxgep->link_notify = B_TRUE;
3065 
3066 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3067 
3068 nxge_m_start_exit:
3069 	MUTEX_EXIT(nxgep->genlock);
3070 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3071 	return (0);
3072 }
3073 
3074 /*
3075  *	nxge_m_stop(): stop transmitting and receiving.
3076  */
3077 static void
3078 nxge_m_stop(void *arg)
3079 {
3080 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3081 
3082 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3083 
3084 	if (nxgep->nxge_timerid) {
3085 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3086 		nxgep->nxge_timerid = 0;
3087 	}
3088 
3089 	MUTEX_ENTER(nxgep->genlock);
3090 	nxge_uninit(nxgep);
3091 
3092 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3093 
3094 	MUTEX_EXIT(nxgep->genlock);
3095 
3096 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3097 }
3098 
3099 static int
3100 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3101 {
3102 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3103 	struct 		ether_addr addrp;
3104 
3105 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3106 
3107 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3108 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3109 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3110 			"<== nxge_m_unicst: set unitcast failed"));
3111 		return (EINVAL);
3112 	}
3113 
3114 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3115 
3116 	return (0);
3117 }
3118 
3119 static int
3120 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3121 {
3122 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3123 	struct 		ether_addr addrp;
3124 
3125 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3126 		"==> nxge_m_multicst: add %d", add));
3127 
3128 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3129 	if (add) {
3130 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3131 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3132 				"<== nxge_m_multicst: add multicast failed"));
3133 			return (EINVAL);
3134 		}
3135 	} else {
3136 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3137 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3138 				"<== nxge_m_multicst: del multicast failed"));
3139 			return (EINVAL);
3140 		}
3141 	}
3142 
3143 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3144 
3145 	return (0);
3146 }
3147 
3148 static int
3149 nxge_m_promisc(void *arg, boolean_t on)
3150 {
3151 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3152 
3153 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3154 		"==> nxge_m_promisc: on %d", on));
3155 
3156 	if (nxge_set_promisc(nxgep, on)) {
3157 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3158 			"<== nxge_m_promisc: set promisc failed"));
3159 		return (EINVAL);
3160 	}
3161 
3162 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3163 		"<== nxge_m_promisc: on %d", on));
3164 
3165 	return (0);
3166 }
3167 
3168 static void
3169 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3170 {
3171 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3172 	struct 		iocblk *iocp;
3173 	boolean_t 	need_privilege;
3174 	int 		err;
3175 	int 		cmd;
3176 
3177 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3178 
3179 	iocp = (struct iocblk *)mp->b_rptr;
3180 	iocp->ioc_error = 0;
3181 	need_privilege = B_TRUE;
3182 	cmd = iocp->ioc_cmd;
3183 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3184 	switch (cmd) {
3185 	default:
3186 		miocnak(wq, mp, 0, EINVAL);
3187 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3188 		return;
3189 
3190 	case LB_GET_INFO_SIZE:
3191 	case LB_GET_INFO:
3192 	case LB_GET_MODE:
3193 		need_privilege = B_FALSE;
3194 		break;
3195 	case LB_SET_MODE:
3196 		break;
3197 
3198 	case ND_GET:
3199 		need_privilege = B_FALSE;
3200 		break;
3201 	case ND_SET:
3202 		break;
3203 
3204 	case NXGE_GET_MII:
3205 	case NXGE_PUT_MII:
3206 	case NXGE_GET64:
3207 	case NXGE_PUT64:
3208 	case NXGE_GET_TX_RING_SZ:
3209 	case NXGE_GET_TX_DESC:
3210 	case NXGE_TX_SIDE_RESET:
3211 	case NXGE_RX_SIDE_RESET:
3212 	case NXGE_GLOBAL_RESET:
3213 	case NXGE_RESET_MAC:
3214 	case NXGE_TX_REGS_DUMP:
3215 	case NXGE_RX_REGS_DUMP:
3216 	case NXGE_INT_REGS_DUMP:
3217 	case NXGE_VIR_INT_REGS_DUMP:
3218 	case NXGE_PUT_TCAM:
3219 	case NXGE_GET_TCAM:
3220 	case NXGE_RTRACE:
3221 	case NXGE_RDUMP:
3222 
3223 		need_privilege = B_FALSE;
3224 		break;
3225 	case NXGE_INJECT_ERR:
3226 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3227 		nxge_err_inject(nxgep, wq, mp);
3228 		break;
3229 	}
3230 
3231 	if (need_privilege) {
3232 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3233 		if (err != 0) {
3234 			miocnak(wq, mp, 0, err);
3235 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3236 				"<== nxge_m_ioctl: no priv"));
3237 			return;
3238 		}
3239 	}
3240 
3241 	switch (cmd) {
3242 	case ND_GET:
3243 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
3244 	case ND_SET:
3245 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
3246 		nxge_param_ioctl(nxgep, wq, mp, iocp);
3247 		break;
3248 
3249 	case LB_GET_MODE:
3250 	case LB_SET_MODE:
3251 	case LB_GET_INFO_SIZE:
3252 	case LB_GET_INFO:
3253 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3254 		break;
3255 
3256 	case NXGE_GET_MII:
3257 	case NXGE_PUT_MII:
3258 	case NXGE_PUT_TCAM:
3259 	case NXGE_GET_TCAM:
3260 	case NXGE_GET64:
3261 	case NXGE_PUT64:
3262 	case NXGE_GET_TX_RING_SZ:
3263 	case NXGE_GET_TX_DESC:
3264 	case NXGE_TX_SIDE_RESET:
3265 	case NXGE_RX_SIDE_RESET:
3266 	case NXGE_GLOBAL_RESET:
3267 	case NXGE_RESET_MAC:
3268 	case NXGE_TX_REGS_DUMP:
3269 	case NXGE_RX_REGS_DUMP:
3270 	case NXGE_INT_REGS_DUMP:
3271 	case NXGE_VIR_INT_REGS_DUMP:
3272 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3273 			"==> nxge_m_ioctl: cmd 0x%x", cmd));
3274 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3275 		break;
3276 	}
3277 
3278 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3279 }
3280 
3281 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3282 
3283 static void
3284 nxge_m_resources(void *arg)
3285 {
3286 	p_nxge_t		nxgep = arg;
3287 	mac_rx_fifo_t 		mrf;
3288 	p_rx_rcr_rings_t	rcr_rings;
3289 	p_rx_rcr_ring_t		*rcr_p;
3290 	uint32_t		i, ndmas;
3291 	nxge_status_t		status;
3292 
3293 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3294 
3295 	MUTEX_ENTER(nxgep->genlock);
3296 
3297 	/*
3298 	 * CR 6492541 Check to see if the drv_state has been initialized,
3299 	 * if not * call nxge_init().
3300 	 */
3301 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3302 		status = nxge_init(nxgep);
3303 		if (status != NXGE_OK)
3304 			goto nxge_m_resources_exit;
3305 	}
3306 
3307 	mrf.mrf_type = MAC_RX_FIFO;
3308 	mrf.mrf_blank = nxge_rx_hw_blank;
3309 	mrf.mrf_arg = (void *)nxgep;
3310 
3311 	mrf.mrf_normal_blank_time = 128;
3312 	mrf.mrf_normal_pkt_count = 8;
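	/*
	 * The values above are only the initial RX interrupt blanking
	 * parameters; the MAC layer may later invoke nxge_rx_hw_blank()
	 * with new tick/packet-count values to retune coalescing.
	 */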
3313 	rcr_rings = nxgep->rx_rcr_rings;
3314 	rcr_p = rcr_rings->rcr_rings;
3315 	ndmas = rcr_rings->ndmas;
3316 
3317 	/*
3318 	 * Export our receive resources to the MAC layer.
3319 	 */
3320 	for (i = 0; i < ndmas; i++) {
3321 		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
3322 				mac_resource_add(nxgep->mach,
3323 				    (mac_resource_t *)&mrf);
3324 
3325 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3326 			"==> nxge_m_resources: vdma %d dma %d "
3327 			"rcrptr 0x%016llx mac_handle 0x%016llx",
3328 			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
3329 			rcr_p[i],
3330 			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
3331 	}
3332 
3333 nxge_m_resources_exit:
3334 	MUTEX_EXIT(nxgep->genlock);
3335 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3336 }
3337 
3338 static void
3339 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3340 {
3341 	p_nxge_mmac_stats_t mmac_stats;
3342 	int i;
3343 	nxge_mmac_t *mmac_info;
3344 
3345 	mmac_info = &nxgep->nxge_mmac_info;
3346 
3347 	mmac_stats = &nxgep->statsp->mmac_stats;
3348 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3349 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3350 
3351 	for (i = 0; i < ETHERADDRL; i++) {
3352 		if (factory) {
3353 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3354 			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
3355 		} else {
3356 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3357 			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
3358 		}
3359 	}
3360 }
3361 
3362 /*
3363  * nxge_altmac_set() -- Set an alternate MAC address
3364  */
3365 static int
3366 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
3367 {
3368 	uint8_t addrn;
3369 	uint8_t portn;
3370 	npi_mac_addr_t altmac;
3371 	hostinfo_t mac_rdc;
3372 	p_nxge_class_pt_cfg_t clscfgp;
3373 
3374 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
3375 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
3376 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
3377 
3378 	portn = nxgep->mac.portnum;
3379 	addrn = (uint8_t)slot - 1;
3380 
3381 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
3382 		addrn, &altmac) != NPI_SUCCESS)
3383 		return (EIO);
3384 
3385 	/*
3386 	 * Set the rdc table number for the host info entry
3387 	 * for this mac address slot.
3388 	 */
3389 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
3390 	mac_rdc.value = 0;
3391 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
3392 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
3393 
3394 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
3395 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
3396 		return (EIO);
3397 	}
3398 
3399 	/*
3400 	 * Enable comparison with the alternate MAC address.
3401 	 * While the first alternate addr is enabled by bit 1 of register
3402 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
3403 	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
3404 	 * accordingly before calling npi_mac_altaddr_entry.
3405 	 */
3406 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3407 		addrn = (uint8_t)slot - 1;
3408 	else
3409 		addrn = (uint8_t)slot;
3410 
3411 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3412 		!= NPI_SUCCESS)
3413 		return (EIO);
3414 
3415 	return (0);
3416 }
3417 
3418 /*
 * nxge_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, and enable the port to start filtering on
3421  * the new MAC address.  Returns 0 on success.
3422  */
3423 static int
3424 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3425 {
3426 	p_nxge_t nxgep = arg;
3427 	mac_addr_slot_t slot;
3428 	nxge_mmac_t *mmac_info;
3429 	int err;
3430 	nxge_status_t status;
3431 
3432 	mutex_enter(nxgep->genlock);
3433 
3434 	/*
3435 	 * Make sure that nxge is initialized, if _start() has
3436 	 * not been called.
3437 	 */
3438 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3439 		status = nxge_init(nxgep);
3440 		if (status != NXGE_OK) {
3441 			mutex_exit(nxgep->genlock);
3442 			return (ENXIO);
3443 		}
3444 	}
3445 
3446 	mmac_info = &nxgep->nxge_mmac_info;
3447 	if (mmac_info->naddrfree == 0) {
3448 		mutex_exit(nxgep->genlock);
3449 		return (ENOSPC);
3450 	}
3451 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3452 		maddr->mma_addrlen)) {
3453 		mutex_exit(nxgep->genlock);
3454 		return (EINVAL);
3455 	}
3456 	/*
3457 	 * 	Search for the first available slot. Because naddrfree
3458 	 * is not zero, we are guaranteed to find one.
3459 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
3460 	 * MAC slot is slot 1.
3461 	 *	Each of the first two ports of Neptune has 16 alternate
3462 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
3463 	 * MAC addresses. We first search among the slots without bundled
3464 	 * factory MACs. If we fail to find one in that range, then we
3465 	 * search the slots with bundled factory MACs.  A factory MAC
3466 	 * will be wasted while the slot is used with a user MAC address.
3467 	 * But the slot could be used by factory MAC again after calling
3468 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3469 	 */
3470 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3471 		for (slot = mmac_info->num_factory_mmac + 1;
3472 			slot <= mmac_info->num_mmac; slot++) {
3473 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3474 				break;
3475 		}
3476 		if (slot > mmac_info->num_mmac) {
3477 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3478 				slot++) {
3479 				if (!(mmac_info->mac_pool[slot].flags
3480 					& MMAC_SLOT_USED))
3481 					break;
3482 			}
3483 		}
3484 	} else {
3485 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3486 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3487 				break;
3488 		}
3489 	}
3490 	ASSERT(slot <= mmac_info->num_mmac);
3491 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3492 		mutex_exit(nxgep->genlock);
3493 		return (err);
3494 	}
3495 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3496 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3497 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3498 	mmac_info->naddrfree--;
3499 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3500 
3501 	maddr->mma_slot = slot;
3502 
3503 	mutex_exit(nxgep->genlock);
3504 	return (0);
3505 }
3506 
3507 /*
3508  * This function reserves an unused slot and programs the slot and the HW
3509  * with a factory mac address.
3510  */
3511 static int
3512 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
3513 {
3514 	p_nxge_t nxgep = arg;
3515 	mac_addr_slot_t slot;
3516 	nxge_mmac_t *mmac_info;
3517 	int err;
3518 	nxge_status_t status;
3519 
3520 	mutex_enter(nxgep->genlock);
3521 
3522 	/*
3523 	 * Make sure that nxge is initialized, if _start() has
3524 	 * not been called.
3525 	 */
3526 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3527 		status = nxge_init(nxgep);
3528 		if (status != NXGE_OK) {
3529 			mutex_exit(nxgep->genlock);
3530 			return (ENXIO);
3531 		}
3532 	}
3533 
3534 	mmac_info = &nxgep->nxge_mmac_info;
3535 	if (mmac_info->naddrfree == 0) {
3536 		mutex_exit(nxgep->genlock);
3537 		return (ENOSPC);
3538 	}
3539 
3540 	slot = maddr->mma_slot;
3541 	if (slot == -1) {  /* -1: Take the first available slot */
3542 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
3543 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3544 				break;
3545 		}
3546 		if (slot > mmac_info->num_factory_mmac) {
3547 			mutex_exit(nxgep->genlock);
3548 			return (ENOSPC);
3549 		}
3550 	}
3551 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
3552 		/*
3553 		 * Do not support factory MAC at a slot greater than
3554 		 * num_factory_mmac even when there are available factory
3555 		 * MAC addresses because the alternate MACs are bundled with
		 * slot[1] through slot[num_factory_mmac].
3557 		 */
3558 		mutex_exit(nxgep->genlock);
3559 		return (EINVAL);
3560 	}
3561 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3562 		mutex_exit(nxgep->genlock);
3563 		return (EBUSY);
3564 	}
3565 	/* Verify the address to be reserved */
3566 	if (!mac_unicst_verify(nxgep->mach,
3567 		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
3568 		mutex_exit(nxgep->genlock);
3569 		return (EINVAL);
3570 	}
3571 	if (err = nxge_altmac_set(nxgep,
3572 		mmac_info->factory_mac_pool[slot], slot)) {
3573 		mutex_exit(nxgep->genlock);
3574 		return (err);
3575 	}
3576 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
3577 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3578 	mmac_info->naddrfree--;
3579 
3580 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
3581 	mutex_exit(nxgep->genlock);
3582 
3583 	/* Pass info back to the caller */
3584 	maddr->mma_slot = slot;
3585 	maddr->mma_addrlen = ETHERADDRL;
3586 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3587 
3588 	return (0);
3589 }
3590 
3591 /*
3592  * Remove the specified mac address and update the HW not to filter
3593  * the mac address anymore.
3594  */
3595 static int
3596 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
3597 {
3598 	p_nxge_t nxgep = arg;
3599 	nxge_mmac_t *mmac_info;
3600 	uint8_t addrn;
3601 	uint8_t portn;
3602 	int err = 0;
3603 	nxge_status_t status;
3604 
3605 	mutex_enter(nxgep->genlock);
3606 
3607 	/*
3608 	 * Make sure that nxge is initialized, if _start() has
3609 	 * not been called.
3610 	 */
3611 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3612 		status = nxge_init(nxgep);
3613 		if (status != NXGE_OK) {
3614 			mutex_exit(nxgep->genlock);
3615 			return (ENXIO);
3616 		}
3617 	}
3618 
3619 	mmac_info = &nxgep->nxge_mmac_info;
3620 	if (slot < 1 || slot > mmac_info->num_mmac) {
3621 		mutex_exit(nxgep->genlock);
3622 		return (EINVAL);
3623 	}
3624 
3625 	portn = nxgep->mac.portnum;
3626 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3627 		addrn = (uint8_t)slot - 1;
3628 	else
3629 		addrn = (uint8_t)slot;
3630 
3631 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3632 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3633 				== NPI_SUCCESS) {
3634 			mmac_info->naddrfree++;
3635 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3636 			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we must
			 * set the MMAC_VENDOR_ADDR flag if this slot has an
3640 			 * associated factory MAC to indicate that a factory
3641 			 * MAC is available.
3642 			 */
3643 			if (slot <= mmac_info->num_factory_mmac) {
3644 				mmac_info->mac_pool[slot].flags
3645 					|= MMAC_VENDOR_ADDR;
3646 			}
3647 			/*
3648 			 * Clear mac_pool[slot].addr so that kstat shows 0
3649 			 * alternate MAC address if the slot is not used.
3650 			 * (But nxge_m_mmac_get returns the factory MAC even
3651 			 * when the slot is not used!)
3652 			 */
3653 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3654 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3655 		} else {
3656 			err = EIO;
3657 		}
3658 	} else {
3659 		err = EINVAL;
3660 	}
3661 
3662 	mutex_exit(nxgep->genlock);
3663 	return (err);
3664 }
3665 
3666 
3667 /*
3668  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
3669  */
3670 static int
3671 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3672 {
3673 	p_nxge_t nxgep = arg;
3674 	mac_addr_slot_t slot;
3675 	nxge_mmac_t *mmac_info;
3676 	int err = 0;
3677 	nxge_status_t status;
3678 
3679 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3680 			maddr->mma_addrlen))
3681 		return (EINVAL);
3682 
3683 	slot = maddr->mma_slot;
3684 
3685 	mutex_enter(nxgep->genlock);
3686 
3687 	/*
3688 	 * Make sure that nxge is initialized if _start() has
3689 	 * not been called.
3690 	 */
3691 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3692 		status = nxge_init(nxgep);
3693 		if (status != NXGE_OK) {
3694 			mutex_exit(nxgep->genlock);
3695 			return (ENXIO);
3696 		}
3697 	}
3698 
3699 	mmac_info = &nxgep->nxge_mmac_info;
3700 	if (slot < 1 || slot > mmac_info->num_mmac) {
3701 		mutex_exit(nxgep->genlock);
3702 		return (EINVAL);
3703 	}
3704 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3705 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3706 			== 0) {
3707 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3708 				ETHERADDRL);
3709 			/*
3710 			 * Assume that the MAC passed down from the caller
3711 			 * is not a factory MAC address (The user should
3712 			 * call mmac_remove followed by mmac_reserve if
3713 			 * he wants to use the factory MAC for this slot).
3714 			 */
3715 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3716 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3717 		}
3718 	} else {
3719 		err = EINVAL;
3720 	}
3721 	mutex_exit(nxgep->genlock);
3722 	return (err);
3723 }
3724 
3725 /*
3726  * nxge_m_mmac_get() - Get the MAC address and other information
3727  * related to the slot.  mma_flags should be set to 0 in the call.
3728  * Note: although kstat shows MAC address as zero when a slot is
3729  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
3730  * to the caller as long as the slot is not using a user MAC address.
3731  * The following table shows the rules,
3732  *
3733  *				   USED    VENDOR    mma_addr
3734  * ------------------------------------------------------------
3735  * (1) Slot uses a user MAC:        yes      no     user MAC
3736  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
3737  * (3) Slot is not used but is
3738  *     factory MAC capable:         no       yes    factory MAC
3739  * (4) Slot is not used and is
3740  *     not factory MAC capable:     no       no        0
3741  * ------------------------------------------------------------
3742  */
3743 static int
3744 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3745 {
3746 	nxge_t *nxgep = arg;
3747 	mac_addr_slot_t slot;
3748 	nxge_mmac_t *mmac_info;
3749 	nxge_status_t status;
3750 
3751 	slot = maddr->mma_slot;
3752 
3753 	mutex_enter(nxgep->genlock);
3754 
3755 	/*
3756 	 * Make sure that nxge is initialized if _start() has
3757 	 * not been called.
3758 	 */
3759 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3760 		status = nxge_init(nxgep);
3761 		if (status != NXGE_OK) {
3762 			mutex_exit(nxgep->genlock);
3763 			return (ENXIO);
3764 		}
3765 	}
3766 
3767 	mmac_info = &nxgep->nxge_mmac_info;
3768 
3769 	if (slot < 1 || slot > mmac_info->num_mmac) {
3770 		mutex_exit(nxgep->genlock);
3771 		return (EINVAL);
3772 	}
3773 	maddr->mma_flags = 0;
3774 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3775 		maddr->mma_flags |= MMAC_SLOT_USED;
3776 
3777 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3778 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3779 		bcopy(mmac_info->factory_mac_pool[slot],
3780 			maddr->mma_addr, ETHERADDRL);
3781 		maddr->mma_addrlen = ETHERADDRL;
3782 	} else {
3783 		if (maddr->mma_flags & MMAC_SLOT_USED) {
3784 			bcopy(mmac_info->mac_pool[slot].addr,
3785 				maddr->mma_addr, ETHERADDRL);
3786 			maddr->mma_addrlen = ETHERADDRL;
3787 		} else {
3788 			bzero(maddr->mma_addr, ETHERADDRL);
3789 			maddr->mma_addrlen = 0;
3790 		}
3791 	}
3792 	mutex_exit(nxgep->genlock);
3793 	return (0);
3794 }
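
/*
 * Worked example of the table above (illustrative only; the slot
 * number is hypothetical): probing whether a slot is unused but
 * factory-MAC capable (row 3) before reserving it.
 *
 *	mac_multi_addr_t maddr;
 *
 *	maddr.mma_slot = 2;
 *	maddr.mma_flags = 0;
 *	if (nxge_m_mmac_get(nxgep, &maddr) == 0 &&
 *	    maddr.mma_flags == MMAC_VENDOR_ADDR) {
 *		... slot 2 is free; mma_addr holds its factory MAC ...
 *	}
 */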
3795 
3796 
3797 static boolean_t
3798 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3799 {
3800 	nxge_t *nxgep = arg;
3801 	uint32_t *txflags = cap_data;
3802 	multiaddress_capab_t *mmacp = cap_data;
3803 
3804 	switch (cap) {
3805 	case MAC_CAPAB_HCKSUM:
3806 		*txflags = HCKSUM_INET_PARTIAL;
3807 		break;
3808 	case MAC_CAPAB_POLL:
3809 		/*
3810 		 * There's nothing for us to fill in; simply returning
3811 		 * B_TRUE to state that we support polling is sufficient.
3812 		 */
3813 		break;
3814 
3815 	case MAC_CAPAB_MULTIADDRESS:
3816 		mutex_enter(nxgep->genlock);
3817 
3818 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3819 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
3820 		mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */
3821 		/*
3822 		 * maddr_handle is driver's private data, passed back to
3823 		 * entry point functions as arg.
3824 		 */
3825 		mmacp->maddr_handle	= nxgep;
3826 		mmacp->maddr_add	= nxge_m_mmac_add;
3827 		mmacp->maddr_remove	= nxge_m_mmac_remove;
3828 		mmacp->maddr_modify	= nxge_m_mmac_modify;
3829 		mmacp->maddr_get	= nxge_m_mmac_get;
3830 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
3831 
3832 		mutex_exit(nxgep->genlock);
3833 		break;
3834 	case MAC_CAPAB_LSO: {
3835 		mac_capab_lso_t *cap_lso = cap_data;
3836 
3837 		if (nxgep->soft_lso_enable) {
3838 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3839 			if (nxge_lso_max > NXGE_LSO_MAXLEN) {
3840 				nxge_lso_max = NXGE_LSO_MAXLEN;
3841 			}
3842 			cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max;
3843 			break;
3844 		} else {
3845 			return (B_FALSE);
3846 		}
3847 	}
3848 
3849 	default:
3850 		return (B_FALSE);
3851 	}
3852 	return (B_TRUE);
3853 }
3854 
3855 static boolean_t
3856 nxge_param_locked(mac_prop_id_t pr_num)
3857 {
3858 	/*
3859 	 * All adv_* parameters are locked (read-only) while
3860 	 * the device is in any sort of loopback mode ...
3861 	 */
3862 	switch (pr_num) {
3863 		case DLD_PROP_ADV_1000FDX_CAP:
3864 		case DLD_PROP_EN_1000FDX_CAP:
3865 		case DLD_PROP_ADV_1000HDX_CAP:
3866 		case DLD_PROP_EN_1000HDX_CAP:
3867 		case DLD_PROP_ADV_100FDX_CAP:
3868 		case DLD_PROP_EN_100FDX_CAP:
3869 		case DLD_PROP_ADV_100HDX_CAP:
3870 		case DLD_PROP_EN_100HDX_CAP:
3871 		case DLD_PROP_ADV_10FDX_CAP:
3872 		case DLD_PROP_EN_10FDX_CAP:
3873 		case DLD_PROP_ADV_10HDX_CAP:
3874 		case DLD_PROP_EN_10HDX_CAP:
3875 		case DLD_PROP_AUTONEG:
3876 		case DLD_PROP_FLOWCTRL:
3877 			return (B_TRUE);
3878 	}
3879 	return (B_FALSE);
3880 }
3881 
3882 /*
3883  * callback functions for set/get of properties
3884  */
3885 static int
3886 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3887     uint_t pr_valsize, const void *pr_val)
3888 {
3889 	nxge_t		*nxgep = barg;
3890 	p_nxge_param_t	param_arr;
3891 	p_nxge_stats_t	statsp;
3892 	int		err = 0;
3893 	uint8_t		val;
3894 	uint32_t	cur_mtu, new_mtu, old_framesize;
3895 	link_flowctrl_t	fl;
3896 
3897 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
3898 	param_arr = nxgep->param_arr;
3899 	statsp = nxgep->statsp;
3900 	mutex_enter(nxgep->genlock);
3901 	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
3902 	    nxge_param_locked(pr_num)) {
3903 		/*
3904 		 * All adv_* parameters are locked (read-only)
3905 		 * while the device is in any sort of loopback mode.
3906 		 */
3907 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3908 		    "==> nxge_m_setprop: loopback mode: read only"));
3909 		mutex_exit(nxgep->genlock);
3910 		return (EBUSY);
3911 	}
3912 
3913 	val = *(uint8_t *)pr_val;
3914 	switch (pr_num) {
3915 		case DLD_PROP_EN_1000FDX_CAP:
3916 			nxgep->param_en_1000fdx = val;
3917 			param_arr[param_anar_1000fdx].value = val;
3918 
3919 			goto reprogram;
3920 
3921 		case DLD_PROP_EN_100FDX_CAP:
3922 			nxgep->param_en_100fdx = val;
3923 			param_arr[param_anar_100fdx].value = val;
3924 
3925 			goto reprogram;
3926 
3927 		case DLD_PROP_EN_10FDX_CAP:
3928 			nxgep->param_en_10fdx = val;
3929 			param_arr[param_anar_10fdx].value = val;
3930 
3931 			goto reprogram;
3932 
3933 		case DLD_PROP_EN_1000HDX_CAP:
3934 		case DLD_PROP_EN_100HDX_CAP:
3935 		case DLD_PROP_EN_10HDX_CAP:
3936 		case DLD_PROP_ADV_1000FDX_CAP:
3937 		case DLD_PROP_ADV_1000HDX_CAP:
3938 		case DLD_PROP_ADV_100FDX_CAP:
3939 		case DLD_PROP_ADV_100HDX_CAP:
3940 		case DLD_PROP_ADV_10FDX_CAP:
3941 		case DLD_PROP_ADV_10HDX_CAP:
3942 		case DLD_PROP_STATUS:
3943 		case DLD_PROP_SPEED:
3944 		case DLD_PROP_DUPLEX:
3945 			err = EINVAL; /* cannot set read-only properties */
3946 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3947 			    "==> nxge_m_setprop:  read only property %d",
3948 			    pr_num));
3949 			break;
3950 
3951 		case DLD_PROP_AUTONEG:
3952 			param_arr[param_autoneg].value = val;
3953 
3954 			goto reprogram;
3955 
3956 		case DLD_PROP_DEFMTU:
3957 			if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3958 				err = EBUSY;
3959 				break;
3960 			}
3961 
3962 			cur_mtu = nxgep->mac.default_mtu;
3963 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3964 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3965 			    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
3966 			    new_mtu, nxgep->mac.is_jumbo));
3967 
3968 			if (new_mtu == cur_mtu) {
3969 				err = 0;
3970 				break;
3971 			}
3972 			if (new_mtu < NXGE_DEFAULT_MTU ||
3973 			    new_mtu > NXGE_MAXIMUM_MTU) {
3974 				err = EINVAL;
3975 				break;
3976 			}
3977 
3978 			if ((new_mtu > NXGE_DEFAULT_MTU) &&
3979 			    !nxgep->mac.is_jumbo) {
3980 				err = EINVAL;
3981 				break;
3982 			}
3983 
3984 			old_framesize = (uint32_t)nxgep->mac.maxframesize;
3985 			nxgep->mac.maxframesize = (uint16_t)
3986 			    (new_mtu + NXGE_EHEADER_VLAN_CRC);
3987 			if (nxge_mac_set_framesize(nxgep)) {
3988 				nxgep->mac.maxframesize = (uint16_t)old_framesize;
3989 				err = EINVAL;
3990 				break;
3991 			}
3992 
3993 			err = mac_maxsdu_update(nxgep->mach, new_mtu);
3994 			if (err) {
3995 				nxgep->mac.maxframesize = (uint16_t)old_framesize;
3996 				err = EINVAL;
3997 				break;
3998 			}
3999 
4000 			nxgep->mac.default_mtu = new_mtu;
4001 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4002 			    "==> nxge_m_setprop: set MTU: %d maxframe %d",
4003 			    new_mtu, nxgep->mac.maxframesize));
4004 			break;
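			/*
			 * Worked example (assuming NXGE_EHEADER_VLAN_CRC
			 * covers the Ethernet header, one VLAN tag and the
			 * CRC, i.e. 14 + 4 + 4 = 22 bytes): a jumbo MTU of
			 * 9000 programs maxframesize to 9000 + 22 = 9022.
			 */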
4005 
4006 		case DLD_PROP_FLOWCTRL:
4007 			bcopy(pr_val, &fl, sizeof (fl));
4008 			switch (fl) {
4009 			default:
4010 				err = EINVAL;
4011 				break;
4012 
4013 			case LINK_FLOWCTRL_NONE:
4014 				param_arr[param_anar_pause].value = 0;
4015 				break;
4016 
4017 			case LINK_FLOWCTRL_RX:
4018 				param_arr[param_anar_pause].value = 1;
4019 				break;
4020 
4021 			case LINK_FLOWCTRL_TX:
4022 			case LINK_FLOWCTRL_BI:
4023 				err = EINVAL;
4024 				break;
4025 			}
4026 
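	/*
	 * FALLTHROUGH: a successful flow-control change, like the
	 * adv_* and autoneg cases that "goto reprogram" above, must
	 * be pushed to the hardware via nxge_param_link_update().
	 */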
4027 reprogram:
4028 			if (err == 0) {
4029 				if (!nxge_param_link_update(nxgep)) {
4030 					err = EINVAL;
4031 				}
4032 			}
4033 			break;
4034 
4035 		default:
4036 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4037 			    "==> nxge_m_setprop: private property"));
4038 			err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
4039 			    pr_val);
4040 			break;
4041 	}
4042 
4043 	mutex_exit(nxgep->genlock);
4044 
4045 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4046 	    "<== nxge_m_setprop (return %d)", err));
4047 	return (err);
4048 }
4049 
4050 static int
4051 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4052     uint_t pr_valsize, void *pr_val)
4053 {
4054 	nxge_t 		*nxgep = barg;
4055 	p_nxge_param_t	param_arr = nxgep->param_arr;
4056 	p_nxge_stats_t	statsp = nxgep->statsp;
4057 	int		err = 0;
4058 	link_flowctrl_t	fl;
4059 	uint64_t	tmp = 0;
4060 
4061 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4062 	    "==> nxge_m_getprop: pr_num %d", pr_num));
4063 	bzero(pr_val, pr_valsize);
4064 	switch (pr_num) {
4065 		case DLD_PROP_DUPLEX:
4066 			if (pr_valsize < sizeof (uint8_t))
4067 				return (EINVAL);
4068 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4069 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4070 			    "==> nxge_m_getprop: duplex mode %d",
4071 			    *(uint8_t *)pr_val));
4072 			break;
4073 
4074 		case DLD_PROP_SPEED:
4075 			if (pr_valsize < sizeof (uint64_t))
4076 				return (EINVAL);
4077 			tmp = statsp->mac_stats.link_speed * 1000000ull;
4078 			bcopy(&tmp, pr_val, sizeof (tmp));
4079 			break;
4080 
4081 		case DLD_PROP_STATUS:
4082 			if (pr_valsize < sizeof (uint8_t))
4083 				return (EINVAL);
4084 			*(uint8_t *)pr_val = statsp->mac_stats.link_up;
4085 			break;
4086 
4087 		case DLD_PROP_AUTONEG:
4088 			if (pr_valsize < sizeof (uint8_t))
4089 				return (EINVAL);
4090 			*(uint8_t *)pr_val =
4091 			    param_arr[param_autoneg].value;
4092 			break;
4093 
4094 
4095 		case DLD_PROP_DEFMTU: {
4096 			if (pr_valsize < sizeof (uint64_t))
4097 				return (EINVAL);
4098 			tmp = nxgep->mac.default_mtu;
4099 			bcopy(&tmp, pr_val, sizeof (tmp));
4100 			break;
4101 		}
4102 
4103 		case DLD_PROP_FLOWCTRL:
4104 			if (pr_valsize < sizeof (link_flowctrl_t))
4105 				return (EINVAL);
4106 
4107 			fl = LINK_FLOWCTRL_NONE;
4108 			if (param_arr[param_anar_pause].value) {
4109 				fl = LINK_FLOWCTRL_RX;
4110 			}
4111 			bcopy(&fl, pr_val, sizeof (fl));
4112 			break;
4113 
4114 		case DLD_PROP_ADV_1000FDX_CAP:
4115 			if (pr_valsize < sizeof (uint8_t))
4116 				return (EINVAL);
4117 			*(uint8_t *)pr_val =
4118 			    param_arr[param_anar_1000fdx].value;
4119 			break;
4120 
4121 		case DLD_PROP_EN_1000FDX_CAP:
4122 			if (pr_valsize < sizeof (uint8_t))
4123 				return (EINVAL);
4124 			*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4125 			break;
4126 
4127 		case DLD_PROP_ADV_100FDX_CAP:
4128 			if (pr_valsize < sizeof (uint8_t))
4129 				return (EINVAL);
4130 			*(uint8_t *)pr_val =
4131 			    param_arr[param_anar_100fdx].value;
4132 			break;
4133 
4134 		case DLD_PROP_EN_100FDX_CAP:
4135 			if (pr_valsize < sizeof (uint8_t))
4136 				return (EINVAL);
4137 			*(uint8_t *)pr_val = nxgep->param_en_100fdx;
4138 			break;
4139 
4140 		case DLD_PROP_ADV_10FDX_CAP:
4141 			if (pr_valsize < sizeof (uint8_t))
4142 				return (EINVAL);
4143 			*(uint8_t *)pr_val =
4144 			    param_arr[param_anar_10fdx].value;
4145 			break;
4146 
4147 		case DLD_PROP_EN_10FDX_CAP:
4148 			if (pr_valsize < sizeof (uint8_t))
4149 				return (EINVAL);
4150 			*(uint8_t *)pr_val = nxgep->param_en_10fdx;
4151 			break;
4152 
4153 		case DLD_PROP_EN_1000HDX_CAP:
4154 		case DLD_PROP_EN_100HDX_CAP:
4155 		case DLD_PROP_EN_10HDX_CAP:
4156 		case DLD_PROP_ADV_1000HDX_CAP:
4157 		case DLD_PROP_ADV_100HDX_CAP:
4158 		case DLD_PROP_ADV_10HDX_CAP:
4159 			err = EINVAL;
4160 			break;
4161 
4162 		default:
4163 			err = nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4164 			    pr_val);
4165 	}
4166 
4167 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));
4168 
4169 	return (err);
4170 }
4171 
4172 /* ARGSUSED */
4173 static int
4174 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4175     const void *pr_val)
4176 {
4177 	p_nxge_param_t	param_arr = nxgep->param_arr;
4178 	int		err = 0;
4179 	long		result;
4180 
4181 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4182 	    "==> nxge_set_priv_prop: name %s", pr_name));
4183 
4184 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4185 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4186 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4187 		    "<== nxge_set_priv_prop: name %s "
4188 		    "pr_val %s result %d "
4189 		    "param %d is_jumbo %d",
4190 		    pr_name, pr_val, result,
4191 		    param_arr[param_accept_jumbo].value,
4192 		    nxgep->mac.is_jumbo));
4193 
4194 		if (result > 1 || result < 0) {
4195 			return (EINVAL);
4196 		} else {
4197 			if (nxgep->mac.is_jumbo ==
4198 			    (uint32_t)result) {
4199 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4200 				    "no change (%d %d)",
4201 				    nxgep->mac.is_jumbo,
4202 				    result));
4203 				return (0);
4204 			}
4205 		}
4206 
4207 		param_arr[param_accept_jumbo].value = result;
4208 		nxgep->mac.is_jumbo = B_FALSE;
4209 		if (result) {
4210 			nxgep->mac.is_jumbo = B_TRUE;
4211 		}
4212 
4213 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4214 		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
4215 		    pr_name, result, nxgep->mac.is_jumbo));
4216 
4217 		return (err);
4218 	}
4219 
4220 	/* Blanking */
4221 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4222 		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4223 		    (char *)pr_val,
4224 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4225 		if (err) {
4226 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4227 			    "<== nxge_set_priv_prop: "
4228 			    "unable to set (%s)", pr_name));
4229 			err = EINVAL;
4230 		} else {
4231 			err = 0;
4232 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4233 			    "<== nxge_set_priv_prop: "
4234 			    "set (%s)", pr_name));
4235 		}
4236 
4237 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4238 		    "<== nxge_set_priv_prop: name %s (value %d)",
4239 		    pr_name, result));
4240 
4241 		return (err);
4242 	}
4243 
4244 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4245 		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4246 		    (char *)pr_val,
4247 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4248 		if (err) {
4249 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4250 			    "<== nxge_set_priv_prop: "
4251 			    "unable to set (%s)", pr_name));
4252 			err = EINVAL;
4253 		} else {
4254 			err = 0;
4255 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4256 			    "<== nxge_set_priv_prop: "
4257 			    "set (%s)", pr_name));
4258 		}
4259 
4260 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4261 		    "<== nxge_set_priv_prop: name %s (value %d)",
4262 		    pr_name, result));
4263 
4264 		return (err);
4265 	}
4266 
4267 	/* Classification */
4268 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4269 		if (pr_val == NULL) {
4270 			err = EINVAL;
4271 			return (err);
4272 		}
4273 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4274 
4275 		err = nxge_param_set_ip_opt(nxgep, NULL,
4276 		    NULL, (char *)pr_val,
4277 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4278 
4279 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4280 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4281 		    pr_name, result));
4282 
4283 		return (err);
4284 	}
4285 
4286 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4287 		if (pr_val == NULL) {
4288 			err = EINVAL;
4289 			return (err);
4290 		}
4291 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4292 
4293 		err = nxge_param_set_ip_opt(nxgep, NULL,
4294 		    NULL, (char *)pr_val,
4295 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4296 
4297 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4298 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4299 		    pr_name, result));
4300 
4301 		return (err);
4302 	}
4303 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4304 		if (pr_val == NULL) {
4305 			err = EINVAL;
4306 			return (err);
4307 		}
4308 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4309 
4310 		err = nxge_param_set_ip_opt(nxgep, NULL,
4311 		    NULL, (char *)pr_val,
4312 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4313 
4314 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4315 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4316 		    pr_name, result));
4317 
4318 		return (err);
4319 	}
4320 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4321 		if (pr_val == NULL) {
4322 			err = EINVAL;
4323 			return (err);
4324 		}
4325 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4326 
4327 		err = nxge_param_set_ip_opt(nxgep, NULL,
4328 		    NULL, (char *)pr_val,
4329 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
4330 
4331 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4332 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4333 		    pr_name, result));
4334 
4335 		return (err);
4336 	}
4337 
4338 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
4339 		if (pr_val == NULL) {
4340 			err = EINVAL;
4341 			return (err);
4342 		}
4343 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4344 
4345 		err = nxge_param_set_ip_opt(nxgep, NULL,
4346 		    NULL, (char *)pr_val,
4347 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
4348 
4349 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4350 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4351 		    pr_name, result));
4352 
4353 		return (err);
4354 	}
4355 
4356 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
4357 		if (pr_val == NULL) {
4358 			err = EINVAL;
4359 			return (err);
4360 		}
4361 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4362 
4363 		err = nxge_param_set_ip_opt(nxgep, NULL,
4364 		    NULL, (char *)pr_val,
4365 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
4366 
4367 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4368 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4369 		    pr_name, result));
4370 
4371 		return (err);
4372 	}
4373 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
4374 		if (pr_val == NULL) {
4375 			err = EINVAL;
4376 			return (err);
4377 		}
4378 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4379 
4380 		err = nxge_param_set_ip_opt(nxgep, NULL,
4381 		    NULL, (char *)pr_val,
4382 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
4383 
4384 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4385 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4386 		    pr_name, result));
4387 
4388 		return (err);
4389 	}
4390 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4391 		if (pr_val == NULL) {
4392 			err = EINVAL;
4393 			return (err);
4394 		}
4395 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4396 
4397 		err = nxge_param_set_ip_opt(nxgep, NULL,
4398 		    NULL, (char *)pr_val,
4399 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
4400 
4401 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4402 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4403 		    pr_name, result));
4404 
4405 		return (err);
4406 	}
4407 
4408 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4409 		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4410 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4411 			    "==> nxge_set_priv_prop: name %s (busy)", pr_name));
4412 			err = EBUSY;
4413 			return (err);
4414 		}
4415 		if (pr_val == NULL) {
4416 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4417 			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
4418 			err = EINVAL;
4419 			return (err);
4420 		}
4421 
4422 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4423 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4424 		    "<== nxge_set_priv_prop: name %s "
4425 		    "(lso %d pr_val %s value %d)",
4426 		    pr_name, nxgep->soft_lso_enable, pr_val, result));
4427 
4428 		if (result > 1 || result < 0) {
4429 			return (EINVAL);
4430 		} else {
4431 			if (nxgep->soft_lso_enable == (uint32_t)result) {
4432 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4433 				    "no change (%d %d)",
4434 				    nxgep->soft_lso_enable, result));
4435 				return (0);
4436 			}
4437 		}
4438 
4439 		nxgep->soft_lso_enable = (int)result;
4440 
4441 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4442 		    "<== nxge_set_priv_prop: name %s (value %d)",
4443 		    pr_name, result));
4444 
4445 		return (err);
4446 	}
4447 
4448 	return (EINVAL);
4449 }
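
/*
 * These private properties are normally driven from userland; a
 * hypothetical invocation (link name and values assumed):
 *
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *	# dladm set-linkprop -p _rxdma_intr_time=8 nxge0
 */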
4450 
4451 static int
4452 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4453     void *pr_val)
4454 {
4455 	p_nxge_param_t	param_arr = nxgep->param_arr;
4456 	char		valstr[MAXNAMELEN];
4457 	int		err = EINVAL;
4458 	uint_t		strsize;
4459 
4460 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4461 	    "==> nxge_get_priv_prop: property %s", pr_name));
4462 
4463 	/* function number */
4464 	if (strcmp(pr_name, "_function_number") == 0) {
4465 		(void) sprintf(valstr, "%d", nxgep->function_num);
4466 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4467 		    "==> nxge_get_priv_prop: name %s "
4468 		    "(value %d valstr %s)",
4469 		    pr_name, nxgep->function_num, valstr));
4470 
4471 		err = 0;
4472 		goto done;
4473 	}
4474 
4475 	/* Neptune firmware version */
4476 	if (strcmp(pr_name, "_fw_version") == 0) {
4477 		(void) sprintf(valstr, "%s", nxgep->vpd_info.ver);
4478 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4479 		    "==> nxge_get_priv_prop: name %s "
4480 		    "(value %d valstr %s)",
4481 		    pr_name, nxgep->vpd_info.ver, valstr));
4482 
4483 		err = 0;
4484 		goto done;
4485 	}
4486 
4487 	/* port PHY mode */
4488 	if (strcmp(pr_name, "_port_mode") == 0) {
4489 		switch (nxgep->mac.portmode) {
4490 		case PORT_1G_COPPER:
4491 			(void) sprintf(valstr, "1G copper %s",
4492 			    nxgep->hot_swappable_phy ?
4493 			    "[Hot Swappable]" : "");
4494 			break;
4495 		case PORT_1G_FIBER:
4496 			(void) sprintf(valstr, "1G fiber %s",
4497 			    nxgep->hot_swappable_phy ?
4498 			    "[hot swappable]" : "");
4499 			break;
4500 		case PORT_10G_COPPER:
4501 			(void) sprintf(valstr, "10G copper %s",
4502 			    nxgep->hot_swappable_phy ?
4503 			    "[hot swappable]" : "");
4504 			break;
4505 		case PORT_10G_FIBER:
4506 			(void) sprintf(valstr, "10G fiber %s",
4507 			    nxgep->hot_swappable_phy ?
4508 			    "[hot swappable]" : "");
4509 			break;
4510 		case PORT_10G_SERDES:
4511 			(void) sprintf(valstr, "10G serdes %s",
4512 			    nxgep->hot_swappable_phy ?
4513 			    "[hot swappable]" : "");
4514 			break;
4515 		case PORT_1G_SERDES:
4516 			(void) sprintf(valstr, "1G serdes %s",
4517 			    nxgep->hot_swappable_phy ?
4518 			    "[hot swappable]" : "");
4519 			break;
4520 		case PORT_1G_RGMII_FIBER:
4521 			(void) sprintf(valstr, "1G rgmii fiber %s",
4522 			    nxgep->hot_swappable_phy ?
4523 			    "[hot swappable]" : "");
4524 			break;
4525 		case PORT_HSP_MODE:
4526 			(void) sprintf(valstr, "phy not present[hot swappable]");
4527 			break;
4528 		default:
4529 			(void) sprintf(valstr, "unknown %s",
4530 			    nxgep->hot_swappable_phy ?
4531 			    "[hot swappable]" : "");
4532 			break;
4533 		}
4534 
4535 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4536 		    "==> nxge_get_priv_prop: name %s (value %s)",
4537 		    pr_name, valstr));
4538 
4539 		err = 0;
4540 		goto done;
4541 	}
4542 
4543 	/* Hot swappable PHY */
4544 	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
4545 		(void) sprintf(valstr, "%s",
4546 		    nxgep->hot_swappable_phy ?
4547 		    "yes" : "no");
4548 
4549 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4550 		    "==> nxge_get_priv_prop: name %s "
4551 		    "(value %d valstr %s)",
4552 		    pr_name, nxgep->hot_swappable_phy, valstr));
4553 
4554 		err = 0;
4555 		goto done;
4556 	}
4557 
4558 
4559 	/* accept jumbo */
4560 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4561 		(void) sprintf(valstr, "%d", nxgep->mac.is_jumbo);
4562 		err = 0;
4563 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4564 		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
4565 		    pr_name,
4566 		    (uint32_t)param_arr[param_accept_jumbo].value,
4567 		    nxgep->mac.is_jumbo,
4568 		    nxge_jumbo_enable));
4569 
4570 		goto done;
4571 	}
4572 
4573 	/* Receive Interrupt Blanking Parameters */
4574 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4575 		(void) sprintf(valstr, "%d", nxgep->intr_timeout);
4576 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4577 		    "==> nxge_get_priv_prop: name %s (value %d)",
4578 		    pr_name,
4579 		    (uint32_t)nxgep->intr_timeout));
4580 		err = 0;
4581 		goto done;
4582 	}
4583 
4584 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4585 		(void) sprintf(valstr, "%d", nxgep->intr_threshold);
4586 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4587 		    "==> nxge_get_priv_prop: name %s (value %d)",
4588 		    pr_name, (uint32_t)nxgep->intr_threshold));
4589 
4590 		err = 0;
4591 		goto done;
4592 	}
4593 
4594 	/* Classification and Load Distribution Configuration */
4595 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4596 		err = nxge_dld_get_ip_opt(nxgep,
4597 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4598 
4599 		(void) sprintf(valstr, "%x",
4600 		    (int)param_arr[param_class_opt_ipv4_tcp].value);
4601 
4602 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4603 		    "==> nxge_get_priv_prop: %s", valstr));
4604 		goto done;
4605 	}
4606 
4607 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4608 		err = nxge_dld_get_ip_opt(nxgep,
4609 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4610 
4611 		(void) sprintf(valstr, "%x",
4612 		    (int)param_arr[param_class_opt_ipv4_udp].value);
4613 
4614 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4615 		    "==> nxge_get_priv_prop: %s", valstr));
4616 		goto done;
4617 	}
4618 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4619 		err = nxge_dld_get_ip_opt(nxgep,
4620 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4621 
4622 		(void) sprintf(valstr, "%x",
4623 		    (int)param_arr[param_class_opt_ipv4_ah].value);
4624 
4625 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4626 		    "==> nxge_get_priv_prop: %s", valstr));
4627 		goto done;
4628 	}
4629 
4630 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4631 		err = nxge_dld_get_ip_opt(nxgep,
4632 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
4633 
4634 		(void) sprintf(valstr, "%x",
4635 		    (int)param_arr[param_class_opt_ipv4_sctp].value);
4636 
4637 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4638 		    "==> nxge_get_priv_prop: %s", valstr));
4639 		goto done;
4640 	}
4641 
4642 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
4643 		err = nxge_dld_get_ip_opt(nxgep,
4644 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
4645 
4646 		(void) sprintf(valstr, "%x",
4647 		    (int)param_arr[param_class_opt_ipv6_tcp].value);
4648 
4649 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4650 		    "==> nxge_get_priv_prop: %s", valstr));
4652 		goto done;
4653 	}
4654 
4655 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
4656 		err = nxge_dld_get_ip_opt(nxgep,
4657 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
4658 
4659 		(void) sprintf(valstr, "%x",
4660 		    (int)param_arr[param_class_opt_ipv6_udp].value);
4661 
4662 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4663 		    "==> nxge_get_priv_prop: %s", valstr));
4664 		goto done;
4665 	}
4666 
4667 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
4668 		err = nxge_dld_get_ip_opt(nxgep,
4669 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
4670 
4671 		(void) sprintf(valstr, "%x",
4672 		    (int)param_arr[param_class_opt_ipv6_ah].value);
4673 
4674 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4675 		    "==> nxge_get_priv_prop: %s", valstr));
4676 		goto done;
4677 	}
4678 
4679 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4680 		err = nxge_dld_get_ip_opt(nxgep,
4681 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
4682 
4683 		(void) sprintf(valstr, "%x",
4684 		    (int)param_arr[param_class_opt_ipv6_sctp].value);
4685 
4686 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4687 		    "==> nxge_get_priv_prop: %s", valstr));
4688 		goto done;
4689 	}
4690 
4691 	/* Software LSO */
4692 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4693 		(void) sprintf(valstr, "%d", nxgep->soft_lso_enable);
4694 		err = 0;
4695 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4696 		    "==> nxge_get_priv_prop: name %s (value %d)",
4697 		    pr_name, nxgep->soft_lso_enable));
4698 
4699 		goto done;
4700 	}
4701 
4702 done:
4703 	if (err == 0) {
4704 		strsize = (uint_t)strlen(valstr);
4705 		if (pr_valsize <= strsize) {
4706 			err = ENOBUFS;
4707 		} else {
4708 			(void) strlcpy(pr_val, valstr, pr_valsize);
4709 		}
4710 	}
4711 
4712 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4713 	    "<== nxge_get_priv_prop: return %d", err));
4714 	return (err);
4715 }
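
/*
 * The matching read path, again illustrative:
 *
 *	# dladm show-linkprop -p _port_mode nxge0
 */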
4716 
4717 /*
4718  * Module loading and removing entry points.
4719  */
4720 
4721 static	struct cb_ops 	nxge_cb_ops = {
4722 	nodev,			/* cb_open */
4723 	nodev,			/* cb_close */
4724 	nodev,			/* cb_strategy */
4725 	nodev,			/* cb_print */
4726 	nodev,			/* cb_dump */
4727 	nodev,			/* cb_read */
4728 	nodev,			/* cb_write */
4729 	nodev,			/* cb_ioctl */
4730 	nodev,			/* cb_devmap */
4731 	nodev,			/* cb_mmap */
4732 	nodev,			/* cb_segmap */
4733 	nochpoll,		/* cb_chpoll */
4734 	ddi_prop_op,		/* cb_prop_op */
4735 	NULL,
4736 	D_MP, 			/* cb_flag */
4737 	CB_REV,			/* rev */
4738 	nodev,			/* int (*cb_aread)() */
4739 	nodev			/* int (*cb_awrite)() */
4740 };
4741 
4742 static struct dev_ops nxge_dev_ops = {
4743 	DEVO_REV,		/* devo_rev */
4744 	0,			/* devo_refcnt */
4745 	nulldev,
4746 	nulldev,		/* devo_identify */
4747 	nulldev,		/* devo_probe */
4748 	nxge_attach,		/* devo_attach */
4749 	nxge_detach,		/* devo_detach */
4750 	nodev,			/* devo_reset */
4751 	&nxge_cb_ops,		/* devo_cb_ops */
4752 	(struct bus_ops *)NULL, /* devo_bus_ops	*/
4753 	ddi_power		/* devo_power */
4754 };
4755 
4756 extern	struct	mod_ops	mod_driverops;
4757 
4758 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
4759 
4760 /*
4761  * Module linkage information for the kernel.
4762  */
4763 static struct modldrv 	nxge_modldrv = {
4764 	&mod_driverops,
4765 	NXGE_DESC_VER,
4766 	&nxge_dev_ops
4767 };
4768 
4769 static struct modlinkage modlinkage = {
4770 	MODREV_1, (void *) &nxge_modldrv, NULL
4771 };
4772 
4773 int
4774 _init(void)
4775 {
4776 	int		status;
4777 
4778 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
4779 	mac_init_ops(&nxge_dev_ops, "nxge");
4780 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
4781 	if (status != 0) {
4782 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4783 			"failed to init device soft state"));
4784 		goto _init_exit;
4785 	}
4786 	status = mod_install(&modlinkage);
4787 	if (status != 0) {
4788 		ddi_soft_state_fini(&nxge_list);
4789 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
4790 		goto _init_exit;
4791 	}
4792 
4793 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
4794 
4795 _init_exit:
4796 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
4797 
4798 	return (status);
4799 }
4800 
4801 int
4802 _fini(void)
4803 {
4804 	int		status;
4805 
4806 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
4807 
4808 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
4809 
4810 	if (nxge_mblks_pending)
4811 		return (EBUSY);
4812 
4813 	status = mod_remove(&modlinkage);
4814 	if (status != DDI_SUCCESS) {
4815 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
4816 			    "Module removal failed 0x%08x",
4817 			    status));
4818 		goto _fini_exit;
4819 	}
4820 
4821 	mac_fini_ops(&nxge_dev_ops);
4822 
4823 	ddi_soft_state_fini(&nxge_list);
4824 
4825 	MUTEX_DESTROY(&nxge_common_lock);
4826 _fini_exit:
4827 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
4828 
4829 	return (status);
4830 }
4831 
4832 int
4833 _info(struct modinfo *modinfop)
4834 {
4835 	int		status;
4836 
4837 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
4838 	status = mod_info(&modlinkage, modinfop);
4839 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
4840 
4841 	return (status);
4842 }
4843 
4844 /*ARGSUSED*/
4845 static nxge_status_t
4846 nxge_add_intrs(p_nxge_t nxgep)
4847 {
4848 
4849 	int		intr_types;
4850 	int		type = 0;
4851 	int		ddi_status = DDI_SUCCESS;
4852 	nxge_status_t	status = NXGE_OK;
4853 
4854 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
4855 
4856 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
4857 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
4858 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
4859 	nxgep->nxge_intr_type.intr_added = 0;
4860 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
4861 	nxgep->nxge_intr_type.intr_type = 0;
4862 
4863 	if (nxgep->niu_type == N2_NIU) {
4864 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
4865 	} else if (nxge_msi_enable) {
4866 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
4867 	}
4868 
4869 	/* Get the supported interrupt types */
4870 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
4871 			!= DDI_SUCCESS) {
4872 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
4873 			"ddi_intr_get_supported_types failed: status 0x%08x",
4874 			ddi_status));
4875 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4876 	}
4877 	nxgep->nxge_intr_type.intr_types = intr_types;
4878 
4879 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4880 		"ddi_intr_get_supported_types: 0x%08x", intr_types));
4881 
4882 	/*
4883 	 * Solaris MSI-X is not supported yet; use MSI for now.
4884 	 * nxge_msi_enable (1):
4885 	 *	1 - MSI		2 - MSI-X	others - FIXED
4886 	 */
4887 	switch (nxge_msi_enable) {
4888 	default:
4889 		type = DDI_INTR_TYPE_FIXED;
4890 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4891 			"use fixed (intx emulation) type %08x",
4892 			type));
4893 		break;
4894 
4895 	case 2:
4896 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4897 			"ddi_intr_get_supported_types: 0x%08x", intr_types));
4898 		if (intr_types & DDI_INTR_TYPE_MSIX) {
4899 			type = DDI_INTR_TYPE_MSIX;
4900 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4901 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4902 				type));
4903 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
4904 			type = DDI_INTR_TYPE_MSI;
4905 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4906 				"ddi_intr_get_supported_types: MSI 0x%08x",
4907 				type));
4908 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4909 			type = DDI_INTR_TYPE_FIXED;
4910 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4911 				"ddi_intr_get_supported_types: MSXED0x%08x",
4912 				type));
4913 		}
4914 		break;
4915 
4916 	case 1:
4917 		if (intr_types & DDI_INTR_TYPE_MSI) {
4918 			type = DDI_INTR_TYPE_MSI;
4919 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4920 				"ddi_intr_get_supported_types: MSI 0x%08x",
4921 				type));
4922 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
4923 			type = DDI_INTR_TYPE_MSIX;
4924 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4925 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4926 				type));
4927 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4928 			type = DDI_INTR_TYPE_FIXED;
4929 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4930 				"ddi_intr_get_supported_types: MSXED0x%08x",
4931 				type));
4932 		}
4933 	}
4934 
4935 	nxgep->nxge_intr_type.intr_type = type;
4936 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
4937 		type == DDI_INTR_TYPE_FIXED) &&
4938 			nxgep->nxge_intr_type.niu_msi_enable) {
4939 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
4940 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4941 				    " nxge_add_intrs: "
4942 				    " nxge_add_intrs_adv failed: status 0x%08x",
4943 				    status));
4944 			return (status);
4945 		} else {
4946 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4947 			"interrupts registered : type %d", type));
4948 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
4949 
4950 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4951 				"\nAdded advanced nxge add_intr_adv "
4952 					"intr type 0x%x\n", type));
4953 
4954 			return (status);
4955 		}
4956 	}
4957 
4958 	if (!nxgep->nxge_intr_type.intr_registered) {
4959 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
4960 			"failed to register interrupts"));
4961 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4962 	}
4963 
4964 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
4965 	return (status);
4966 }
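
/*
 * The interrupt type chosen above follows the nxge_msi_enable
 * tunable; a sketch of overriding it from /etc/system (2 requests
 * MSI-X where the platform supports it):
 *
 *	set nxge:nxge_msi_enable = 2
 */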
4967 
4968 /*ARGSUSED*/
4969 static nxge_status_t
4970 nxge_add_soft_intrs(p_nxge_t nxgep)
4971 {
4972 
4973 	int		ddi_status = DDI_SUCCESS;
4974 	nxge_status_t	status = NXGE_OK;
4975 
4976 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
4977 
4978 	nxgep->resched_id = NULL;
4979 	nxgep->resched_running = B_FALSE;
4980 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
4981 			&nxgep->resched_id,
4982 		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
4983 	if (ddi_status != DDI_SUCCESS) {
4984 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
4985 			"ddi_add_softintrs failed: status 0x%08x",
4986 			ddi_status));
4987 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4988 	}
4989 
4990 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs"));
4991 
4992 	return (status);
4993 }
4994 
4995 static nxge_status_t
4996 nxge_add_intrs_adv(p_nxge_t nxgep)
4997 {
4998 	int		intr_type;
4999 	p_nxge_intr_t	intrp;
5000 
5001 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
5002 
5003 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5004 	intr_type = intrp->intr_type;
5005 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
5006 		intr_type));
5007 
5008 	switch (intr_type) {
5009 	case DDI_INTR_TYPE_MSI: /* 0x2 */
5010 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
5011 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
5012 
5013 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
5014 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
5015 
5016 	default:
5017 		return (NXGE_ERROR);
5018 	}
5019 }
5020 
5021 
5022 /*ARGSUSED*/
5023 static nxge_status_t
5024 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
5025 {
5026 	dev_info_t		*dip = nxgep->dip;
5027 	p_nxge_ldg_t		ldgp;
5028 	p_nxge_intr_t		intrp;
5029 	uint_t			*inthandler = NULL;
5030 	void			*arg1, *arg2;
5031 	int			behavior;
5032 	int			nintrs, navail, nrequest;
5033 	int			nactual, nrequired;
5034 	int			inum = 0;
5035 	int			x, y;
5036 	int			ddi_status = DDI_SUCCESS;
5037 	nxge_status_t		status = NXGE_OK;
5038 
5039 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
5040 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5041 	intrp->start_inum = 0;
5042 
5043 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5044 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5045 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5046 			"ddi_intr_get_nintrs() failed, status: 0x%x%, "
5047 			    "nintrs: %d", ddi_status, nintrs));
5048 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5049 	}
5050 
5051 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5052 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5053 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5054 			"ddi_intr_get_navail() failed, status: 0x%x%, "
5055 			    "nintrs: %d", ddi_status, navail));
5056 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5057 	}
5058 
5059 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5060 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
5061 		    nintrs, navail));
5062 
5063 	/* PSARC/2007/453 MSI-X interrupt limit override */
5064 	if (int_type == DDI_INTR_TYPE_MSIX) {
5065 		nrequest = nxge_create_msi_property(nxgep);
5066 		if (nrequest < navail) {
5067 			navail = nrequest;
5068 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5069 			    "nxge_add_intrs_adv_type: nintrs %d "
5070 			    "navail %d (nrequest %d)",
5071 			    nintrs, navail, nrequest));
5072 		}
5073 	}
5074 
5075 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
5076 		/* MSI must be power of 2 */
5077 		if ((navail & 16) == 16) {
5078 			navail = 16;
5079 		} else if ((navail & 8) == 8) {
5080 			navail = 8;
5081 		} else if ((navail & 4) == 4) {
5082 			navail = 4;
5083 		} else if ((navail & 2) == 2) {
5084 			navail = 2;
5085 		} else {
5086 			navail = 1;
5087 		}
5088 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5089 			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
5090 			"navail %d", nintrs, navail));
5091 	}
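	/*
	 * The unrolled bit tests above round navail down to a power of
	 * two and rely on navail being below 32 (a power-of-two navail,
	 * including 32 itself, never enters the block).  An equivalent
	 * generic sketch under the same assumption:
	 *
	 *	int p2 = 1;
	 *	while ((p2 << 1) <= navail)
	 *		p2 <<= 1;
	 *	navail = p2;
	 */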
5092 
5093 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5094 			DDI_INTR_ALLOC_NORMAL);
5095 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5096 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5097 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5098 		    navail, &nactual, behavior);
5099 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5100 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5101 				    " ddi_intr_alloc() failed: %d",
5102 				    ddi_status));
5103 		kmem_free(intrp->htable, intrp->intr_size);
5104 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5105 	}
5106 
5107 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5108 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5109 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5110 				    " ddi_intr_get_pri() failed: %d",
5111 				    ddi_status));
5112 		/* Free already allocated interrupts */
5113 		for (y = 0; y < nactual; y++) {
5114 			(void) ddi_intr_free(intrp->htable[y]);
5115 		}
5116 
5117 		kmem_free(intrp->htable, intrp->intr_size);
5118 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5119 	}
5120 
5121 	nrequired = 0;
5122 	switch (nxgep->niu_type) {
5123 	default:
5124 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5125 		break;
5126 
5127 	case N2_NIU:
5128 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5129 		break;
5130 	}
5131 
5132 	if (status != NXGE_OK) {
5133 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5134 			"nxge_add_intrs_adv_typ:nxge_ldgv_init "
5135 			"failed: 0x%x", status));
5136 		/* Free already allocated interrupts */
5137 		for (y = 0; y < nactual; y++) {
5138 			(void) ddi_intr_free(intrp->htable[y]);
5139 		}
5140 
5141 		kmem_free(intrp->htable, intrp->intr_size);
5142 		return (status);
5143 	}
5144 
5145 	ldgp = nxgep->ldgvp->ldgp;
5146 	for (x = 0; x < nrequired; x++, ldgp++) {
5147 		ldgp->vector = (uint8_t)x;
5148 		ldgp->intdata = SID_DATA(ldgp->func, x);
5149 		arg1 = ldgp->ldvp;
5150 		arg2 = nxgep;
5151 		if (ldgp->nldvs == 1) {
5152 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5153 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5154 				"nxge_add_intrs_adv_type: "
5155 				"arg1 0x%x arg2 0x%x: "
5156 				"1-1 int handler (entry %d intdata 0x%x)\n",
5157 				arg1, arg2,
5158 				x, ldgp->intdata));
5159 		} else if (ldgp->nldvs > 1) {
5160 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5161 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5162 				"nxge_add_intrs_adv_type: "
5163 				"arg1 0x%x arg2 0x%x: "
5164 				"nldevs %d int handler "
5165 				"(entry %d intdata 0x%x)\n",
5166 				arg1, arg2,
5167 				ldgp->nldvs, x, ldgp->intdata));
5168 		}
5169 
5170 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5171 			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
5172 			"htable 0x%llx", x, intrp->htable[x]));
5173 
5174 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
5175 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
5176 				!= DDI_SUCCESS) {
5177 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5178 				"==> nxge_add_intrs_adv_type: failed #%d "
5179 				"status 0x%x", x, ddi_status));
5180 			for (y = 0; y < intrp->intr_added; y++) {
5181 				(void) ddi_intr_remove_handler(
5182 						intrp->htable[y]);
5183 			}
5184 			/* Free already allocated intr */
5185 			for (y = 0; y < nactual; y++) {
5186 				(void) ddi_intr_free(intrp->htable[y]);
5187 			}
5188 			kmem_free(intrp->htable, intrp->intr_size);
5189 
5190 			(void) nxge_ldgv_uninit(nxgep);
5191 
5192 			return (NXGE_ERROR | NXGE_DDI_FAILED);
5193 		}
5194 		intrp->intr_added++;
5195 	}
5196 
5197 	intrp->msi_intx_cnt = nactual;
5198 
5199 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5200 		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
5201 		navail, nactual,
5202 		intrp->msi_intx_cnt,
5203 		intrp->intr_added));
5204 
5205 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
5206 
5207 	(void) nxge_intr_ldgv_init(nxgep);
5208 
5209 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
5210 
5211 	return (status);
5212 }
5213 
5214 /*ARGSUSED*/
5215 static nxge_status_t
5216 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
5217 {
5218 	dev_info_t		*dip = nxgep->dip;
5219 	p_nxge_ldg_t		ldgp;
5220 	p_nxge_intr_t		intrp;
5221 	uint_t			*inthandler = NULL;
5222 	void			*arg1, *arg2;
5223 	int			behavior;
5224 	int			nintrs, navail;
5225 	int			nactual, nrequired;
5226 	int			inum = 0;
5227 	int			x, y;
5228 	int			ddi_status = DDI_SUCCESS;
5229 	nxge_status_t		status = NXGE_OK;
5230 
5231 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
5232 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5233 	intrp->start_inum = 0;
5234 
5235 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5236 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5237 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5238 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
5239 			    "nintrs: %d", ddi_status, nintrs));
5240 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5241 	}
5242 
5243 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5244 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5245 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5246 			"ddi_intr_get_navail() failed, status: 0x%x%, "
5247 			    "nintrs: %d", ddi_status, navail));
5248 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5249 	}
5250 
5251 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5252 		"ddi_intr_get_navail() returned: nintrs %d, naavail %d",
5253 		    nintrs, navail));
5254 
5255 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5256 			DDI_INTR_ALLOC_NORMAL);
5257 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5258 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5259 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5260 		    navail, &nactual, behavior);
5261 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5262 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5263 			    " ddi_intr_alloc() failed: %d",
5264 			    ddi_status));
5265 		kmem_free(intrp->htable, intrp->intr_size);
5266 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5267 	}
5268 
5269 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5270 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5271 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5272 				    " ddi_intr_get_pri() failed: %d",
5273 				    ddi_status));
5274 		/* Free already allocated interrupts */
5275 		for (y = 0; y < nactual; y++) {
5276 			(void) ddi_intr_free(intrp->htable[y]);
5277 		}
5278 
5279 		kmem_free(intrp->htable, intrp->intr_size);
5280 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5281 	}
5282 
5283 	nrequired = 0;
5284 	switch (nxgep->niu_type) {
5285 	default:
5286 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5287 		break;
5288 
5289 	case N2_NIU:
5290 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5291 		break;
5292 	}
5293 
5294 	if (status != NXGE_OK) {
5295 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5296 			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
5297 			"failed: 0x%x", status));
5298 		/* Free already allocated interrupts */
5299 		for (y = 0; y < nactual; y++) {
5300 			(void) ddi_intr_free(intrp->htable[y]);
5301 		}
5302 
5303 		kmem_free(intrp->htable, intrp->intr_size);
5304 		return (status);
5305 	}
5306 
5307 	ldgp = nxgep->ldgvp->ldgp;
5308 	for (x = 0; x < nrequired; x++, ldgp++) {
5309 		ldgp->vector = (uint8_t)x;
5310 		if (nxgep->niu_type != N2_NIU) {
5311 			ldgp->intdata = SID_DATA(ldgp->func, x);
5312 		}
5313 
5314 		arg1 = ldgp->ldvp;
5315 		arg2 = nxgep;
5316 		if (ldgp->nldvs == 1) {
5317 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5318 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5319 				"nxge_add_intrs_adv_type_fix: "
5320 				"1-1 int handler(%d) ldg %d ldv %d "
5321 				"arg1 $%p arg2 $%p\n",
5322 				x, ldgp->ldg, ldgp->ldvp->ldv,
5323 				arg1, arg2));
5324 		} else if (ldgp->nldvs > 1) {
5325 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5326 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5327 				"nxge_add_intrs_adv_type_fix: "
5328 				"shared ldv %d int handler(%d) ldv %d ldg %d"
5329 				"arg1 0x%016llx arg2 0x%016llx\n",
5330 				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
5331 				arg1, arg2));
5332 		}
5333 
5334 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
5335 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
5336 				!= DDI_SUCCESS) {
5337 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5338 				"==> nxge_add_intrs_adv_type_fix: failed #%d "
5339 				"status 0x%x", x, ddi_status));
5340 			for (y = 0; y < intrp->intr_added; y++) {
5341 				(void) ddi_intr_remove_handler(
5342 						intrp->htable[y]);
5343 			}
5344 			for (y = 0; y < nactual; y++) {
5345 				(void) ddi_intr_free(intrp->htable[y]);
5346 			}
5347 			/* Free already allocated intr */
5348 			kmem_free(intrp->htable, intrp->intr_size);
5349 
5350 			(void) nxge_ldgv_uninit(nxgep);
5351 
5352 			return (NXGE_ERROR | NXGE_DDI_FAILED);
5353 		}
5354 		intrp->intr_added++;
5355 	}
5356 
5357 	intrp->msi_intx_cnt = nactual;
5358 
5359 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
5360 
5361 	status = nxge_intr_ldgv_init(nxgep);
5362 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
5363 
5364 	return (status);
5365 }
5366 
5367 static void
5368 nxge_remove_intrs(p_nxge_t nxgep)
5369 {
5370 	int		i, inum;
5371 	p_nxge_intr_t	intrp;
5372 
5373 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
5374 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5375 	if (!intrp->intr_registered) {
5376 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5377 			"<== nxge_remove_intrs: interrupts not registered"));
5378 		return;
5379 	}
5380 
5381 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
5382 
5383 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5384 		(void) ddi_intr_block_disable(intrp->htable,
5385 			intrp->intr_added);
5386 	} else {
5387 		for (i = 0; i < intrp->intr_added; i++) {
5388 			(void) ddi_intr_disable(intrp->htable[i]);
5389 		}
5390 	}
5391 
5392 	for (inum = 0; inum < intrp->intr_added; inum++) {
5393 		if (intrp->htable[inum]) {
5394 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
5395 		}
5396 	}
5397 
5398 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
5399 		if (intrp->htable[inum]) {
5400 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5401 				"nxge_remove_intrs: ddi_intr_free inum %d "
5402 				"msi_intx_cnt %d intr_added %d",
5403 				inum,
5404 				intrp->msi_intx_cnt,
5405 				intrp->intr_added));
5406 
5407 			(void) ddi_intr_free(intrp->htable[inum]);
5408 		}
5409 	}
5410 
5411 	kmem_free(intrp->htable, intrp->intr_size);
5412 	intrp->intr_registered = B_FALSE;
5413 	intrp->intr_enabled = B_FALSE;
5414 	intrp->msi_intx_cnt = 0;
5415 	intrp->intr_added = 0;
5416 
5417 	(void) nxge_ldgv_uninit(nxgep);
5418 
5419 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
5420 	    "#msix-request");
5421 
5422 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
5423 }
5424 
5425 /*ARGSUSED*/
5426 static void
5427 nxge_remove_soft_intrs(p_nxge_t nxgep)
5428 {
5429 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
5430 	if (nxgep->resched_id) {
5431 		ddi_remove_softintr(nxgep->resched_id);
5432 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5433 			"==> nxge_remove_soft_intrs: removed"));
5434 		nxgep->resched_id = NULL;
5435 	}
5436 
5437 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
5438 }
5439 
5440 /*ARGSUSED*/
5441 static void
5442 nxge_intrs_enable(p_nxge_t nxgep)
5443 {
5444 	p_nxge_intr_t	intrp;
5445 	int		i;
5446 	int		status;
5447 
5448 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
5449 
5450 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5451 
5452 	if (!intrp->intr_registered) {
5453 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
5454 			"interrupts are not registered"));
5455 		return;
5456 	}
5457 
5458 	if (intrp->intr_enabled) {
5459 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5460 			"<== nxge_intrs_enable: already enabled"));
5461 		return;
5462 	}
5463 
5464 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5465 		status = ddi_intr_block_enable(intrp->htable,
5466 			intrp->intr_added);
5467 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
5468 			"block enable - status 0x%x total inums #%d\n",
5469 			status, intrp->intr_added));
5470 	} else {
5471 		for (i = 0; i < intrp->intr_added; i++) {
5472 			status = ddi_intr_enable(intrp->htable[i]);
5473 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
5474 				"ddi_intr_enable:enable - status 0x%x "
5475 				"total inums %d enable inum #%d\n",
5476 				status, intrp->intr_added, i));
5477 			if (status == DDI_SUCCESS) {
5478 				intrp->intr_enabled = B_TRUE;
5479 			}
5480 		}
5481 	}
5482 
5483 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
5484 }
5485 
5486 /*ARGSUSED*/
5487 static void
5488 nxge_intrs_disable(p_nxge_t nxgep)
5489 {
5490 	p_nxge_intr_t	intrp;
5491 	int		i;
5492 
5493 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
5494 
5495 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5496 
5497 	if (!intrp->intr_registered) {
5498 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
5499 			"interrupts are not registered"));
5500 		return;
5501 	}
5502 
5503 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5504 		(void) ddi_intr_block_disable(intrp->htable,
5505 			intrp->intr_added);
5506 	} else {
5507 		for (i = 0; i < intrp->intr_added; i++) {
5508 			(void) ddi_intr_disable(intrp->htable[i]);
5509 		}
5510 	}
5511 
5512 	intrp->intr_enabled = B_FALSE;
5513 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
5514 }
5515 
5516 static nxge_status_t
5517 nxge_mac_register(p_nxge_t nxgep)
5518 {
5519 	mac_register_t *macp;
5520 	int		status;
5521 
5522 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
5523 
5524 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
5525 		return (NXGE_ERROR);
5526 
5527 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
5528 	macp->m_driver = nxgep;
5529 	macp->m_dip = nxgep->dip;
5530 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
5531 	macp->m_callbacks = &nxge_m_callbacks;
5532 	macp->m_min_sdu = 0;
5533 	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
5534 	    NXGE_EHEADER_VLAN_CRC;
5535 	macp->m_max_sdu = nxgep->mac.default_mtu;
5536 	macp->m_margin = VLAN_TAGSZ;
5537 
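	/*
	 * Worked example with typical non-jumbo values (assuming a
	 * 1522-byte maxframesize and NXGE_EHEADER_VLAN_CRC of 22:
	 * 14-byte Ethernet header + 4-byte VLAN tag + 4-byte CRC):
	 * m_max_sdu = 1522 - 22 = 1500.
	 */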
5538 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
5539 	    "==> nxge_mac_register: instance %d "
5540 	    "max_sdu %d margin %d maxframe %d (header %d)",
5541 	    nxgep->instance,
5542 	    macp->m_max_sdu, macp->m_margin,
5543 	    nxgep->mac.maxframesize,
5544 	    NXGE_EHEADER_VLAN_CRC));
5545 
5546 	status = mac_register(macp, &nxgep->mach);
5547 	mac_free(macp);
5548 
5549 	if (status != 0) {
5550 		cmn_err(CE_WARN,
5551 			"!nxge_mac_register failed (status %d instance %d)",
5552 			status, nxgep->instance);
5553 		return (NXGE_ERROR);
5554 	}
5555 
5556 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
5557 		"(instance %d)", nxgep->instance));
5558 
5559 	return (NXGE_OK);
5560 }
5561 
5562 void
5563 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
5564 {
5565 	ssize_t		size;
5566 	mblk_t		*nmp;
5567 	uint8_t		blk_id;
5568 	uint8_t		chan;
5569 	uint32_t	err_id;
5570 	err_inject_t	*eip;
5571 
5572 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
5573 
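	/* Fixed ack payload size; assumes the sender's mblk is this large. */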
5574 	size = 1024;
5575 	nmp = mp->b_cont;
5576 	eip = (err_inject_t *)nmp->b_rptr;
5577 	blk_id = eip->blk_id;
5578 	err_id = eip->err_id;
5579 	chan = eip->chan;
5580 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
5581 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
5582 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
5583 	switch (blk_id) {
5584 	case MAC_BLK_ID:
5585 		break;
5586 	case TXMAC_BLK_ID:
5587 		break;
5588 	case RXMAC_BLK_ID:
5589 		break;
5590 	case MIF_BLK_ID:
5591 		break;
5592 	case IPP_BLK_ID:
5593 		nxge_ipp_inject_err(nxgep, err_id);
5594 		break;
5595 	case TXC_BLK_ID:
5596 		nxge_txc_inject_err(nxgep, err_id);
5597 		break;
5598 	case TXDMA_BLK_ID:
5599 		nxge_txdma_inject_err(nxgep, err_id, chan);
5600 		break;
5601 	case RXDMA_BLK_ID:
5602 		nxge_rxdma_inject_err(nxgep, err_id, chan);
5603 		break;
5604 	case ZCP_BLK_ID:
5605 		nxge_zcp_inject_err(nxgep, err_id);
5606 		break;
5607 	case ESPC_BLK_ID:
5608 		break;
5609 	case FFLP_BLK_ID:
5610 		break;
5611 	case PHY_BLK_ID:
5612 		break;
5613 	case ETHER_SERDES_BLK_ID:
5614 		break;
5615 	case PCIE_SERDES_BLK_ID:
5616 		break;
5617 	case VIR_BLK_ID:
5618 		break;
5619 	}
5620 
5621 	nmp->b_wptr = nmp->b_rptr + size;
5622 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
5623 
5624 	miocack(wq, mp, (int)size, 0);
5625 }
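/*
 * A minimal user-level sketch of exercising this path, assuming the
 * NXGE_INJECT_ERR ioctl command that routes M_IOCTL messages here
 * (field values are illustrative; err_id encodings are per-block):
 *
 *	err_inject_t eip;
 *
 *	eip.blk_id = RXDMA_BLK_ID;
 *	eip.err_id = 0x1;
 *	eip.chan = 0;
 *	if (ioctl(fd, NXGE_INJECT_ERR, &eip) < 0)
 *		perror("NXGE_INJECT_ERR");
 */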
5626 
5627 static int
5628 nxge_init_common_dev(p_nxge_t nxgep)
5629 {
5630 	p_nxge_hw_list_t	hw_p;
5631 	dev_info_t 		*p_dip;
5632 
5633 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
5634 
5635 	p_dip = nxgep->p_dip;
5636 	MUTEX_ENTER(&nxge_common_lock);
5637 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5638 		"==> nxge_init_common_dev:func # %d",
5639 		nxgep->function_num));
5640 	/*
5641 	 * Loop through the existing per-Neptune hardware list.
5642 	 */
5643 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
5644 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5645 			"==> nxge_init_common_device:func # %d "
5646 			"hw_p $%p parent dip $%p",
5647 			nxgep->function_num,
5648 			hw_p,
5649 			p_dip));
5650 		if (hw_p->parent_devp == p_dip) {
5651 			nxgep->nxge_hw_p = hw_p;
5652 			hw_p->ndevs++;
5653 			hw_p->nxge_p[nxgep->function_num] = nxgep;
5654 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5655 				"==> nxge_init_common_device:func # %d "
5656 				"hw_p $%p parent dip $%p "
5657 				"ndevs %d (found)",
5658 				nxgep->function_num,
5659 				hw_p,
5660 				p_dip,
5661 				hw_p->ndevs));
5662 			break;
5663 		}
5664 	}
5665 
5666 	if (hw_p == NULL) {
5667 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5668 			"==> nxge_init_common_device:func # %d "
5669 			"parent dip $%p (new)",
5670 			nxgep->function_num,
5671 			p_dip));
5672 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
5673 		hw_p->parent_devp = p_dip;
5674 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
5675 		nxgep->nxge_hw_p = hw_p;
5676 		hw_p->ndevs++;
5677 		hw_p->nxge_p[nxgep->function_num] = nxgep;
5678 		hw_p->next = nxge_hw_list;
5679 		if (nxgep->niu_type == N2_NIU) {
5680 			hw_p->niu_type = N2_NIU;
5681 			hw_p->platform_type = P_NEPTUNE_NIU;
5682 		} else {
5683 			hw_p->niu_type = NIU_TYPE_NONE;
5684 			hw_p->platform_type = P_NEPTUNE_NONE;
5685 		}
5686 
5687 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
5688 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
5689 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
5690 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
5691 
5692 		nxge_hw_list = hw_p;
5693 
5694 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
5695 	}
5696 
5697 	MUTEX_EXIT(&nxge_common_lock);
5698 
5699 	nxgep->platform_type = hw_p->platform_type;
5700 	if (nxgep->niu_type != N2_NIU) {
5701 		nxgep->niu_type = hw_p->niu_type;
5702 	}
5703 
5704 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5705 		"==> nxge_init_common_device (nxge_hw_list) $%p",
5706 		nxge_hw_list));
5707 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
5708 
5709 	return (NXGE_OK);
5710 }
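/*
 * Shape of the shared state maintained above: one nxge_hw_list_t per
 * physical Neptune/NIU device, inserted at the head of nxge_hw_list
 * and shared by every function (port) under the same parent dip:
 *
 *	nxge_hw_list -> hw_p (newest dev) -> hw_p (older dev) -> NULL
 *	                  nxge_p[function_num] = per-port nxgep
 */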
5711 
5712 static void
5713 nxge_uninit_common_dev(p_nxge_t nxgep)
5714 {
5715 	p_nxge_hw_list_t	hw_p, h_hw_p;
5716 	dev_info_t 		*p_dip;
5717 
5718 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
5719 	if (nxgep->nxge_hw_p == NULL) {
5720 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5721 			"<== nxge_uninit_common_device (no common)"));
5722 		return;
5723 	}
5724 
5725 	MUTEX_ENTER(&nxge_common_lock);
5726 	h_hw_p = nxge_hw_list;
5727 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
5728 		p_dip = hw_p->parent_devp;
5729 		if (nxgep->nxge_hw_p == hw_p &&
5730 			p_dip == nxgep->p_dip &&
5731 			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
5732 			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
5733 
5734 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5735 				"==> nxge_uninit_common_device:func # %d "
5736 				"hw_p $%p parent dip $%p "
5737 				"ndevs %d (found)",
5738 				nxgep->function_num,
5739 				hw_p,
5740 				p_dip,
5741 				hw_p->ndevs));
5742 
5743 			nxgep->nxge_hw_p = NULL;
5744 			if (hw_p->ndevs) {
5745 				hw_p->ndevs--;
5746 			}
5747 			hw_p->nxge_p[nxgep->function_num] = NULL;
5748 			if (!hw_p->ndevs) {
5749 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
5750 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
5751 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
5752 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
5753 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5754 					"==> nxge_uninit_common_device: "
5755 					"func # %d "
5756 					"hw_p $%p parent dip $%p "
5757 					"ndevs %d (last)",
5758 					nxgep->function_num,
5759 					hw_p,
5760 					p_dip,
5761 					hw_p->ndevs));
5762 
5763 				if (hw_p == nxge_hw_list) {
5764 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5765 						"==> nxge_uninit_common_device:"
5766 						"remove head func # %d "
5767 						"hw_p $%p parent dip $%p "
5768 						"ndevs %d (head)",
5769 						nxgep->function_num,
5770 						hw_p,
5771 						p_dip,
5772 						hw_p->ndevs));
5773 					nxge_hw_list = hw_p->next;
5774 				} else {
5775 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5776 						"==> nxge_uninit_common_device:"
5777 						"remove middle func # %d "
5778 						"hw_p $%p parent dip $%p "
5779 						"ndevs %d (middle)",
5780 						nxgep->function_num,
5781 						hw_p,
5782 						p_dip,
5783 						hw_p->ndevs));
5784 					h_hw_p->next = hw_p->next;
5785 				}
5786 
5787 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
5788 			}
5789 			break;
5790 		} else {
5791 			h_hw_p = hw_p;
5792 		}
5793 	}
5794 
5795 	MUTEX_EXIT(&nxge_common_lock);
5796 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5797 		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
5798 		nxge_hw_list));
5799 
5800 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
5801 }
5802 
5803 /*
5804  * Determines the number of ports from the niu_type or the platform type.
5805  * Returns the number of ports, or zero if neither type is recognized.
5806  */
5807 
5808 int
5809 nxge_get_nports(p_nxge_t nxgep)
5810 {
5811 	int	nports = 0;
5812 
5813 	switch (nxgep->niu_type) {
5814 	case N2_NIU:
5815 	case NEPTUNE_2_10GF:
5816 		nports = 2;
5817 		break;
5818 	case NEPTUNE_4_1GC:
5819 	case NEPTUNE_2_10GF_2_1GC:
5820 	case NEPTUNE_1_10GF_3_1GC:
5821 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
5822 	case NEPTUNE_2_10GF_2_1GRF:
5823 		nports = 4;
5824 		break;
5825 	default:
5826 		switch (nxgep->platform_type) {
5827 		case P_NEPTUNE_NIU:
5828 		case P_NEPTUNE_ATLAS_2PORT:
5829 			nports = 2;
5830 			break;
5831 		case P_NEPTUNE_ATLAS_4PORT:
5832 		case P_NEPTUNE_MARAMBA_P0:
5833 		case P_NEPTUNE_MARAMBA_P1:
5834 		case P_NEPTUNE_ALONSO:
5835 			nports = 4;
5836 			break;
5837 		default:
5838 			break;
5839 		}
5840 		break;
5841 	}
5842 
5843 	return (nports);
5844 }
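/*
 * A minimal usage sketch (hypothetical caller); zero means the
 * configuration is unrecognized and should be treated as an error:
 *
 *	int nports = nxge_get_nports(nxgep);
 *
 *	if (nports == 0)
 *		return (NXGE_ERROR);
 */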
5845 
5846 /*
5847  * The following two functions are to support
5848  * PSARC/2007/453 MSI-X interrupt limit override.
5849  */
5850 static int
5851 nxge_create_msi_property(p_nxge_t nxgep)
5852 {
5853 	int	nmsi;
5854 	extern	int ncpus;
5855 
5856 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
5857 
5858 	switch (nxgep->mac.portmode) {
5859 	case PORT_10G_COPPER:
5860 	case PORT_10G_FIBER:
5861 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
5862 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
5863 		/*
5864 		 * Request at most 8 MSI-X vectors.  If fewer than
5865 		 * 8 CPUs are present, request one vector per CPU
5866 		 * instead.
5867 		 */
5868 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
5869 			nmsi = NXGE_MSIX_REQUEST_10G;
5870 		} else {
5871 			nmsi = ncpus;
5872 		}
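		/*
		 * Example: with NXGE_MSIX_REQUEST_10G at 8, a 4-CPU
		 * system requests 4 vectors while a 64-CPU system is
		 * capped at 8.
		 */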
5873 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5874 		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
5875 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
5876 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
5877 		break;
5878 
5879 	default:
5880 		nmsi = NXGE_MSIX_REQUEST_1G;
5881 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5882 		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
5883 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
5884 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
5885 		break;
5886 	}
5887 
5888 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
5889 	return (nmsi);
5890 }
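/*
 * The "#msix-request" property created above is what PSARC/2007/453
 * keys on: the parent PCIe nexus is expected to consult it when
 * sizing this leaf device's MSI-X allotment (SPARC platforms only).
 * The driver never reads the property back itself, which is why
 * nxge_remove_intrs() simply deletes it on teardown.
 */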
5891