xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_main.c (revision eb2bd6624e082e367f66e2b0fdfe54c9b5d493af)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
30  */
31 #include	<sys/nxge/nxge_impl.h>
32 #include	<sys/pcie.h>
33 
34 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
35 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
36 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
37 /*
38  * PSARC/2007/453 MSI-X interrupt limit override
39  * (This PSARC case is limited to MSI-X vectors
40  *  and SPARC platforms only).
41  */
42 #if defined(_BIG_ENDIAN)
43 uint32_t	nxge_msi_enable = 2;
44 #else
45 uint32_t	nxge_msi_enable = 1;
46 #endif
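/*
 * nxge_msi_enable selects the interrupt type negotiated in
 * nxge_add_intrs(): 0 is expected to fall back to legacy fixed
 * interrupts, nonzero allows MSI/MSI-X; the big-endian (SPARC)
 * default of 2 reflects the PSARC/2007/453 MSI-X override above.
 */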
47 
48 /*
49  * Globals: tunable parameters (/etc/system or adb)
50  *
51  */
52 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
53 uint32_t 	nxge_rbr_spare_size = 0;
54 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
55 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
56 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
57 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
58 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
59 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
60 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
61 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
62 boolean_t	nxge_jumbo_enable = B_FALSE;
63 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
64 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
65 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
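/*
 * The tunables above can be set from /etc/system; for example
 * (values illustrative only):
 *
 *	set nxge:nxge_jumbo_enable = 1
 *	set nxge:nxge_rbr_size = 2048
 */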
66 
67 /* MAX LSO size */
68 #define		NXGE_LSO_MAXLEN	65535
69 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
70 
71 /*
72  * Debugging flags:
73  *		nxge_no_tx_lb : transmit load balancing
74  *		nxge_tx_lb_policy: 0 - TCP port (default)
75  *				   3 - DEST MAC
76  */
77 uint32_t 	nxge_no_tx_lb = 0;
78 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
79 
80 /*
81  * Add tunable to reduce the amount of time spent in the
82  * ISR doing Rx Processing.
83  */
84 uint32_t nxge_max_rx_pkts = 1024;
85 
86 /*
87  * Tunables to manage the receive buffer blocks.
88  *
89  * nxge_rx_threshold_hi: usage threshold above which all packets are
 *	copied (bcopy) rather than loaned up.
90  * nxge_rx_buf_size_type: receive buffer block size type.
91  * nxge_rx_threshold_lo: usage threshold above which only packets up
 *	to the tunable block size type are copied.
92  */
93 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
94 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
95 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
96 
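/*
 * Register access trace buffer, initialized from nxge_attach() via
 * npi_rtrace_buf_init().
 */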
97 rtrace_t npi_rtracebuf;
98 
99 #if	defined(sun4v)
100 /*
101  * Hypervisor N2/NIU services information.
102  */
103 static hsvc_info_t niu_hsvc = {
104 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
105 	NIU_MINOR_VER, "nxge"
106 };
107 #endif
108 
109 /*
110  * Function Prototypes
111  */
112 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
113 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
114 static void nxge_unattach(p_nxge_t);
115 
116 #if NXGE_PROPERTY
117 static void nxge_remove_hard_properties(p_nxge_t);
118 #endif
119 
120 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
121 
122 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
123 static void nxge_destroy_mutexes(p_nxge_t);
124 
125 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
126 static void nxge_unmap_regs(p_nxge_t nxgep);
127 #ifdef	NXGE_DEBUG
128 static void nxge_test_map_regs(p_nxge_t nxgep);
129 #endif
130 
131 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
132 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
133 static void nxge_remove_intrs(p_nxge_t nxgep);
134 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
135 
136 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
137 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
138 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
139 static void nxge_intrs_enable(p_nxge_t nxgep);
140 static void nxge_intrs_disable(p_nxge_t nxgep);
141 
142 static void nxge_suspend(p_nxge_t);
143 static nxge_status_t nxge_resume(p_nxge_t);
144 
145 static nxge_status_t nxge_setup_dev(p_nxge_t);
146 static void nxge_destroy_dev(p_nxge_t);
147 
148 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
149 static void nxge_free_mem_pool(p_nxge_t);
150 
151 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
152 static void nxge_free_rx_mem_pool(p_nxge_t);
153 
154 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
155 static void nxge_free_tx_mem_pool(p_nxge_t);
156 
157 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
158 	struct ddi_dma_attr *,
159 	size_t, ddi_device_acc_attr_t *, uint_t,
160 	p_nxge_dma_common_t);
161 
162 static void nxge_dma_mem_free(p_nxge_dma_common_t);
163 
164 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
165 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
166 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
167 
168 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
169 	p_nxge_dma_common_t *, size_t);
170 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
171 
172 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
173 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
174 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
175 
176 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
177 	p_nxge_dma_common_t *,
178 	size_t);
179 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
180 
181 static int nxge_init_common_dev(p_nxge_t);
182 static void nxge_uninit_common_dev(p_nxge_t);
183 
184 /*
185  * The next declarations are for the GLDv3 interface.
186  */
187 static int nxge_m_start(void *);
188 static void nxge_m_stop(void *);
189 static int nxge_m_unicst(void *, const uint8_t *);
190 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
191 static int nxge_m_promisc(void *, boolean_t);
192 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
193 static void nxge_m_resources(void *);
194 mblk_t *nxge_m_tx(void *arg, mblk_t *);
195 static nxge_status_t nxge_mac_register(p_nxge_t);
196 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
197 	mac_addr_slot_t slot);
198 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
199 	boolean_t factory);
200 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
201 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
202 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
203 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
204 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
205 static	boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
206 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
207     uint_t, const void *);
208 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
209     uint_t, void *);
210 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
211     const void *);
212 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t,
213     void *);
214 
215 #define	NXGE_M_CALLBACK_FLAGS\
216 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
217 
218 
219 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
220 #define	MAX_DUMP_SZ 256
224 
225 static mac_callbacks_t nxge_m_callbacks = {
226 	NXGE_M_CALLBACK_FLAGS,
227 	nxge_m_stat,
228 	nxge_m_start,
229 	nxge_m_stop,
230 	nxge_m_promisc,
231 	nxge_m_multicst,
232 	nxge_m_unicst,
233 	nxge_m_tx,
234 	nxge_m_resources,
235 	nxge_m_ioctl,
236 	nxge_m_getcapab,
237 	NULL,			/* mc_open */
238 	NULL,			/* mc_close */
239 	nxge_m_setprop,
240 	nxge_m_getprop
241 };
242 
243 void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
245 
246 /* PSARC/2007/453 MSI-X interrupt limit override. */
247 #define	NXGE_MSIX_REQUEST_10G	8
248 #define	NXGE_MSIX_REQUEST_1G	2
249 static int nxge_create_msi_property(p_nxge_t);
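/*
 * nxge_create_msi_property() advertises the request sizes above to
 * the platform (8 MSI-X vectors for a 10G port, 2 for a 1G port),
 * per the PSARC/2007/453 override.
 */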
250 
251 /*
252  * These global variables control the message
253  * output.
254  */
255 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
256 uint64_t nxge_debug_level = 0;
257 
258 /*
259  * This list contains the instance structures for the Neptune
260  * devices present in the system. The lock exists to guarantee
261  * mutually exclusive access to the list.
262  */
263 void 			*nxge_list = NULL;
264 
265 void			*nxge_hw_list = NULL;
266 nxge_os_mutex_t 	nxge_common_lock;
267 
268 extern uint64_t 	npi_debug_level;
269 
270 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
271 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
272 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
273 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
274 extern void		nxge_fm_init(p_nxge_t,
275 					ddi_device_acc_attr_t *,
276 					ddi_device_acc_attr_t *,
277 					ddi_dma_attr_t *);
278 extern void		nxge_fm_fini(p_nxge_t);
279 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
280 
281 /*
282  * Count used to maintain the number of buffers being used
283  * by Neptune instances and loaned up to the upper layers.
284  */
285 uint32_t nxge_mblks_pending = 0;
286 
287 /*
288  * Device register access attributes for PIO.
289  */
290 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
291 	DDI_DEVICE_ATTR_V0,
292 	DDI_STRUCTURE_LE_ACC,
293 	DDI_STRICTORDER_ACC,
294 };
295 
296 /*
297  * Device descriptor access attributes for DMA.
298  */
299 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
300 	DDI_DEVICE_ATTR_V0,
301 	DDI_STRUCTURE_LE_ACC,
302 	DDI_STRICTORDER_ACC
303 };
304 
305 /*
306  * Device buffer access attributes for DMA.
307  */
308 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
309 	DDI_DEVICE_ATTR_V0,
310 	DDI_STRUCTURE_BE_ACC,
311 	DDI_STRICTORDER_ACC
312 };
313 
314 ddi_dma_attr_t nxge_desc_dma_attr = {
315 	DMA_ATTR_V0,		/* version number. */
316 	0,			/* low address */
317 	0xffffffffffffffff,	/* high address */
318 	0xffffffffffffffff,	/* address counter max */
319 #ifndef NIU_PA_WORKAROUND
320 	0x100000,		/* alignment */
321 #else
322 	0x2000,
323 #endif
324 	0xfc00fc,		/* dlim_burstsizes */
325 	0x1,			/* minimum transfer size */
326 	0xffffffffffffffff,	/* maximum transfer size */
327 	0xffffffffffffffff,	/* maximum segment size */
328 	1,			/* scatter/gather list length */
329 	(unsigned int) 1,	/* granularity */
330 	0			/* attribute flags */
331 };
332 
333 ddi_dma_attr_t nxge_tx_dma_attr = {
334 	DMA_ATTR_V0,		/* version number. */
335 	0,			/* low address */
336 	0xffffffffffffffff,	/* high address */
337 	0xffffffffffffffff,	/* address counter max */
338 #if defined(_BIG_ENDIAN)
339 	0x2000,			/* alignment */
340 #else
341 	0x1000,			/* alignment */
342 #endif
343 	0xfc00fc,		/* dlim_burstsizes */
344 	0x1,			/* minimum transfer size */
345 	0xffffffffffffffff,	/* maximum transfer size */
346 	0xffffffffffffffff,	/* maximum segment size */
347 	5,			/* scatter/gather list length */
348 	(unsigned int) 1,	/* granularity */
349 	0			/* attribute flags */
350 };
351 
352 ddi_dma_attr_t nxge_rx_dma_attr = {
353 	DMA_ATTR_V0,		/* version number. */
354 	0,			/* low address */
355 	0xffffffffffffffff,	/* high address */
356 	0xffffffffffffffff,	/* address counter max */
357 	0x2000,			/* alignment */
358 	0xfc00fc,		/* dlim_burstsizes */
359 	0x1,			/* minimum transfer size */
360 	0xffffffffffffffff,	/* maximum transfer size */
361 	0xffffffffffffffff,	/* maximum segment size */
362 	1,			/* scatter/gather list length */
363 	(unsigned int) 1,	/* granularity */
364 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
365 };
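/*
 * Note the differences among the attributes above: transmit binds
 * may be split into up to 5 cookies (gather), while descriptors and
 * receive buffers require a single cookie (sgllen 1), and only the
 * receive attribute allows relaxed ordering.
 */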
366 
367 ddi_dma_lim_t nxge_dma_limits = {
368 	(uint_t)0,		/* dlim_addr_lo */
369 	(uint_t)0xffffffff,	/* dlim_addr_hi */
370 	(uint_t)0xffffffff,	/* dlim_cntr_max */
371 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
372 	0x1,			/* dlim_minxfer */
373 	1024			/* dlim_speed */
374 };
375 
376 dma_method_t nxge_force_dma = DVMA;
377 
378 /*
379  * DMA chunk sizes.
380  *
381  * Try to allocate the largest possible size so that fewer
382  * DMA chunks need to be managed.
383  */
384 #ifdef NIU_PA_WORKAROUND
385 size_t alloc_sizes [] = {0x2000};
386 #else
387 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
388 		0x10000, 0x20000, 0x40000, 0x80000,
389 		0x100000, 0x200000, 0x400000, 0x800000,
390 		0x1000000, 0x2000000, 0x4000000};
391 #endif
392 
393 /*
394  * nxge_attach - attach(9E) entry point.  Handles DDI_ATTACH,
 * DDI_RESUME and DDI_PM_RESUME.
395  */
396 
397 static int
398 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
399 {
400 	p_nxge_t	nxgep = NULL;
401 	int		instance;
402 	int		status = DDI_SUCCESS;
403 	uint8_t		portn;
404 	nxge_mmac_t	*mmac_info;
405 
406 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
407 
408 	/*
409 	 * Get the device instance since we'll need to setup
410 	 * or retrieve a soft state for this instance.
411 	 */
412 	instance = ddi_get_instance(dip);
413 
414 	switch (cmd) {
415 	case DDI_ATTACH:
416 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
417 		break;
418 
419 	case DDI_RESUME:
420 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
421 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
422 		if (nxgep == NULL) {
423 			status = DDI_FAILURE;
424 			break;
425 		}
426 		if (nxgep->dip != dip) {
427 			status = DDI_FAILURE;
428 			break;
429 		}
430 		if (nxgep->suspended == DDI_PM_SUSPEND) {
431 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
432 		} else {
433 			status = nxge_resume(nxgep);
434 		}
435 		goto nxge_attach_exit;
436 
437 	case DDI_PM_RESUME:
438 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
439 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
440 		if (nxgep == NULL) {
441 			status = DDI_FAILURE;
442 			break;
443 		}
444 		if (nxgep->dip != dip) {
445 			status = DDI_FAILURE;
446 			break;
447 		}
448 		status = nxge_resume(nxgep);
449 		goto nxge_attach_exit;
450 
451 	default:
452 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
453 		status = DDI_FAILURE;
454 		goto nxge_attach_exit;
455 	}
456 
457 
458 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
459 		status = DDI_FAILURE;
460 		goto nxge_attach_exit;
461 	}
462 
463 	nxgep = ddi_get_soft_state(nxge_list, instance);
464 	if (nxgep == NULL) {
465 		status = NXGE_ERROR;
466 		goto nxge_attach_fail2;
467 	}
468 
469 	nxgep->nxge_magic = NXGE_MAGIC;
470 
471 	nxgep->drv_state = 0;
472 	nxgep->dip = dip;
473 	nxgep->instance = instance;
474 	nxgep->p_dip = ddi_get_parent(dip);
475 	nxgep->nxge_debug_level = nxge_debug_level;
476 	npi_debug_level = nxge_debug_level;
477 
478 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
479 				&nxge_rx_dma_attr);
480 
481 	status = nxge_map_regs(nxgep);
482 	if (status != NXGE_OK) {
483 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
484 		goto nxge_attach_fail3;
485 	}
486 
487 	status = nxge_init_common_dev(nxgep);
488 	if (status != NXGE_OK) {
489 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
490 			"nxge_init_common_dev failed"));
491 		goto nxge_attach_fail4;
492 	}
493 
494 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
495 		if (nxgep->function_num > 1) {
496 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
497 			    " function %d. Only functions 0 and 1 are "
498 			    "supported for this card.", nxgep->function_num));
499 			status = NXGE_ERROR;
500 			goto nxge_attach_fail4;
501 		}
502 	}
503 
504 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
505 	nxgep->mac.portnum = portn;
506 	if ((portn == 0) || (portn == 1))
507 		nxgep->mac.porttype = PORT_TYPE_XMAC;
508 	else
509 		nxgep->mac.porttype = PORT_TYPE_BMAC;
510 	/*
511 	 * Neptune has 4 ports: the first 2 use the XMAC (10G MAC)
512 	 * internally, and the other 2 use the BMAC (1G "Big" MAC).
513 	 * The two types of MAC have different characteristics.
514 	 */
515 	mmac_info = &nxgep->nxge_mmac_info;
516 	if (nxgep->function_num < 2) {
517 		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
518 		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
519 	} else {
520 		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
521 		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
522 	}
523 	/*
524 	 * Set up the ndd parameters for this instance.
525 	 */
526 	nxge_init_param(nxgep);
527 
528 	/*
529 	 * Setup Register Tracing Buffer.
530 	 */
531 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
532 
533 	/* init stats ptr */
534 	nxge_init_statsp(nxgep);
535 
536 	/*
537 	 * Read the VPD info from the EEPROM into a local data
538 	 * structure and check its validity.
539 	 */
540 	nxge_vpd_info_get(nxgep);
541 
542 	status = nxge_xcvr_find(nxgep);
543 
544 	if (status != NXGE_OK) {
545 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
546 				    " Couldn't determine card type"
547 				    " .... exit "));
548 		goto nxge_attach_fail5;
549 	}
550 
551 	status = nxge_get_config_properties(nxgep);
552 
553 	if (status != NXGE_OK) {
554 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
555 		goto nxge_attach_fail;
556 	}
557 
558 	/*
559 	 * Setup the Kstats for the driver.
560 	 */
561 	nxge_setup_kstats(nxgep);
562 
563 	nxge_setup_param(nxgep);
564 
565 	status = nxge_setup_system_dma_pages(nxgep);
566 	if (status != NXGE_OK) {
567 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
568 		goto nxge_attach_fail;
569 	}
570 
571 #if	defined(sun4v)
572 	if (nxgep->niu_type == N2_NIU) {
573 		nxgep->niu_hsvc_available = B_FALSE;
574 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
575 		if ((status =
576 			hsvc_register(&nxgep->niu_hsvc,
577 					&nxgep->niu_min_ver)) != 0) {
578 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
579 					"nxge_attach: "
580 					"%s: cannot negotiate "
581 					"hypervisor services "
582 					"revision %d "
583 					"group: 0x%lx "
584 					"major: 0x%lx minor: 0x%lx "
585 					"errno: %d",
586 					niu_hsvc.hsvc_modname,
587 					niu_hsvc.hsvc_rev,
588 					niu_hsvc.hsvc_group,
589 					niu_hsvc.hsvc_major,
590 					niu_hsvc.hsvc_minor,
591 					status));
592 				status = DDI_FAILURE;
593 				goto nxge_attach_fail;
594 		}
595 
596 		nxgep->niu_hsvc_available = B_TRUE;
597 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
598 			"NIU Hypervisor service enabled"));
599 	}
600 #endif
601 
602 	nxge_hw_id_init(nxgep);
603 	nxge_hw_init_niu_common(nxgep);
604 
605 	status = nxge_setup_mutexes(nxgep);
606 	if (status != NXGE_OK) {
607 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
608 		goto nxge_attach_fail;
609 	}
610 
611 	status = nxge_setup_dev(nxgep);
612 	if (status != DDI_SUCCESS) {
613 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
614 		goto nxge_attach_fail;
615 	}
616 
617 	status = nxge_add_intrs(nxgep);
618 	if (status != DDI_SUCCESS) {
619 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
620 		goto nxge_attach_fail;
621 	}
622 	status = nxge_add_soft_intrs(nxgep);
623 	if (status != DDI_SUCCESS) {
624 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
625 		goto nxge_attach_fail;
626 	}
627 
628 	/*
629 	 * Enable interrupts.
630 	 */
631 	nxge_intrs_enable(nxgep);
632 
633 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
634 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
635 			"unable to register to mac layer (%d)", status));
636 		goto nxge_attach_fail;
637 	}
638 
639 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
640 
641 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
642 		instance));
643 
644 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
645 
646 	goto nxge_attach_exit;
647 
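	/*
	 * The failure labels below unwind in the reverse order of
	 * setup; entering at nxge_attach_fail performs the full
	 * nxge_unattach() teardown instead.
	 */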
648 nxge_attach_fail:
649 	nxge_unattach(nxgep);
650 	goto nxge_attach_fail1;
651 
652 nxge_attach_fail5:
653 	/*
654 	 * Tear down the ndd parameters setup.
655 	 */
656 	nxge_destroy_param(nxgep);
657 
658 	/*
659 	 * Tear down the kstat setup.
660 	 */
661 	nxge_destroy_kstats(nxgep);
662 
663 nxge_attach_fail4:
664 	if (nxgep->nxge_hw_p) {
665 		nxge_uninit_common_dev(nxgep);
666 		nxgep->nxge_hw_p = NULL;
667 	}
668 
669 nxge_attach_fail3:
670 	/*
671 	 * Unmap the register setup.
672 	 */
673 	nxge_unmap_regs(nxgep);
674 
675 	nxge_fm_fini(nxgep);
676 
677 nxge_attach_fail2:
678 	ddi_soft_state_free(nxge_list, instance);
679 
680 nxge_attach_fail1:
681 	if (status != NXGE_OK)
682 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
683 	nxgep = NULL;
684 
685 nxge_attach_exit:
686 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
687 		status));
688 
689 	return (status);
690 }
691 
692 static int
693 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
694 {
695 	int 		status = DDI_SUCCESS;
696 	int 		instance;
697 	p_nxge_t 	nxgep = NULL;
698 
699 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
700 	instance = ddi_get_instance(dip);
701 	nxgep = ddi_get_soft_state(nxge_list, instance);
702 	if (nxgep == NULL) {
703 		status = DDI_FAILURE;
704 		goto nxge_detach_exit;
705 	}
706 
707 	switch (cmd) {
708 	case DDI_DETACH:
709 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
710 		break;
711 
712 	case DDI_PM_SUSPEND:
713 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
714 		nxgep->suspended = DDI_PM_SUSPEND;
715 		nxge_suspend(nxgep);
716 		break;
717 
718 	case DDI_SUSPEND:
719 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
720 		if (nxgep->suspended != DDI_PM_SUSPEND) {
721 			nxgep->suspended = DDI_SUSPEND;
722 			nxge_suspend(nxgep);
723 		}
724 		break;
725 
726 	default:
727 		status = DDI_FAILURE;
728 	}
729 
730 	if (cmd != DDI_DETACH)
731 		goto nxge_detach_exit;
732 
733 	/*
734 	 * Stop the xcvr polling.
735 	 */
736 	nxgep->suspended = cmd;
737 
738 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
739 
740 	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
741 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
742 			"<== nxge_detach status = 0x%08X", status));
743 		return (DDI_FAILURE);
744 	}
745 
746 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
747 		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
748 
749 	nxge_unattach(nxgep);
750 	nxgep = NULL;
751 
752 nxge_detach_exit:
753 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
754 		status));
755 
756 	return (status);
757 }
758 
759 static void
760 nxge_unattach(p_nxge_t nxgep)
761 {
762 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
763 
764 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
765 		return;
766 	}
767 
768 	nxgep->nxge_magic = 0;
769 
770 	if (nxgep->nxge_timerid) {
771 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
772 		nxgep->nxge_timerid = 0;
773 	}
774 
775 	if (nxgep->nxge_hw_p) {
776 		nxge_uninit_common_dev(nxgep);
777 		nxgep->nxge_hw_p = NULL;
778 	}
779 
780 #if	defined(sun4v)
781 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
782 		(void) hsvc_unregister(&nxgep->niu_hsvc);
783 		nxgep->niu_hsvc_available = B_FALSE;
784 	}
785 #endif
786 	/*
787 	 * Stop any further interrupts.
788 	 */
789 	nxge_remove_intrs(nxgep);
790 
791 	/* remove soft interrupts */
792 	nxge_remove_soft_intrs(nxgep);
793 
794 	/*
795 	 * Stop the device and free resources.
796 	 */
797 	nxge_destroy_dev(nxgep);
798 
799 	/*
800 	 * Tear down the ndd parameters setup.
801 	 */
802 	nxge_destroy_param(nxgep);
803 
804 	/*
805 	 * Tear down the kstat setup.
806 	 */
807 	nxge_destroy_kstats(nxgep);
808 
809 	/*
810 	 * Destroy all mutexes.
811 	 */
812 	nxge_destroy_mutexes(nxgep);
813 
814 	/*
815 	 * Remove the list of ndd parameters which
816 	 * were set up during attach.
817 	 */
818 	if (nxgep->dip) {
819 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
820 				    " nxge_unattach: remove all properties"));
821 
822 		(void) ddi_prop_remove_all(nxgep->dip);
823 	}
824 
825 #if NXGE_PROPERTY
826 	nxge_remove_hard_properties(nxgep);
827 #endif
828 
829 	/*
830 	 * Unmap the register setup.
831 	 */
832 	nxge_unmap_regs(nxgep);
833 
834 	nxge_fm_fini(nxgep);
835 
836 	ddi_soft_state_free(nxge_list, nxgep->instance);
837 
838 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
839 }
840 
841 static char n2_siu_name[] = "niu";
842 
843 static nxge_status_t
844 nxge_map_regs(p_nxge_t nxgep)
845 {
846 	int		ddi_status = DDI_SUCCESS;
847 	p_dev_regs_t 	dev_regs;
848 	char		buf[MAXPATHLEN + 1];
849 	char 		*devname;
850 #ifdef	NXGE_DEBUG
851 	char 		*sysname;
852 #endif
853 	off_t		regsize;
854 	nxge_status_t	status = NXGE_OK;
855 #if !defined(_BIG_ENDIAN)
856 	off_t pci_offset;
857 	uint16_t pcie_devctl;
858 #endif
859 
860 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
861 	nxgep->dev_regs = NULL;
862 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
863 	dev_regs->nxge_regh = NULL;
864 	dev_regs->nxge_pciregh = NULL;
865 	dev_regs->nxge_msix_regh = NULL;
866 	dev_regs->nxge_vir_regh = NULL;
867 	dev_regs->nxge_vir2_regh = NULL;
868 	nxgep->niu_type = NIU_TYPE_NONE;
869 
870 	devname = ddi_pathname(nxgep->dip, buf);
871 	ASSERT(strlen(devname) > 0);
872 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
873 		"nxge_map_regs: pathname devname %s", devname));
874 
875 	if (strstr(devname, n2_siu_name)) {
876 		/* N2/NIU */
877 		nxgep->niu_type = N2_NIU;
878 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
879 			"nxge_map_regs: N2/NIU devname %s", devname));
880 		/* get function number */
881 		nxgep->function_num =
882 			(devname[strlen(devname) -1] == '1' ? 1 : 0);
883 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
884 			"nxge_map_regs: N2/NIU function number %d",
885 			nxgep->function_num));
886 	} else {
887 		int		*prop_val;
888 		uint_t 		prop_len;
889 		uint8_t 	func_num;
890 
891 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
892 				0, "reg",
893 				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
894 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
895 				"Reg property not found"));
896 			ddi_status = DDI_FAILURE;
897 			goto nxge_map_regs_fail0;
898 
899 		} else {
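			/*
			 * The first cell of the PCI "reg" property
			 * encodes bus/device/function; bits 10:8
			 * hold the function number.
			 */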
900 			func_num = (prop_val[0] >> 8) & 0x7;
901 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
902 				"Reg property found: fun # %d",
903 				func_num));
904 			nxgep->function_num = func_num;
905 			ddi_prop_free(prop_val);
906 		}
907 	}
908 
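	/*
	 * Map the register sets.  The default (PCI/PCI-E Neptune)
	 * case maps PCI config space, the device registers, the
	 * MSI/MSI-X region and the virtualization region; the N2/NIU
	 * case maps the device registers and two virtualization
	 * regions.
	 */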
909 	switch (nxgep->niu_type) {
910 	default:
911 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
912 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
913 			"nxge_map_regs: pci config size 0x%x", regsize));
914 
915 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
916 			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
917 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
918 		if (ddi_status != DDI_SUCCESS) {
919 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
920 				"ddi_map_regs, nxge bus config regs failed"));
921 			goto nxge_map_regs_fail0;
922 		}
923 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
924 			"nxge_map_reg: PCI config addr 0x%0llx "
925 			" handle 0x%0llx", dev_regs->nxge_pciregp,
926 			dev_regs->nxge_pciregh));
927 			/*
928 			 * IMPORTANT: workaround for a bit-swapping
929 			 * bug in the HW which ends up with no-snoop
930 			 * enabled, resulting in DMA not being
931 			 * synched properly.
932 			 */
933 #if !defined(_BIG_ENDIAN)
934 		/* workarounds for x86 systems */
935 		pci_offset = 0x80 + PCIE_DEVCTL;
936 		pcie_devctl = 0x0;
937 		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
938 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
939 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
940 				    pcie_devctl);
941 #endif
942 
943 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
944 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
945 			"nxge_map_regs: pio size 0x%x", regsize));
946 		/* set up the device mapped register */
947 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
948 			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
949 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
950 		if (ddi_status != DDI_SUCCESS) {
951 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
952 				"ddi_map_regs for Neptune global reg failed"));
953 			goto nxge_map_regs_fail1;
954 		}
955 
956 		/* set up the msi/msi-x mapped register */
957 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
958 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
959 			"nxge_map_regs: msix size 0x%x", regsize));
960 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
961 			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
962 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
963 		if (ddi_status != DDI_SUCCESS) {
964 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
965 				"ddi_map_regs for msi reg failed"));
966 			goto nxge_map_regs_fail2;
967 		}
968 
969 		/* set up the vio region mapped register */
970 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
971 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
972 			"nxge_map_regs: vio size 0x%x", regsize));
973 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
974 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
975 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
976 
977 		if (ddi_status != DDI_SUCCESS) {
978 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
979 				"ddi_map_regs for nxge vio reg failed"));
980 			goto nxge_map_regs_fail3;
981 		}
982 		nxgep->dev_regs = dev_regs;
983 
984 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
985 		NPI_PCI_ADD_HANDLE_SET(nxgep,
986 			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
987 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
988 		NPI_MSI_ADD_HANDLE_SET(nxgep,
989 			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
990 
991 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
992 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
993 
994 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
995 		NPI_REG_ADD_HANDLE_SET(nxgep,
996 			(npi_reg_ptr_t)dev_regs->nxge_regp);
997 
998 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
999 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1000 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1001 
1002 		break;
1003 
1004 	case N2_NIU:
1005 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1006 		/*
1007 		 * Set up the device mapped register (FWARC 2006/556)
1008 		 * (changed back to 1: reg starts at 1!)
1009 		 */
1010 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1011 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1012 			"nxge_map_regs: dev size 0x%x", regsize));
1013 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1014 				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1015 				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1016 
1017 		if (ddi_status != DDI_SUCCESS) {
1018 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1019 				"ddi_map_regs for N2/NIU, global reg failed "));
1020 			goto nxge_map_regs_fail1;
1021 		}
1022 
1023 		/* set up the vio region mapped register */
1024 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1025 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1026 			"nxge_map_regs: vio (1) size 0x%x", regsize));
1027 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1028 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1029 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1030 
1031 		if (ddi_status != DDI_SUCCESS) {
1032 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1033 				"ddi_map_regs for nxge vio reg failed"));
1034 			goto nxge_map_regs_fail2;
1035 		}
1036 		/* set up the second vio region mapped register */
1037 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1038 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1039 			"nxge_map_regs: vio (3) size 0x%x", regsize));
1040 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1041 			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1042 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1043 
1044 		if (ddi_status != DDI_SUCCESS) {
1045 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1046 				"ddi_map_regs for nxge vio2 reg failed"));
1047 			goto nxge_map_regs_fail3;
1048 		}
1049 		nxgep->dev_regs = dev_regs;
1050 
1051 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1052 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1053 
1054 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1055 		NPI_REG_ADD_HANDLE_SET(nxgep,
1056 			(npi_reg_ptr_t)dev_regs->nxge_regp);
1057 
1058 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1059 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1060 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1061 
1062 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1063 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
1064 			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1065 
1066 		break;
1067 	}
1068 
1069 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1070 		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1071 
1072 	goto nxge_map_regs_exit;
1073 nxge_map_regs_fail3:
1074 	if (dev_regs->nxge_msix_regh) {
1075 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1076 	}
1077 	if (dev_regs->nxge_vir_regh) {
1078 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1079 	}
1080 nxge_map_regs_fail2:
1081 	if (dev_regs->nxge_regh) {
1082 		ddi_regs_map_free(&dev_regs->nxge_regh);
1083 	}
1084 nxge_map_regs_fail1:
1085 	if (dev_regs->nxge_pciregh) {
1086 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1087 	}
1088 nxge_map_regs_fail0:
1089 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1090 	kmem_free(dev_regs, sizeof (dev_regs_t));
1091 
1092 nxge_map_regs_exit:
1093 	if (ddi_status != DDI_SUCCESS)
1094 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1095 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1096 	return (status);
1097 }
1098 
1099 static void
1100 nxge_unmap_regs(p_nxge_t nxgep)
1101 {
1102 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1103 	if (nxgep->dev_regs) {
1104 		if (nxgep->dev_regs->nxge_pciregh) {
1105 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1106 				"==> nxge_unmap_regs: bus"));
1107 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1108 			nxgep->dev_regs->nxge_pciregh = NULL;
1109 		}
1110 		if (nxgep->dev_regs->nxge_regh) {
1111 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1112 				"==> nxge_unmap_regs: device registers"));
1113 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1114 			nxgep->dev_regs->nxge_regh = NULL;
1115 		}
1116 		if (nxgep->dev_regs->nxge_msix_regh) {
1117 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1118 				"==> nxge_unmap_regs: device interrupts"));
1119 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1120 			nxgep->dev_regs->nxge_msix_regh = NULL;
1121 		}
1122 		if (nxgep->dev_regs->nxge_vir_regh) {
1123 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1124 				"==> nxge_unmap_regs: vio region"));
1125 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1126 			nxgep->dev_regs->nxge_vir_regh = NULL;
1127 		}
1128 		if (nxgep->dev_regs->nxge_vir2_regh) {
1129 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1130 				"==> nxge_unmap_regs: vio2 region"));
1131 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1132 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1133 		}
1134 
1135 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1136 		nxgep->dev_regs = NULL;
1137 	}
1138 
1139 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1140 }
1141 
1142 static nxge_status_t
1143 nxge_setup_mutexes(p_nxge_t nxgep)
1144 {
1145 	int ddi_status = DDI_SUCCESS;
1146 	nxge_status_t status = NXGE_OK;
1147 	nxge_classify_t *classify_ptr;
1148 	int partition;
1149 
1150 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1151 
1152 	/*
1153 	 * Get the interrupt cookie so the mutexes can be
1154 	 * initialized.
1155 	 */
1156 	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1157 					&nxgep->interrupt_cookie);
1158 	if (ddi_status != DDI_SUCCESS) {
1159 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1160 			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
1161 		goto nxge_setup_mutexes_exit;
1162 	}
1163 
1164 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1165 	MUTEX_INIT(&nxgep->poll_lock, NULL,
1166 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1167 
1168 	/*
1169 	 * Initialize mutexes for this device.
1170 	 */
1171 	MUTEX_INIT(nxgep->genlock, NULL,
1172 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1173 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1174 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1175 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1176 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1177 	RW_INIT(&nxgep->filter_lock, NULL,
1178 		RW_DRIVER, (void *)nxgep->interrupt_cookie);
1179 
1180 	classify_ptr = &nxgep->classifier;
1181 	/*
1182 	 * FFLP mutexes are never used in interrupt context, because
1183 	 * FFLP operations can take a very long time to complete and
1184 	 * hence are not suitable to invoke from interrupt
1185 	 * handlers.
1186 	 */
1187 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1188 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1189 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1190 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1191 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1192 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1193 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1194 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1195 		}
1196 	}
1197 
1198 nxge_setup_mutexes_exit:
1199 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1200 	    "<== nxge_setup_mutexes status = %x", status));
1201 
1202 	if (ddi_status != DDI_SUCCESS)
1203 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1204 
1205 	return (status);
1206 }
1207 
1208 static void
1209 nxge_destroy_mutexes(p_nxge_t nxgep)
1210 {
1211 	int partition;
1212 	nxge_classify_t *classify_ptr;
1213 
1214 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1215 	RW_DESTROY(&nxgep->filter_lock);
1216 	MUTEX_DESTROY(&nxgep->mif_lock);
1217 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1218 	MUTEX_DESTROY(nxgep->genlock);
1219 
1220 	classify_ptr = &nxgep->classifier;
1221 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1222 
1223 	/* Destroy all polling resources. */
1224 	MUTEX_DESTROY(&nxgep->poll_lock);
1225 	cv_destroy(&nxgep->poll_cv);
1226 
1227 	/* free data structures, based on HW type */
1228 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1229 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1230 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1231 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1232 		}
1233 	}
1234 
1235 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1236 }
1237 
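/*
 * nxge_init() brings the hardware up in dependency order: buffer
 * pools first, then TXC, TXDMA and RXDMA channels, the classifier
 * (TCAM/FCRAM), ZCP, IPP and the MAC, before enabling interrupts.
 */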
1238 nxge_status_t
1239 nxge_init(p_nxge_t nxgep)
1240 {
1241 	nxge_status_t	status = NXGE_OK;
1242 
1243 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1244 
1245 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1246 		return (status);
1247 	}
1248 
1249 	/*
1250 	 * Allocate system memory for the receive/transmit buffer blocks
1251 	 * and receive/transmit descriptor rings.
1252 	 */
1253 	status = nxge_alloc_mem_pool(nxgep);
1254 	if (status != NXGE_OK) {
1255 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1256 		goto nxge_init_fail1;
1257 	}
1258 
1259 	/*
1260 	 * Initialize and enable TXC registers
1261 	 * (Globally enable TX controller,
1262 	 *  enable a port, configure dma channel bitmap,
1263 	 *  configure the max burst size).
1264 	 */
1265 	status = nxge_txc_init(nxgep);
1266 	if (status != NXGE_OK) {
1267 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
1268 		goto nxge_init_fail2;
1269 	}
1270 
1271 	/*
1272 	 * Initialize and enable TXDMA channels.
1273 	 */
1274 	status = nxge_init_txdma_channels(nxgep);
1275 	if (status != NXGE_OK) {
1276 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1277 		goto nxge_init_fail3;
1278 	}
1279 
1280 	/*
1281 	 * Initialize and enable RXDMA channels.
1282 	 */
1283 	status = nxge_init_rxdma_channels(nxgep);
1284 	if (status != NXGE_OK) {
1285 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1286 		goto nxge_init_fail4;
1287 	}
1288 
1289 	/*
1290 	 * Initialize TCAM and FCRAM (Neptune).
1291 	 */
1292 	status = nxge_classify_init(nxgep);
1293 	if (status != NXGE_OK) {
1294 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1295 		goto nxge_init_fail5;
1296 	}
1297 
1298 	/*
1299 	 * Initialize ZCP
1300 	 */
1301 	status = nxge_zcp_init(nxgep);
1302 	if (status != NXGE_OK) {
1303 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1304 		goto nxge_init_fail5;
1305 	}
1306 
1307 	/*
1308 	 * Initialize IPP.
1309 	 */
1310 	status = nxge_ipp_init(nxgep);
1311 	if (status != NXGE_OK) {
1312 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1313 		goto nxge_init_fail5;
1314 	}
1315 
1316 	/*
1317 	 * Initialize the MAC block.
1318 	 */
1319 	status = nxge_mac_init(nxgep);
1320 	if (status != NXGE_OK) {
1321 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1322 		goto nxge_init_fail5;
1323 	}
1324 
1325 	nxge_intrs_enable(nxgep);
1326 
1327 	/*
1328 	 * Enable hardware interrupts.
1329 	 */
1330 	nxge_intr_hw_enable(nxgep);
1331 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1332 
1333 	goto nxge_init_exit;
1334 
1335 nxge_init_fail5:
1336 	nxge_uninit_rxdma_channels(nxgep);
1337 nxge_init_fail4:
1338 	nxge_uninit_txdma_channels(nxgep);
1339 nxge_init_fail3:
1340 	(void) nxge_txc_uninit(nxgep);
1341 nxge_init_fail2:
1342 	nxge_free_mem_pool(nxgep);
1343 nxge_init_fail1:
1344 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1345 		"<== nxge_init status (failed) = 0x%08x", status));
1346 	return (status);
1347 
1348 nxge_init_exit:
1349 
1350 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1351 		status));
1352 	return (status);
1353 }
1354 
1355 
1356 timeout_id_t
1357 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1358 {
1359 	if ((nxgep->suspended == 0) ||
1360 			(nxgep->suspended == DDI_RESUME)) {
1361 		return (timeout(func, (caddr_t)nxgep,
1362 			drv_usectohz(1000 * msec)));
1363 	}
1364 	return (NULL);
1365 }
1366 
1367 /*ARGSUSED*/
1368 void
1369 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1370 {
1371 	if (timerid) {
1372 		(void) untimeout(timerid);
1373 	}
1374 }
1375 
1376 void
1377 nxge_uninit(p_nxge_t nxgep)
1378 {
1379 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1380 
1381 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1382 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1383 			"==> nxge_uninit: not initialized"));
1384 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1385 			"<== nxge_uninit"));
1386 		return;
1387 	}
1388 
1389 	/* stop timer */
1390 	if (nxgep->nxge_timerid) {
1391 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1392 		nxgep->nxge_timerid = 0;
1393 	}
1394 
1395 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1396 	(void) nxge_intr_hw_disable(nxgep);
1397 
1398 	/*
1399 	 * Reset the receive MAC side.
1400 	 */
1401 	(void) nxge_rx_mac_disable(nxgep);
1402 
1403 	/* Disable and soft reset the IPP */
1404 	(void) nxge_ipp_disable(nxgep);
1405 
1406 	/* Free classification resources */
1407 	(void) nxge_classify_uninit(nxgep);
1408 
1409 	/*
1410 	 * Reset the transmit/receive DMA side.
1411 	 */
1412 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1413 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1414 
1415 	nxge_uninit_txdma_channels(nxgep);
1416 	nxge_uninit_rxdma_channels(nxgep);
1417 
1418 	/*
1419 	 * Reset the transmit MAC side.
1420 	 */
1421 	(void) nxge_tx_mac_disable(nxgep);
1422 
1423 	nxge_free_mem_pool(nxgep);
1424 
1425 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1426 
1427 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1428 
1429 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1430 		"nxge_mblks_pending %d", nxge_mblks_pending));
1431 }
1432 
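/*
 * Debug register peek/poke helpers: the mblk passed in carries a
 * 64-bit register offset (and, for nxge_put64(), a value to write).
 */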
1433 void
1434 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1435 {
1436 #if defined(__i386)
1437 	size_t		reg;
1438 #else
1439 	uint64_t	reg;
1440 #endif
1441 	uint64_t	regdata;
1442 	int		i, retry;
1443 
1444 	/* reg is a size_t on 32-bit x86; copy only sizeof (reg) bytes */
	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (reg));
1445 	regdata = 0;
1446 	retry = 1;
1447 
1448 	for (i = 0; i < retry; i++) {
1449 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1450 	}
1451 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1452 }
1453 
1454 void
1455 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1456 {
1457 #if defined(__i386)
1458 	size_t		reg;
1459 #else
1460 	uint64_t	reg;
1461 #endif
1462 	uint64_t	buf[2];
1463 
1464 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1465 #if defined(__i386)
1466 	reg = (size_t)buf[0];
1467 #else
1468 	reg = buf[0];
1469 #endif
1470 
1471 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1472 }
1473 
1474 
1475 nxge_os_mutex_t nxgedebuglock;
1476 int nxge_debug_init = 0;
1477 
1478 /*ARGSUSED*/
1479 /*VARARGS*/
1480 void
1481 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1482 {
1483 	char msg_buffer[1048];
1484 	char prefix_buffer[32];
1485 	int instance;
1486 	uint64_t debug_level;
1487 	int cmn_level = CE_CONT;
1488 	va_list ap;
1489 
1490 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1491 		nxgep->nxge_debug_level;
1492 
1493 	if ((level & debug_level) ||
1494 		(level == NXGE_NOTE) ||
1495 		(level == NXGE_ERR_CTL)) {
1496 		/* do the msg processing */
1497 		if (nxge_debug_init == 0) {
1498 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1499 			nxge_debug_init = 1;
1500 		}
1501 
1502 		MUTEX_ENTER(&nxgedebuglock);
1503 
1504 		if ((level & NXGE_NOTE)) {
1505 			cmn_level = CE_NOTE;
1506 		}
1507 
1508 		if (level & NXGE_ERR_CTL) {
1509 			cmn_level = CE_WARN;
1510 		}
1511 
1512 		va_start(ap, fmt);
1513 		(void) vsprintf(msg_buffer, fmt, ap);
1514 		va_end(ap);
1515 		if (nxgep == NULL) {
1516 			instance = -1;
1517 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1518 		} else {
1519 			instance = nxgep->instance;
1520 			(void) sprintf(prefix_buffer,
1521 						    "%s%d :", "nxge", instance);
1522 		}
1523 
1524 		MUTEX_EXIT(&nxgedebuglock);
1525 		cmn_err(cmn_level, "!%s %s\n",
1526 				prefix_buffer, msg_buffer);
1527 
1528 	}
1529 }
1530 
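/*
 * Format up to MAX_DUMP_SZ bytes of a packet as hex for debug
 * output.  The result is returned in a static buffer, so concurrent
 * callers may interleave; intended for debugging only.
 */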
1531 char *
1532 nxge_dump_packet(char *addr, int size)
1533 {
1534 	uchar_t *ap = (uchar_t *)addr;
1535 	int i;
1536 	static char etherbuf[1024];
1537 	char *cp = etherbuf;
1538 	char digits[] = "0123456789abcdef";
1539 
1540 	if (!size)
1541 		size = 60;
1542 
1543 	if (size > MAX_DUMP_SZ) {
1544 		/* Dump the leading bytes */
1545 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1546 			if (*ap > 0x0f)
1547 				*cp++ = digits[*ap >> 4];
1548 			*cp++ = digits[*ap++ & 0xf];
1549 			*cp++ = ':';
1550 		}
1551 		for (i = 0; i < 20; i++)
1552 			*cp++ = '.';
1553 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1554 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1555 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1556 			if (*ap > 0x0f)
1557 				*cp++ = digits[*ap >> 4];
1558 			*cp++ = digits[*ap++ & 0xf];
1559 			*cp++ = ':';
1560 		}
1561 	} else {
1562 		for (i = 0; i < size; i++) {
1563 			if (*ap > 0x0f)
1564 				*cp++ = digits[*ap >> 4];
1565 			*cp++ = digits[*ap++ & 0xf];
1566 			*cp++ = ':';
1567 		}
1568 	}
1569 	*--cp = 0;
1570 	return (etherbuf);
1571 }
1572 
1573 #ifdef	NXGE_DEBUG
1574 static void
1575 nxge_test_map_regs(p_nxge_t nxgep)
1576 {
1577 	ddi_acc_handle_t cfg_handle;
1578 	p_pci_cfg_t	cfg_ptr;
1579 	ddi_acc_handle_t dev_handle;
1580 	char		*dev_ptr;
1581 	ddi_acc_handle_t pci_config_handle;
1582 	uint32_t	regval;
1583 	int		i;
1584 
1585 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1586 
1587 	dev_handle = nxgep->dev_regs->nxge_regh;
1588 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1589 
1590 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1591 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1592 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1593 
1594 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1595 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1596 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1597 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1598 		    &cfg_ptr->vendorid));
1599 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1600 		    "\tvendorid 0x%x devid 0x%x",
1601 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1602 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1603 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1604 		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1605 		    "bar1c 0x%x",
1606 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1607 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1608 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1609 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1610 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1611 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1612 		    "base 28 0x%x bar2c 0x%x\n",
1613 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1614 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1615 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1616 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1617 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1618 		    "\nNeptune PCI BAR: base30 0x%x\n",
1619 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1620 
1621 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1622 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1624 		    "first  0x%llx second 0x%llx third 0x%llx "
1625 		    "last 0x%llx ",
1626 		    NXGE_PIO_READ64(dev_handle,
1627 		    (uint64_t *)(dev_ptr + 0),  0),
1628 		    NXGE_PIO_READ64(dev_handle,
1629 		    (uint64_t *)(dev_ptr + 8),  0),
1630 		    NXGE_PIO_READ64(dev_handle,
1631 		    (uint64_t *)(dev_ptr + 16), 0),
1632 		    NXGE_PIO_READ64(dev_handle,
1633 		    (uint64_t *)(dev_ptr + 24), 0)));
1634 	}
1635 }
1636 
1637 #endif
1638 
1639 static void
1640 nxge_suspend(p_nxge_t nxgep)
1641 {
1642 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1643 
1644 	nxge_intrs_disable(nxgep);
1645 	nxge_destroy_dev(nxgep);
1646 
1647 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1648 }
1649 
1650 static nxge_status_t
1651 nxge_resume(p_nxge_t nxgep)
1652 {
1653 	nxge_status_t status = NXGE_OK;
1654 
1655 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1656 
1657 	nxgep->suspended = DDI_RESUME;
1658 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1659 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1660 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1661 	(void) nxge_rx_mac_enable(nxgep);
1662 	(void) nxge_tx_mac_enable(nxgep);
1663 	nxge_intrs_enable(nxgep);
1664 	nxgep->suspended = 0;
1665 
1666 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1667 			"<== nxge_resume status = 0x%x", status));
1668 	return (status);
1669 }
1670 
1671 static nxge_status_t
1672 nxge_setup_dev(p_nxge_t nxgep)
1673 {
1674 	nxge_status_t	status = NXGE_OK;
1675 
1676 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1677 	    nxgep->mac.portnum));
1678 
1679 	status = nxge_link_init(nxgep);
1680 
1681 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1682 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1683 			"port%d Bad register acc handle", nxgep->mac.portnum));
1684 		status = NXGE_ERROR;
1685 	}
1686 
1687 	if (status != NXGE_OK) {
1688 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1689 			    " nxge_setup_dev status "
1690 			    "(xcvr init 0x%08x)", status));
1691 		goto nxge_setup_dev_exit;
1692 	}
1693 
1694 nxge_setup_dev_exit:
1695 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1696 		"<== nxge_setup_dev port %d status = 0x%08x",
1697 		nxgep->mac.portnum, status));
1698 
1699 	return (status);
1700 }
1701 
1702 static void
1703 nxge_destroy_dev(p_nxge_t nxgep)
1704 {
1705 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
1706 
1707 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1708 
1709 	(void) nxge_hw_stop(nxgep);
1710 
1711 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
1712 }
1713 
1714 static nxge_status_t
1715 nxge_setup_system_dma_pages(p_nxge_t nxgep)
1716 {
1717 	int 			ddi_status = DDI_SUCCESS;
1718 	uint_t 			count;
1719 	ddi_dma_cookie_t 	cookie;
1720 	uint_t 			iommu_pagesize;
1721 	nxge_status_t		status = NXGE_OK;
1722 
1723 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
1724 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
1725 	if (nxgep->niu_type != N2_NIU) {
1726 		iommu_pagesize = dvma_pagesize(nxgep->dip);
1727 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1728 			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1729 			" default_block_size %d iommu_pagesize %d",
1730 			nxgep->sys_page_sz,
1731 			ddi_ptob(nxgep->dip, (ulong_t)1),
1732 			nxgep->rx_default_block_size,
1733 			iommu_pagesize));
1734 
1735 		if (iommu_pagesize != 0) {
1736 			if (nxgep->sys_page_sz == iommu_pagesize) {
1737 				if (iommu_pagesize > 0x4000)
1738 					nxgep->sys_page_sz = 0x4000;
1739 			} else {
1740 				if (nxgep->sys_page_sz > iommu_pagesize)
1741 					nxgep->sys_page_sz = iommu_pagesize;
1742 			}
1743 		}
1744 	}
1745 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1746 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1747 		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1748 		"default_block_size %d page mask %d",
1749 		nxgep->sys_page_sz,
1750 		ddi_ptob(nxgep->dip, (ulong_t)1),
1751 		nxgep->rx_default_block_size,
1752 		nxgep->sys_page_mask));
1753 
1755 	switch (nxgep->sys_page_sz) {
1756 	default:
1757 		nxgep->sys_page_sz = 0x1000;
1758 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1759 		nxgep->rx_default_block_size = 0x1000;
1760 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1761 		break;
1762 	case 0x1000:
1763 		nxgep->rx_default_block_size = 0x1000;
1764 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1765 		break;
1766 	case 0x2000:
1767 		nxgep->rx_default_block_size = 0x2000;
1768 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1769 		break;
1770 	case 0x4000:
1771 		nxgep->rx_default_block_size = 0x4000;
1772 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
1773 		break;
1774 	case 0x8000:
1775 		nxgep->rx_default_block_size = 0x8000;
1776 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
1777 		break;
1778 	}
1779 
1780 #ifndef USE_RX_BIG_BUF
1781 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
1782 #else
1783 		nxgep->rx_default_block_size = 0x2000;
1784 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1785 #endif
1786 	/*
1787 	 * Get the system DMA burst size.
1788 	 */
1789 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
1790 			DDI_DMA_DONTWAIT, 0,
1791 			&nxgep->dmasparehandle);
1792 	if (ddi_status != DDI_SUCCESS) {
1793 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1794 			"ddi_dma_alloc_handle: failed "
1795 			" status 0x%x", ddi_status));
1796 		goto nxge_get_soft_properties_exit;
1797 	}
1798 
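	/*
	 * Bind a small throwaway object (the spare handle itself
	 * serves as the dummy buffer) so that ddi_dma_burstsizes()
	 * can report the burst sizes this platform supports.
	 */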
1799 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
1800 				(caddr_t)nxgep->dmasparehandle,
1801 				sizeof (nxgep->dmasparehandle),
1802 				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1803 				DDI_DMA_DONTWAIT, 0,
1804 				&cookie, &count);
1805 	if (ddi_status != DDI_DMA_MAPPED) {
1806 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1807 			"Binding spare handle to find system"
1808 			" burstsize failed."));
1809 		ddi_status = DDI_FAILURE;
1810 		goto nxge_get_soft_properties_fail1;
1811 	}
1812 
1813 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
1814 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
1815 
1816 nxge_get_soft_properties_fail1:
1817 	ddi_dma_free_handle(&nxgep->dmasparehandle);
1818 
1819 nxge_get_soft_properties_exit:
1820 
1821 	if (ddi_status != DDI_SUCCESS)
1822 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1823 
1824 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1825 		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
1826 	return (status);
1827 }
1828 
1829 static nxge_status_t
1830 nxge_alloc_mem_pool(p_nxge_t nxgep)
1831 {
1832 	nxge_status_t	status = NXGE_OK;
1833 
1834 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
1835 
1836 	status = nxge_alloc_rx_mem_pool(nxgep);
1837 	if (status != NXGE_OK) {
1838 		return (NXGE_ERROR);
1839 	}
1840 
1841 	status = nxge_alloc_tx_mem_pool(nxgep);
1842 	if (status != NXGE_OK) {
1843 		nxge_free_rx_mem_pool(nxgep);
1844 		return (NXGE_ERROR);
1845 	}
1846 
1847 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
1848 	return (NXGE_OK);
1849 }
1850 
1851 static void
1852 nxge_free_mem_pool(p_nxge_t nxgep)
1853 {
1854 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
1855 
1856 	nxge_free_rx_mem_pool(nxgep);
1857 	nxge_free_tx_mem_pool(nxgep);
1858 
1859 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
1860 }
1861 
1862 static nxge_status_t
1863 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
1864 {
1865 	int			i, j;
1866 	uint32_t		ndmas, st_rdc;
1867 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
1868 	p_nxge_hw_pt_cfg_t	p_cfgp;
1869 	p_nxge_dma_pool_t	dma_poolp;
1870 	p_nxge_dma_common_t	*dma_buf_p;
1871 	p_nxge_dma_pool_t	dma_cntl_poolp;
1872 	p_nxge_dma_common_t	*dma_cntl_p;
1873 	size_t			rx_buf_alloc_size;
1874 	size_t			rx_cntl_alloc_size;
1875 	uint32_t 		*num_chunks; /* per dma */
1876 	nxge_status_t		status = NXGE_OK;
1877 
1878 	uint32_t		nxge_port_rbr_size;
1879 	uint32_t		nxge_port_rbr_spare_size;
1880 	uint32_t		nxge_port_rcr_size;
1881 
1882 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
1883 
1884 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1885 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1886 	st_rdc = p_cfgp->start_rdc;
1887 	ndmas = p_cfgp->max_rdcs;
1888 
1889 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1890 		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1891 
1892 	/*
1893 	 * Allocate memory for each receive DMA channel.
1894 	 */
1895 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
1896 			KM_SLEEP);
1897 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1898 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1899 
1900 	dma_cntl_poolp = (p_nxge_dma_pool_t)
1901 				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
1902 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1903 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1904 
1905 	num_chunks = (uint32_t *)KMEM_ZALLOC(
1906 			sizeof (uint32_t) * ndmas, KM_SLEEP);
1907 
1908 	/*
1909 	 * Assume that each DMA channel will be configured with default
1910 	 * block size.
1911 	 * rbr block counts are mod of batch count (16).
1912 	 * rbr block counts must be a multiple of the batch count (16).
1913 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
1914 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
1915 
1916 	if (!nxge_port_rbr_size) {
1917 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
1918 	}
1919 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
1920 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
1921 			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
1922 	}
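	/*
	 * Worked example (editorial note): with NXGE_RXDMA_POST_BATCH = 16,
	 * a requested rbr_size of 1000 is rounded up to
	 * 16 * (1000 / 16 + 1) = 16 * 63 = 1008; sizes that are already a
	 * multiple of 16 skip the adjustment above entirely.
	 */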
1923 
1924 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
1925 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
1926 
1927 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
1928 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
1929 			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
1930 	}
1931 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
1932 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1933 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
1934 		    "set to default %d",
1935 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
1936 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
1937 	}
1938 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
1939 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1940 		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
1941 		    "set to default %d",
1942 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
1943 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
1944 	}
1945 
1946 	/*
1947 	 * N2/NIU limits the descriptor sizes: data buffers must come from
1948 	 * contiguous memory allocation (contig_mem_alloc) of at most 4M,
1949 	 * and control buffers must be little endian (allocated with the
1950 	 * ddi/dki mem alloc functions).
1951 	 */
1952 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1953 	if (nxgep->niu_type == N2_NIU) {
1954 		nxge_port_rbr_spare_size = 0;
1955 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
1956 				(!ISP2(nxge_port_rbr_size))) {
1957 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
1958 		}
1959 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
1960 				(!ISP2(nxge_port_rcr_size))) {
1961 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
1962 		}
1963 	}
1964 #endif
1965 
1966 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
1967 		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
1968 
1969 	/*
1970 	 * Addresses of receive block ring, receive completion ring and the
1971 	 * mailbox must be all cache-aligned (64 bytes).
1972 	 */
1973 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
1974 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1975 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
1976 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
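	/*
	 * Worked example (editorial note; the structure sizes here are
	 * illustrative, not taken from the headers): if rx_desc_t were
	 * 8 bytes and rcr_entry_t 8 bytes, then with rbr_size 1024, no
	 * spares, and rcr_size 512, the control area would be
	 * 1024 * 8 + 512 * 8 + sizeof (rxdma_mailbox_t) bytes.
	 */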
1977 
1978 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
1979 		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
1980 		"nxge_port_rcr_size = %d "
1981 		"rx_cntl_alloc_size = %d",
1982 		nxge_port_rbr_size, nxge_port_rbr_spare_size,
1983 		nxge_port_rcr_size,
1984 		rx_cntl_alloc_size));
1985 
1986 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1987 	if (nxgep->niu_type == N2_NIU) {
1988 		if (!ISP2(rx_buf_alloc_size)) {
1989 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1990 				"==> nxge_alloc_rx_mem_pool: "
1991 				" must be power of 2"));
1992 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1993 			goto nxge_alloc_rx_mem_pool_exit;
1994 		}
1995 
1996 		if (rx_buf_alloc_size > (1 << 22)) {
1997 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1998 				"==> nxge_alloc_rx_mem_pool: "
1999 				" limit size to 4M"));
2000 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2001 			goto nxge_alloc_rx_mem_pool_exit;
2002 		}
2003 
2004 		if (rx_cntl_alloc_size < 0x2000) {
2005 			rx_cntl_alloc_size = 0x2000;
2006 		}
2007 	}
2008 #endif
2009 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2010 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2011 
2012 	/*
2013 	 * Allocate memory for receive buffers and descriptor rings.
2014 	 * Replace allocation functions with interface functions provided
2015 	 * by the partition manager when it is available.
2016 	 */
2017 	/*
2018 	 * Allocate memory for the receive buffer blocks.
2019 	 */
2020 	for (i = 0; i < ndmas; i++) {
2021 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2022 			" nxge_alloc_rx_mem_pool to alloc mem: "
2023 			" dma %d dma_buf_p %llx &dma_buf_p %llx",
2024 			i, dma_buf_p[i], &dma_buf_p[i]));
2025 		num_chunks[i] = 0;
2026 		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
2027 				rx_buf_alloc_size,
2028 				nxgep->rx_default_block_size, &num_chunks[i]);
2029 		if (status != NXGE_OK) {
2030 			break;
2031 		}
2032 		st_rdc++;
2033 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2034 			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
2035 			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
2036 			dma_buf_p[i], &dma_buf_p[i]));
2037 	}
2038 	if (i < ndmas) {
2039 		goto nxge_alloc_rx_mem_fail1;
2040 	}
2041 	/*
2042 	 * Allocate memory for descriptor rings and mailbox.
2043 	 */
2044 	st_rdc = p_cfgp->start_rdc;
2045 	for (j = 0; j < ndmas; j++) {
2046 		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
2047 					rx_cntl_alloc_size);
2048 		if (status != NXGE_OK) {
2049 			break;
2050 		}
2051 		st_rdc++;
2052 	}
2053 	if (j < ndmas) {
2054 		goto nxge_alloc_rx_mem_fail2;
2055 	}
2056 
2057 	dma_poolp->ndmas = ndmas;
2058 	dma_poolp->num_chunks = num_chunks;
2059 	dma_poolp->buf_allocated = B_TRUE;
2060 	nxgep->rx_buf_pool_p = dma_poolp;
2061 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2062 
2063 	dma_cntl_poolp->ndmas = ndmas;
2064 	dma_cntl_poolp->buf_allocated = B_TRUE;
2065 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2066 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2067 
2068 	goto nxge_alloc_rx_mem_pool_exit;
2069 
2070 nxge_alloc_rx_mem_fail2:
2071 	/* Free control buffers */
2072 	j--;
2073 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2074 		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
2075 	for (; j >= 0; j--) {
2076 		nxge_free_rx_cntl_dma(nxgep,
2077 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2078 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2079 			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
2080 			j));
2081 	}
2082 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2083 		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
2084 
2085 nxge_alloc_rx_mem_fail1:
2086 	/* Free data buffers */
2087 	i--;
2088 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2089 		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
2090 	for (; i >= 0; i--) {
2091 		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2092 			num_chunks[i]);
2093 	}
2094 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2095 		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
2096 
2097 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2098 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2099 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2100 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2101 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2102 
2103 nxge_alloc_rx_mem_pool_exit:
2104 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2105 		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2106 
2107 	return (status);
2108 }
2109 
2110 static void
2111 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2112 {
2113 	uint32_t		i, ndmas;
2114 	p_nxge_dma_pool_t	dma_poolp;
2115 	p_nxge_dma_common_t	*dma_buf_p;
2116 	p_nxge_dma_pool_t	dma_cntl_poolp;
2117 	p_nxge_dma_common_t	*dma_cntl_p;
2118 	uint32_t 		*num_chunks;
2119 
2120 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2121 
2122 	dma_poolp = nxgep->rx_buf_pool_p;
2123 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2124 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2125 			"<== nxge_free_rx_mem_pool "
2126 			"(null rx buf pool or buf not allocated)"));
2127 		return;
2128 	}
2129 
2130 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
2131 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2132 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2133 			"<== nxge_free_rx_mem_pool "
2134 			"(null rx cntl buf pool or cntl buf not allocated)"));
2135 		return;
2136 	}
2137 
2138 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2139 	num_chunks = dma_poolp->num_chunks;
2140 
2141 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2142 	ndmas = dma_cntl_poolp->ndmas;
2143 
2144 	for (i = 0; i < ndmas; i++) {
2145 		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2146 	}
2147 
2148 	for (i = 0; i < ndmas; i++) {
2149 		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
2150 	}
2151 
2152 	for (i = 0; i < ndmas; i++) {
2153 		KMEM_FREE(dma_buf_p[i],
2154 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2155 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2156 	}
2157 
2158 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2159 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2160 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2161 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2162 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2163 
2164 	nxgep->rx_buf_pool_p = NULL;
2165 	nxgep->rx_cntl_pool_p = NULL;
2166 
2167 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2168 }
2169 
2170 
2171 static nxge_status_t
2172 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2173 	p_nxge_dma_common_t *dmap,
2174 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2175 {
2176 	p_nxge_dma_common_t 	rx_dmap;
2177 	nxge_status_t		status = NXGE_OK;
2178 	size_t			total_alloc_size;
2179 	size_t			allocated = 0;
2180 	int			i, size_index, array_size;
2181 
2182 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2183 
2184 	rx_dmap = (p_nxge_dma_common_t)
2185 			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2186 			KM_SLEEP);
2187 
2188 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2189 		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2190 		dma_channel, alloc_size, block_size, dmap));
2191 
2192 	total_alloc_size = alloc_size;
2193 
2194 #if defined(RX_USE_RECLAIM_POST)
2195 	total_alloc_size = alloc_size + alloc_size/4;
2196 #endif
2197 
2198 	i = 0;
2199 	size_index = 0;
2200 	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
2201 	while ((size_index < array_size) &&
2202 			(alloc_sizes[size_index] < alloc_size))
2203 		size_index++;
2204 	if (size_index >= array_size) {
2205 		size_index = array_size - 1;
2206 	}
2207 
2208 	while ((allocated < total_alloc_size) &&
2209 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2210 		rx_dmap[i].dma_chunk_index = i;
2211 		rx_dmap[i].block_size = block_size;
2212 		rx_dmap[i].alength = alloc_sizes[size_index];
2213 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2214 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2215 		rx_dmap[i].dma_channel = dma_channel;
2216 		rx_dmap[i].contig_alloc_type = B_FALSE;
2217 
2218 		/*
2219 		 * N2/NIU: data buffers must be contiguous as the driver
2220 		 *	   needs to call Hypervisor api to set up
2221 		 *	   logical pages.
2222 		 */
2223 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2224 			rx_dmap[i].contig_alloc_type = B_TRUE;
2225 		}
2226 
2227 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2228 			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2229 			"i %d nblocks %d alength %d",
2230 			dma_channel, i, &rx_dmap[i], block_size,
2231 			i, rx_dmap[i].nblocks,
2232 			rx_dmap[i].alength));
2233 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2234 			&nxge_rx_dma_attr,
2235 			rx_dmap[i].alength,
2236 			&nxge_dev_buf_dma_acc_attr,
2237 			DDI_DMA_READ | DDI_DMA_STREAMING,
2238 			(p_nxge_dma_common_t)(&rx_dmap[i]));
2239 		if (status != NXGE_OK) {
2240 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2241 				" nxge_alloc_rx_buf_dma: Alloc Failed "));
2242 			size_index--;
2243 		} else {
2244 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2245 				" alloc_rx_buf_dma allocated rdc %d "
2246 				"chunk %d size %x dvma %x bufp %llx ",
2247 				dma_channel, i, rx_dmap[i].alength,
2248 				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
2249 			i++;
2250 			allocated += alloc_sizes[size_index];
2251 		}
2252 	}
2253 
2254 
2255 	if (allocated < total_alloc_size) {
2256 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2257 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2258 		    "allocated 0x%x requested 0x%x",
2259 		    dma_channel,
2260 		    allocated, total_alloc_size));
2261 		status = NXGE_ERROR;
2262 		goto nxge_alloc_rx_mem_fail1;
2263 	}
2264 
2265 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2266 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2267 	    "allocated 0x%x requested 0x%x",
2268 	    dma_channel,
2269 	    allocated, total_alloc_size));
2270 
2271 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2272 		" alloc_rx_buf_dma rdc %d allocated %d chunks",
2273 		dma_channel, i));
2274 	*num_chunks = i;
2275 	*dmap = rx_dmap;
2276 
2277 	goto nxge_alloc_rx_mem_exit;
2278 
2279 nxge_alloc_rx_mem_fail1:
2280 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2281 
2282 nxge_alloc_rx_mem_exit:
2283 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2284 		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2285 
2286 	return (status);
2287 }
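
/*
 * Editorial sketch (guarded out, not part of the driver): the loop in
 * nxge_alloc_rx_buf_dma() starts at the smallest alloc_sizes[] entry that
 * covers the request (alloc_sizes[] is assumed to ascend; the index clamps
 * to the largest entry when none does) and steps down one entry on every
 * allocation failure, accumulating chunks until the request is satisfied
 * (the driver additionally caps the chunk count at NXGE_DMA_BLOCK).  A
 * minimal standalone model of that strategy, with a hypothetical
 * try_alloc() callback:
 */
#ifdef NXGE_EDITORIAL_SKETCH
static size_t
alloc_in_chunks(const size_t *sizes, int nsizes, size_t request,
    int (*try_alloc)(size_t))
{
	size_t	allocated = 0;
	int	idx = 0;

	/* Start at the first size that covers the request (or the last). */
	while ((idx < nsizes - 1) && (sizes[idx] < request))
		idx++;

	/* Allocate chunks, falling back to smaller sizes on failure. */
	while ((allocated < request) && (idx >= 0)) {
		if (try_alloc(sizes[idx]))
			allocated += sizes[idx];
		else
			idx--;
	}
	return (allocated);
}
#endif	/* NXGE_EDITORIAL_SKETCH */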
2288 
2289 /*ARGSUSED*/
2290 static void
2291 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2292     uint32_t num_chunks)
2293 {
2294 	int		i;
2295 
2296 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2297 		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2298 
2299 	for (i = 0; i < num_chunks; i++) {
2300 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2301 			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2302 				i, dmap));
2303 		nxge_dma_mem_free(dmap++);
2304 	}
2305 
2306 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2307 }
2308 
2309 /*ARGSUSED*/
2310 static nxge_status_t
2311 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2312     p_nxge_dma_common_t *dmap, size_t size)
2313 {
2314 	p_nxge_dma_common_t 	rx_dmap;
2315 	nxge_status_t		status = NXGE_OK;
2316 
2317 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2318 
2319 	rx_dmap = (p_nxge_dma_common_t)
2320 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2321 
2322 	rx_dmap->contig_alloc_type = B_FALSE;
2323 
2324 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2325 			&nxge_desc_dma_attr,
2326 			size,
2327 			&nxge_dev_desc_dma_acc_attr,
2328 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2329 			rx_dmap);
2330 	if (status != NXGE_OK) {
2331 		goto nxge_alloc_rx_cntl_dma_fail1;
2332 	}
2333 
2334 	*dmap = rx_dmap;
2335 	goto nxge_alloc_rx_cntl_dma_exit;
2336 
2337 nxge_alloc_rx_cntl_dma_fail1:
2338 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2339 
2340 nxge_alloc_rx_cntl_dma_exit:
2341 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2342 		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2343 
2344 	return (status);
2345 }
2346 
2347 /*ARGSUSED*/
2348 static void
2349 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2350 {
2351 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2352 
2353 	nxge_dma_mem_free(dmap);
2354 
2355 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2356 }
2357 
2358 static nxge_status_t
2359 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2360 {
2361 	nxge_status_t		status = NXGE_OK;
2362 	int			i, j;
2363 	uint32_t		ndmas, st_tdc;
2364 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2365 	p_nxge_hw_pt_cfg_t	p_cfgp;
2366 	p_nxge_dma_pool_t	dma_poolp;
2367 	p_nxge_dma_common_t	*dma_buf_p;
2368 	p_nxge_dma_pool_t	dma_cntl_poolp;
2369 	p_nxge_dma_common_t	*dma_cntl_p;
2370 	size_t			tx_buf_alloc_size;
2371 	size_t			tx_cntl_alloc_size;
2372 	uint32_t		*num_chunks; /* per dma */
2373 	uint32_t		bcopy_thresh;
2374 
2375 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2376 
2377 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2378 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2379 	st_tdc = p_cfgp->start_tdc;
2380 	ndmas = p_cfgp->max_tdcs;
2381 
2382 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2383 		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2384 		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2385 	/*
2386 	 * Allocate memory for each transmit DMA channel.
2387 	 */
2388 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2389 			KM_SLEEP);
2390 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2391 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2392 
2393 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2394 			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2395 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2396 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2397 
2398 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2399 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2400 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2401 		    "set to default %d",
2402 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2403 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2404 	}
2405 
2406 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2407 	/*
2408 	 * N2/NIU limits the descriptor sizes: data buffers must come from
2409 	 * contiguous memory allocation (contig_mem_alloc) of at most 4M,
2410 	 * and control buffers must be little endian (allocated with the
2411 	 * ddi/dki mem alloc functions).  The transmit ring is limited to
2412 	 * 8K (including the mailbox).
2413 	 */
2414 	if (nxgep->niu_type == N2_NIU) {
2415 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2416 			(!ISP2(nxge_tx_ring_size))) {
2417 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2418 		}
2419 	}
2420 #endif
2421 
2422 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2423 
2424 	/*
2425 	 * Assume that each DMA channel will be configured with default
2426 	 * transmit buffer size for copying transmit data.
2427 	 * (For packet payload over this limit, packets will not be
2428 	 *  copied.)
2429 	 */
2430 	if (nxgep->niu_type == N2_NIU) {
2431 		bcopy_thresh = TX_BCOPY_SIZE;
2432 	} else {
2433 		bcopy_thresh = nxge_bcopy_thresh;
2434 	}
2435 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2436 
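	/*
	 * Worked example (editorial note): with bcopy_thresh = 1024 bytes
	 * and a 2048-entry transmit ring, tx_buf_alloc_size is
	 * 1024 * 2048 = 2 MB -- one bcopy staging area per descriptor.
	 */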
2437 	/*
2438 	 * Addresses of transmit descriptor ring and the
2439 	 * mailbox must be all cache-aligned (64 bytes).
2440 	 */
2441 	tx_cntl_alloc_size = nxge_tx_ring_size;
2442 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2443 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
2444 
2445 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2446 	if (nxgep->niu_type == N2_NIU) {
2447 		if (!ISP2(tx_buf_alloc_size)) {
2448 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2449 				"==> nxge_alloc_tx_mem_pool: "
2450 				" must be power of 2"));
2451 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2452 			goto nxge_alloc_tx_mem_pool_exit;
2453 		}
2454 
2455 		if (tx_buf_alloc_size > (1 << 22)) {
2456 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2457 				"==> nxge_alloc_tx_mem_pool: "
2458 				" limit size to 4M"));
2459 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2460 			goto nxge_alloc_tx_mem_pool_exit;
2461 		}
2462 
2463 		if (tx_cntl_alloc_size < 0x2000) {
2464 			tx_cntl_alloc_size = 0x2000;
2465 		}
2466 	}
2467 #endif
2468 
2469 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2470 			sizeof (uint32_t) * ndmas, KM_SLEEP);
2471 
2472 	/*
2473 	 * Allocate memory for transmit buffers and descriptor rings.
2474 	 * Replace allocation functions with interface functions provided
2475 	 * by the partition manager when it is available.
2476 	 *
2477 	 * Allocate memory for the transmit buffer pool.
2478 	 */
2479 	for (i = 0; i < ndmas; i++) {
2480 		num_chunks[i] = 0;
2481 		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
2482 					tx_buf_alloc_size,
2483 					bcopy_thresh, &num_chunks[i]);
2484 		if (status != NXGE_OK) {
2485 			break;
2486 		}
2487 		st_tdc++;
2488 	}
2489 	if (i < ndmas) {
2490 		goto nxge_alloc_tx_mem_pool_fail1;
2491 	}
2492 
2493 	st_tdc = p_cfgp->start_tdc;
2494 	/*
2495 	 * Allocate memory for descriptor rings and mailbox.
2496 	 */
2497 	for (j = 0; j < ndmas; j++) {
2498 		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
2499 					tx_cntl_alloc_size);
2500 		if (status != NXGE_OK) {
2501 			break;
2502 		}
2503 		st_tdc++;
2504 	}
2505 	if (j < ndmas) {
2506 		goto nxge_alloc_tx_mem_pool_fail2;
2507 	}
2508 
2509 	dma_poolp->ndmas = ndmas;
2510 	dma_poolp->num_chunks = num_chunks;
2511 	dma_poolp->buf_allocated = B_TRUE;
2512 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2513 	nxgep->tx_buf_pool_p = dma_poolp;
2514 
2515 	dma_cntl_poolp->ndmas = ndmas;
2516 	dma_cntl_poolp->buf_allocated = B_TRUE;
2517 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2518 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2519 
2520 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2521 		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
2522 		"ndmas %d poolp->ndmas %d",
2523 		st_tdc, ndmas, dma_poolp->ndmas));
2524 
2525 	goto nxge_alloc_tx_mem_pool_exit;
2526 
2527 nxge_alloc_tx_mem_pool_fail2:
2528 	/* Free control buffers */
2529 	j--;
2530 	for (; j >= 0; j--) {
2531 		nxge_free_tx_cntl_dma(nxgep,
2532 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2533 	}
2534 
2535 nxge_alloc_tx_mem_pool_fail1:
2536 	/* Free data buffers */
2537 	i--;
2538 	for (; i >= 0; i--) {
2539 		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2540 			num_chunks[i]);
2541 	}
2542 
2543 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2544 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2545 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2546 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2547 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2548 
2549 nxge_alloc_tx_mem_pool_exit:
2550 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2551 		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
2552 
2553 	return (status);
2554 }
2555 
2556 static nxge_status_t
2557 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2558     p_nxge_dma_common_t *dmap, size_t alloc_size,
2559     size_t block_size, uint32_t *num_chunks)
2560 {
2561 	p_nxge_dma_common_t 	tx_dmap;
2562 	nxge_status_t		status = NXGE_OK;
2563 	size_t			total_alloc_size;
2564 	size_t			allocated = 0;
2565 	int			i, size_index, array_size;
2566 
2567 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
2568 
2569 	tx_dmap = (p_nxge_dma_common_t)
2570 		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2571 			KM_SLEEP);
2572 
2573 	total_alloc_size = alloc_size;
2574 	i = 0;
2575 	size_index = 0;
2576 	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
2577 	while ((size_index < array_size) &&
2578 		(alloc_sizes[size_index] < alloc_size))
2579 		size_index++;
2580 	if (size_index >= array_size) {
2581 		size_index = array_size - 1;
2582 	}
2583 
2584 	while ((allocated < total_alloc_size) &&
2585 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2586 
2587 		tx_dmap[i].dma_chunk_index = i;
2588 		tx_dmap[i].block_size = block_size;
2589 		tx_dmap[i].alength = alloc_sizes[size_index];
2590 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2591 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2592 		tx_dmap[i].dma_channel = dma_channel;
2593 		tx_dmap[i].contig_alloc_type = B_FALSE;
2594 
2595 		/*
2596 		 * N2/NIU: data buffers must be contiguous as the driver
2597 		 *	   needs to call Hypervisor api to set up
2598 		 *	   logical pages.
2599 		 */
2600 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2601 			tx_dmap[i].contig_alloc_type = B_TRUE;
2602 		}
2603 
2604 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2605 			&nxge_tx_dma_attr,
2606 			tx_dmap[i].alength,
2607 			&nxge_dev_buf_dma_acc_attr,
2608 			DDI_DMA_WRITE | DDI_DMA_STREAMING,
2609 			(p_nxge_dma_common_t)(&tx_dmap[i]));
2610 		if (status != NXGE_OK) {
2611 			size_index--;
2612 		} else {
2613 			i++;
2614 			allocated += alloc_sizes[size_index];
2615 		}
2616 	}
2617 
2618 	if (allocated < total_alloc_size) {
2619 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2620 		    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
2621 		    "allocated 0x%x requested 0x%x",
2622 		    dma_channel,
2623 		    allocated, total_alloc_size));
2624 		status = NXGE_ERROR;
2625 		goto nxge_alloc_tx_mem_fail1;
2626 	}
2627 
2628 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2629 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
2630 	    "allocated 0x%x requested 0x%x",
2631 	    dma_channel,
2632 	    allocated, total_alloc_size));
2633 
2634 	*num_chunks = i;
2635 	*dmap = tx_dmap;
2636 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2637 		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2638 		*dmap, i));
2639 	goto nxge_alloc_tx_mem_exit;
2640 
2641 nxge_alloc_tx_mem_fail1:
2642 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2643 
2644 nxge_alloc_tx_mem_exit:
2645 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2646 		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
2647 
2648 	return (status);
2649 }
2650 
2651 /*ARGSUSED*/
2652 static void
2653 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2654     uint32_t num_chunks)
2655 {
2656 	int		i;
2657 
2658 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
2659 
2660 	for (i = 0; i < num_chunks; i++) {
2661 		nxge_dma_mem_free(dmap++);
2662 	}
2663 
2664 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
2665 }
2666 
2667 /*ARGSUSED*/
2668 static nxge_status_t
2669 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2670     p_nxge_dma_common_t *dmap, size_t size)
2671 {
2672 	p_nxge_dma_common_t 	tx_dmap;
2673 	nxge_status_t		status = NXGE_OK;
2674 
2675 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
2676 	tx_dmap = (p_nxge_dma_common_t)
2677 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2678 
2679 	tx_dmap->contig_alloc_type = B_FALSE;
2680 
2681 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2682 			&nxge_desc_dma_attr,
2683 			size,
2684 			&nxge_dev_desc_dma_acc_attr,
2685 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2686 			tx_dmap);
2687 	if (status != NXGE_OK) {
2688 		goto nxge_alloc_tx_cntl_dma_fail1;
2689 	}
2690 
2691 	*dmap = tx_dmap;
2692 	goto nxge_alloc_tx_cntl_dma_exit;
2693 
2694 nxge_alloc_tx_cntl_dma_fail1:
2695 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2696 
2697 nxge_alloc_tx_cntl_dma_exit:
2698 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2699 		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2700 
2701 	return (status);
2702 }
2703 
2704 /*ARGSUSED*/
2705 static void
2706 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2707 {
2708 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2709 
2710 	nxge_dma_mem_free(dmap);
2711 
2712 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2713 }
2714 
2715 static void
2716 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2717 {
2718 	uint32_t		i, ndmas;
2719 	p_nxge_dma_pool_t	dma_poolp;
2720 	p_nxge_dma_common_t	*dma_buf_p;
2721 	p_nxge_dma_pool_t	dma_cntl_poolp;
2722 	p_nxge_dma_common_t	*dma_cntl_p;
2723 	uint32_t 		*num_chunks;
2724 
2725 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2726 
2727 	dma_poolp = nxgep->tx_buf_pool_p;
2728 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2729 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2730 			"<== nxge_free_tx_mem_pool "
2731 			"(null tx buf pool or buf not allocated)"));
2732 		return;
2733 	}
2734 
2735 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2736 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2737 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2738 			"<== nxge_free_tx_mem_pool "
2739 			"(null tx cntl buf pool or cntl buf not allocated)"));
2740 		return;
2741 	}
2742 
2743 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2744 	num_chunks = dma_poolp->num_chunks;
2745 
2746 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2747 	ndmas = dma_cntl_poolp->ndmas;
2748 
2749 	for (i = 0; i < ndmas; i++) {
2750 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2751 	}
2752 
2753 	for (i = 0; i < ndmas; i++) {
2754 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2755 	}
2756 
2757 	for (i = 0; i < ndmas; i++) {
2758 		KMEM_FREE(dma_buf_p[i],
2759 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2760 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2761 	}
2762 
2763 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2764 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2765 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2766 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2767 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2768 
2769 	nxgep->tx_buf_pool_p = NULL;
2770 	nxgep->tx_cntl_pool_p = NULL;
2771 
2772 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2773 }
2774 
2775 /*ARGSUSED*/
2776 static nxge_status_t
2777 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2778 	struct ddi_dma_attr *dma_attrp,
2779 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2780 	p_nxge_dma_common_t dma_p)
2781 {
2782 	caddr_t 		kaddrp;
2783 	int			ddi_status = DDI_SUCCESS;
2784 	boolean_t		contig_alloc_type;
2785 
2786 	contig_alloc_type = dma_p->contig_alloc_type;
2787 
2788 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2789 		/*
2790 		 * contig_alloc_type for contiguous memory only allowed
2791 		 * for N2/NIU.
2792 		 */
2793 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2794 			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
2795 			dma_p->contig_alloc_type));
2796 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2797 	}
2798 
2799 	dma_p->dma_handle = NULL;
2800 	dma_p->acc_handle = NULL;
2801 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2802 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2803 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2804 		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2805 	if (ddi_status != DDI_SUCCESS) {
2806 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2807 			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2808 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2809 	}
2810 
2811 	switch (contig_alloc_type) {
2812 	case B_FALSE:
2813 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2814 			acc_attr_p,
2815 			xfer_flags,
2816 			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2817 			&dma_p->acc_handle);
2818 		if (ddi_status != DDI_SUCCESS) {
2819 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2820 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2821 			ddi_dma_free_handle(&dma_p->dma_handle);
2822 			dma_p->dma_handle = NULL;
2823 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2824 		}
2825 		if (dma_p->alength < length) {
2826 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2827 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2828 				"returned alength < requested length."));
2829 			ddi_dma_mem_free(&dma_p->acc_handle);
2830 			ddi_dma_free_handle(&dma_p->dma_handle);
2831 			dma_p->acc_handle = NULL;
2832 			dma_p->dma_handle = NULL;
2833 			return (NXGE_ERROR);
2834 		}
2835 
2836 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2837 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2838 			&dma_p->dma_cookie, &dma_p->ncookies);
2839 		if (ddi_status != DDI_DMA_MAPPED) {
2840 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2841 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2842 				"(status 0x%x ncookies %d.)", ddi_status,
2843 				dma_p->ncookies));
2844 			if (dma_p->acc_handle) {
2845 				ddi_dma_mem_free(&dma_p->acc_handle);
2846 				dma_p->acc_handle = NULL;
2847 			}
2848 			ddi_dma_free_handle(&dma_p->dma_handle);
2849 			dma_p->dma_handle = NULL;
2850 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2851 		}
2852 
2853 		if (dma_p->ncookies != 1) {
2854 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2855 				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
2856 				"> 1 cookie "
2857 				"(status 0x%x ncookies %d.)", ddi_status,
2858 				dma_p->ncookies));
2859 			if (dma_p->acc_handle) {
2860 				ddi_dma_mem_free(&dma_p->acc_handle);
2861 				dma_p->acc_handle = NULL;
2862 			}
2863 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2864 			ddi_dma_free_handle(&dma_p->dma_handle);
2865 			dma_p->dma_handle = NULL;
2866 			return (NXGE_ERROR);
2867 		}
2868 		break;
2869 
2870 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2871 	case B_TRUE:
2872 		kaddrp = (caddr_t)contig_mem_alloc(length);
2873 		if (kaddrp == NULL) {
2874 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2875 				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
2876 			ddi_dma_free_handle(&dma_p->dma_handle);
2877 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2878 		}
2879 
2880 		dma_p->alength = length;
2881 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2882 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2883 			&dma_p->dma_cookie, &dma_p->ncookies);
2884 		if (ddi_status != DDI_DMA_MAPPED) {
2885 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2886 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2887 				"(status 0x%x ncookies %d.)", ddi_status,
2888 				dma_p->ncookies));
2889 
2890 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2891 				"==> nxge_dma_mem_alloc: (not mapped)"
2892 				"length %lu (0x%x) "
2893 				"free contig kaddrp $%p "
2894 				"va_to_pa $%p",
2895 				length, length,
2896 				kaddrp,
2897 				va_to_pa(kaddrp)));
2898 
2899 
2900 			contig_mem_free((void *)kaddrp, length);
2901 			ddi_dma_free_handle(&dma_p->dma_handle);
2902 
2903 			dma_p->dma_handle = NULL;
2904 			dma_p->acc_handle = NULL;
2905 			dma_p->alength = 0;
2906 			dma_p->kaddrp = NULL;
2907 
2908 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2909 		}
2910 
2911 		if (dma_p->ncookies != 1 ||
2912 			(dma_p->dma_cookie.dmac_laddress == 0)) {
2913 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2914 				"nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
2915 				"cookie or "
2916 				"dmac_laddress is NULL $%p size %d "
2917 				" (status 0x%x ncookies %d.)",
2918 				ddi_status,
2919 				dma_p->dma_cookie.dmac_laddress,
2920 				dma_p->dma_cookie.dmac_size,
2921 				dma_p->ncookies));
2922 
2923 			contig_mem_free((void *)kaddrp, length);
2924 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2925 			ddi_dma_free_handle(&dma_p->dma_handle);
2926 
2927 			dma_p->alength = 0;
2928 			dma_p->dma_handle = NULL;
2929 			dma_p->acc_handle = NULL;
2930 			dma_p->kaddrp = NULL;
2931 
2932 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2933 		}
2934 		break;
2935 
2936 #else
2937 	case B_TRUE:
2938 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2939 			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2940 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2941 #endif
2942 	}
2943 
2944 	dma_p->kaddrp = kaddrp;
2945 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
2946 			dma_p->alength - RXBUF_64B_ALIGNED;
2947 #if defined(__i386)
2948 	dma_p->ioaddr_pp =
2949 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2950 #else
2951 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2952 #endif
2953 	dma_p->last_ioaddr_pp =
2954 #if defined(__i386)
2955 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
2956 #else
2957 		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
2958 #endif
2959 				dma_p->alength - RXBUF_64B_ALIGNED;
2960 
2961 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2962 
2963 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2964 	dma_p->orig_ioaddr_pp =
2965 		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
2966 	dma_p->orig_alength = length;
2967 	dma_p->orig_kaddrp = kaddrp;
2968 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2969 #endif
2970 
2971 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2972 		"dma buffer allocated: dma_p $%p "
2973 		"return dmac_laddress from cookie $%p cookie dmac_size %d "
2974 		"dma_p->ioaddr_pp $%p "
2975 		"dma_p->orig_ioaddr_pp $%p "
2976 		"orig_vatopa $%p "
2977 		"alength %d (0x%x) "
2978 		"kaddrp $%p "
2979 		"length %d (0x%x)",
2980 		dma_p,
2981 		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2982 		dma_p->ioaddr_pp,
2983 		dma_p->orig_ioaddr_pp,
2984 		dma_p->orig_vatopa,
2985 		dma_p->alength, dma_p->alength,
2986 		kaddrp,
2987 		length, length));
2988 
2989 	return (NXGE_OK);
2990 }
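
/*
 * Editorial note: both bind paths in nxge_dma_mem_alloc() insist on exactly
 * one DMA cookie (ncookies == 1).  The hardware is programmed with a single
 * base address per buffer, so a binding that fragments into multiple
 * cookies -- memory that is not physically contiguous from the device's
 * point of view -- cannot be used and is unwound immediately.
 */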
2991 
2992 static void
2993 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2994 {
2995 	if (dma_p->dma_handle != NULL) {
2996 		if (dma_p->ncookies) {
2997 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2998 			dma_p->ncookies = 0;
2999 		}
3000 		ddi_dma_free_handle(&dma_p->dma_handle);
3001 		dma_p->dma_handle = NULL;
3002 	}
3003 
3004 	if (dma_p->acc_handle != NULL) {
3005 		ddi_dma_mem_free(&dma_p->acc_handle);
3006 		dma_p->acc_handle = NULL;
3007 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3008 	}
3009 
3010 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3011 	if (dma_p->contig_alloc_type &&
3012 			dma_p->orig_kaddrp && dma_p->orig_alength) {
3013 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3014 			"kaddrp $%p (orig_kaddrp $%p)"
3015 			"mem type %d "
3016 			"orig_alength %d "
3017 			"alength 0x%x (%d)",
3018 			dma_p->kaddrp,
3019 			dma_p->orig_kaddrp,
3020 			dma_p->contig_alloc_type,
3021 			dma_p->orig_alength,
3022 			dma_p->alength, dma_p->alength));
3023 
3024 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3025 		dma_p->orig_alength = 0;
3026 		dma_p->orig_kaddrp = NULL;
3027 		dma_p->contig_alloc_type = B_FALSE;
3028 	}
3029 #endif
3030 	dma_p->kaddrp = NULL;
3031 	dma_p->alength = 0;
3032 }
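
/*
 * Editorial note: nxge_dma_mem_free() undoes nxge_dma_mem_alloc() in
 * reverse order -- unbind the handle, free the mapped memory (or the
 * contig_mem_alloc region), then free the handle -- and clears each field
 * as it goes, so calling it twice on the same nxge_dma_common_t is benign.
 */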
3033 
3034 /*
3035  *	nxge_m_start() -- start transmitting and receiving.
3036  *
3037  *	This function is called by the MAC layer when the first
3038  *	stream is opened, to get the hardware ready for sending
3039  *	and receiving packets.
3040  */
3041 static int
3042 nxge_m_start(void *arg)
3043 {
3044 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3045 
3046 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3047 
3048 	MUTEX_ENTER(nxgep->genlock);
3049 	if (nxge_init(nxgep) != NXGE_OK) {
3050 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3051 			"<== nxge_m_start: initialization failed"));
3052 		MUTEX_EXIT(nxgep->genlock);
3053 		return (EIO);
3054 	}
3055 
3056 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3057 		goto nxge_m_start_exit;
3058 	/*
3059 	 * Start timer to check the system error and tx hangs
3060 	 */
3061 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3062 		NXGE_CHECK_TIMER);
3063 
3064 	nxgep->link_notify = B_TRUE;
3065 
3066 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3067 
3068 nxge_m_start_exit:
3069 	MUTEX_EXIT(nxgep->genlock);
3070 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3071 	return (0);
3072 }
3073 
3074 /*
3075  *	nxge_m_stop(): stop transmitting and receiving.
3076  */
3077 static void
3078 nxge_m_stop(void *arg)
3079 {
3080 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3081 
3082 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3083 
3084 	if (nxgep->nxge_timerid) {
3085 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3086 		nxgep->nxge_timerid = 0;
3087 	}
3088 
3089 	MUTEX_ENTER(nxgep->genlock);
3090 	nxge_uninit(nxgep);
3091 
3092 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3093 
3094 	MUTEX_EXIT(nxgep->genlock);
3095 
3096 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3097 }
3098 
3099 static int
3100 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3101 {
3102 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3103 	struct 		ether_addr addrp;
3104 
3105 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3106 
3107 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3108 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3109 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3110 			"<== nxge_m_unicst: set unicast failed"));
3111 		return (EINVAL);
3112 	}
3113 
3114 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3115 
3116 	return (0);
3117 }
3118 
3119 static int
3120 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3121 {
3122 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3123 	struct 		ether_addr addrp;
3124 
3125 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3126 		"==> nxge_m_multicst: add %d", add));
3127 
3128 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3129 	if (add) {
3130 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3131 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3132 				"<== nxge_m_multicst: add multicast failed"));
3133 			return (EINVAL);
3134 		}
3135 	} else {
3136 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3137 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3138 				"<== nxge_m_multicst: del multicast failed"));
3139 			return (EINVAL);
3140 		}
3141 	}
3142 
3143 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3144 
3145 	return (0);
3146 }
3147 
3148 static int
3149 nxge_m_promisc(void *arg, boolean_t on)
3150 {
3151 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3152 
3153 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3154 		"==> nxge_m_promisc: on %d", on));
3155 
3156 	if (nxge_set_promisc(nxgep, on)) {
3157 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3158 			"<== nxge_m_promisc: set promisc failed"));
3159 		return (EINVAL);
3160 	}
3161 
3162 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3163 		"<== nxge_m_promisc: on %d", on));
3164 
3165 	return (0);
3166 }
3167 
3168 static void
3169 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3170 {
3171 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3172 	struct 		iocblk *iocp;
3173 	boolean_t 	need_privilege;
3174 	int 		err;
3175 	int 		cmd;
3176 
3177 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3178 
3179 	iocp = (struct iocblk *)mp->b_rptr;
3180 	iocp->ioc_error = 0;
3181 	need_privilege = B_TRUE;
3182 	cmd = iocp->ioc_cmd;
3183 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3184 	switch (cmd) {
3185 	default:
3186 		miocnak(wq, mp, 0, EINVAL);
3187 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3188 		return;
3189 
3190 	case LB_GET_INFO_SIZE:
3191 	case LB_GET_INFO:
3192 	case LB_GET_MODE:
3193 		need_privilege = B_FALSE;
3194 		break;
3195 	case LB_SET_MODE:
3196 		break;
3197 
3198 	case ND_GET:
3199 		need_privilege = B_FALSE;
3200 		break;
3201 	case ND_SET:
3202 		break;
3203 
3204 	case NXGE_GET_MII:
3205 	case NXGE_PUT_MII:
3206 	case NXGE_GET64:
3207 	case NXGE_PUT64:
3208 	case NXGE_GET_TX_RING_SZ:
3209 	case NXGE_GET_TX_DESC:
3210 	case NXGE_TX_SIDE_RESET:
3211 	case NXGE_RX_SIDE_RESET:
3212 	case NXGE_GLOBAL_RESET:
3213 	case NXGE_RESET_MAC:
3214 	case NXGE_TX_REGS_DUMP:
3215 	case NXGE_RX_REGS_DUMP:
3216 	case NXGE_INT_REGS_DUMP:
3217 	case NXGE_VIR_INT_REGS_DUMP:
3218 	case NXGE_PUT_TCAM:
3219 	case NXGE_GET_TCAM:
3220 	case NXGE_RTRACE:
3221 	case NXGE_RDUMP:
3222 
3223 		need_privilege = B_FALSE;
3224 		break;
3225 	case NXGE_INJECT_ERR:
3226 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3227 		nxge_err_inject(nxgep, wq, mp);
3228 		break;
3229 	}
3230 
3231 	if (need_privilege) {
3232 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3233 		if (err != 0) {
3234 			miocnak(wq, mp, 0, err);
3235 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3236 				"<== nxge_m_ioctl: no priv"));
3237 			return;
3238 		}
3239 	}
3240 
3241 	switch (cmd) {
3242 	case ND_GET:
3243 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
		/* FALLTHROUGH */
3244 	case ND_SET:
3245 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
3246 		nxge_param_ioctl(nxgep, wq, mp, iocp);
3247 		break;
3248 
3249 	case LB_GET_MODE:
3250 	case LB_SET_MODE:
3251 	case LB_GET_INFO_SIZE:
3252 	case LB_GET_INFO:
3253 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3254 		break;
3255 
3256 	case NXGE_GET_MII:
3257 	case NXGE_PUT_MII:
3258 	case NXGE_PUT_TCAM:
3259 	case NXGE_GET_TCAM:
3260 	case NXGE_GET64:
3261 	case NXGE_PUT64:
3262 	case NXGE_GET_TX_RING_SZ:
3263 	case NXGE_GET_TX_DESC:
3264 	case NXGE_TX_SIDE_RESET:
3265 	case NXGE_RX_SIDE_RESET:
3266 	case NXGE_GLOBAL_RESET:
3267 	case NXGE_RESET_MAC:
3268 	case NXGE_TX_REGS_DUMP:
3269 	case NXGE_RX_REGS_DUMP:
3270 	case NXGE_INT_REGS_DUMP:
3271 	case NXGE_VIR_INT_REGS_DUMP:
3272 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3273 			"==> nxge_m_ioctl: cmd 0x%x", cmd));
3274 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3275 		break;
3276 	}
3277 
3278 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3279 }
3280 
3281 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3282 
3283 static void
3284 nxge_m_resources(void *arg)
3285 {
3286 	p_nxge_t		nxgep = arg;
3287 	mac_rx_fifo_t 		mrf;
3288 	p_rx_rcr_rings_t	rcr_rings;
3289 	p_rx_rcr_ring_t		*rcr_p;
3290 	uint32_t		i, ndmas;
3291 	nxge_status_t		status;
3292 
3293 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3294 
3295 	MUTEX_ENTER(nxgep->genlock);
3296 
3297 	/*
3298 	 * CR 6492541 Check to see if the drv_state has been initialized,
3299 	 * if not, call nxge_init().
3300 	 */
3301 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3302 		status = nxge_init(nxgep);
3303 		if (status != NXGE_OK)
3304 			goto nxge_m_resources_exit;
3305 	}
3306 
3307 	mrf.mrf_type = MAC_RX_FIFO;
3308 	mrf.mrf_blank = nxge_rx_hw_blank;
3309 	mrf.mrf_arg = (void *)nxgep;
3310 
3311 	mrf.mrf_normal_blank_time = 128;
3312 	mrf.mrf_normal_pkt_count = 8;
3313 	rcr_rings = nxgep->rx_rcr_rings;
3314 	rcr_p = rcr_rings->rcr_rings;
3315 	ndmas = rcr_rings->ndmas;
3316 
3317 	/*
3318 	 * Export our receive resources to the MAC layer.
3319 	 */
3320 	for (i = 0; i < ndmas; i++) {
3321 		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
3322 				mac_resource_add(nxgep->mach,
3323 				    (mac_resource_t *)&mrf);
3324 
3325 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3326 			"==> nxge_m_resources: vdma %d dma %d "
3327 			"rcrptr 0x%016llx mac_handle 0x%016llx",
3328 			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
3329 			rcr_p[i],
3330 			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
3331 	}
3332 
3333 nxge_m_resources_exit:
3334 	MUTEX_EXIT(nxgep->genlock);
3335 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3336 }
3337 
3338 static void
3339 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3340 {
3341 	p_nxge_mmac_stats_t mmac_stats;
3342 	int i;
3343 	nxge_mmac_t *mmac_info;
3344 
3345 	mmac_info = &nxgep->nxge_mmac_info;
3346 
3347 	mmac_stats = &nxgep->statsp->mmac_stats;
3348 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3349 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3350 
3351 	for (i = 0; i < ETHERADDRL; i++) {
3352 		if (factory) {
3353 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3354 			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
3355 		} else {
3356 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3357 			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
3358 		}
3359 	}
3360 }
3361 
3362 /*
3363  * nxge_altmac_set() -- Set an alternate MAC address
3364  */
3365 static int
3366 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
3367 {
3368 	uint8_t addrn;
3369 	uint8_t portn;
3370 	npi_mac_addr_t altmac;
3371 	hostinfo_t mac_rdc;
3372 	p_nxge_class_pt_cfg_t clscfgp;
3373 
3374 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
3375 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
3376 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
3377 
3378 	portn = nxgep->mac.portnum;
3379 	addrn = (uint8_t)slot - 1;
3380 
3381 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
3382 		addrn, &altmac) != NPI_SUCCESS)
3383 		return (EIO);
3384 
3385 	/*
3386 	 * Set the rdc table number for the host info entry
3387 	 * for this mac address slot.
3388 	 */
3389 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
3390 	mac_rdc.value = 0;
3391 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
3392 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
3393 
3394 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
3395 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
3396 		return (EIO);
3397 	}
3398 
3399 	/*
3400 	 * Enable comparison with the alternate MAC address.
3401 	 * On BMAC ports the first alternate address is enabled by bit 1 of
3402 	 * register BMAC_ALTAD_CMPEN, while on XMAC ports it is bit 0 of
3403 	 * register XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
3404 	 * accordingly before calling npi_mac_altaddr_enable.
3405 	 */
3406 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3407 		addrn = (uint8_t)slot - 1;
3408 	else
3409 		addrn = (uint8_t)slot;
3410 
3411 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3412 		!= NPI_SUCCESS)
3413 		return (EIO);
3414 
3415 	return (0);
3416 }
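
/*
 * Editorial sketch (guarded out, not part of the driver): the w2/w1/w0
 * assignments in nxge_altmac_set() fold the six MAC-address bytes into
 * three 16-bit words, w2 carrying the most significant pair.  Standalone
 * model of the packing:
 */
#ifdef NXGE_EDITORIAL_SKETCH
static void
pack_mac_words(const uint8_t maddr[ETHERADDRL], uint16_t w[3])
{
	w[2] = ((uint16_t)maddr[0] << 8) | maddr[1];	/* most significant */
	w[1] = ((uint16_t)maddr[2] << 8) | maddr[3];
	w[0] = ((uint16_t)maddr[4] << 8) | maddr[5];	/* least significant */
}
#endif	/* NXGE_EDITORIAL_SKETCH */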
3417 
3418 /*
3419  * nxge_m_mmac_add() - find an unused address slot, set the address
3420  * value to the one specified, enable the port to start filtering on
3421  * the new MAC address.  Returns 0 on success.
3422  */
3423 static int
3424 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3425 {
3426 	p_nxge_t nxgep = arg;
3427 	mac_addr_slot_t slot;
3428 	nxge_mmac_t *mmac_info;
3429 	int err;
3430 	nxge_status_t status;
3431 
3432 	mutex_enter(nxgep->genlock);
3433 
3434 	/*
3435 	 * Make sure that nxge is initialized, if _start() has
3436 	 * not been called.
3437 	 */
3438 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3439 		status = nxge_init(nxgep);
3440 		if (status != NXGE_OK) {
3441 			mutex_exit(nxgep->genlock);
3442 			return (ENXIO);
3443 		}
3444 	}
3445 
3446 	mmac_info = &nxgep->nxge_mmac_info;
3447 	if (mmac_info->naddrfree == 0) {
3448 		mutex_exit(nxgep->genlock);
3449 		return (ENOSPC);
3450 	}
3451 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3452 		maddr->mma_addrlen)) {
3453 		mutex_exit(nxgep->genlock);
3454 		return (EINVAL);
3455 	}
3456 	/*
3457 	 * 	Search for the first available slot. Because naddrfree
3458 	 * is not zero, we are guaranteed to find one.
3459 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
3460 	 * MAC slot is slot 1.
3461 	 *	Each of the first two ports of Neptune has 16 alternate
3462 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
3463 	 * MAC addresses. We first search among the slots without bundled
3464 	 * factory MACs. If we fail to find one in that range, then we
3465 	 * search the slots with bundled factory MACs.  A factory MAC
3466 	 * will be wasted while the slot is used with a user MAC address.
3467 	 * But the slot could be used by factory MAC again after calling
3468 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3469 	 */
3470 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3471 		for (slot = mmac_info->num_factory_mmac + 1;
3472 			slot <= mmac_info->num_mmac; slot++) {
3473 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3474 				break;
3475 		}
3476 		if (slot > mmac_info->num_mmac) {
3477 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3478 				slot++) {
3479 				if (!(mmac_info->mac_pool[slot].flags
3480 					& MMAC_SLOT_USED))
3481 					break;
3482 			}
3483 		}
3484 	} else {
3485 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3486 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3487 				break;
3488 		}
3489 	}
3490 	ASSERT(slot <= mmac_info->num_mmac);
3491 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3492 		mutex_exit(nxgep->genlock);
3493 		return (err);
3494 	}
3495 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3496 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3497 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3498 	mmac_info->naddrfree--;
3499 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3500 
3501 	maddr->mma_slot = slot;
3502 
3503 	mutex_exit(nxgep->genlock);
3504 	return (0);
3505 }
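
/*
 * Editorial sketch (guarded out, not part of the driver): the slot search
 * in nxge_m_mmac_add() prefers the slots that carry no bundled factory MAC
 * (num_factory_mmac + 1 .. num_mmac) and only then falls back to the
 * factory-capable slots (1 .. num_factory_mmac), so factory addresses are
 * consumed by user MACs as rarely as possible.  Standalone model, returning
 * 0 when every slot is in use:
 */
#ifdef NXGE_EDITORIAL_SKETCH
static int
find_free_slot(const boolean_t *slot_used, int num_mmac, int num_factory)
{
	int	slot;

	for (slot = num_factory + 1; slot <= num_mmac; slot++)
		if (!slot_used[slot])
			return (slot);		/* non-factory slot first */
	for (slot = 1; slot <= num_factory; slot++)
		if (!slot_used[slot])
			return (slot);		/* then factory-capable */
	return (0);
}
#endif	/* NXGE_EDITORIAL_SKETCH */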
3506 
3507 /*
3508  * This function reserves an unused slot and programs the slot and the HW
3509  * with a factory mac address.
3510  */
3511 static int
3512 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
3513 {
3514 	p_nxge_t nxgep = arg;
3515 	mac_addr_slot_t slot;
3516 	nxge_mmac_t *mmac_info;
3517 	int err;
3518 	nxge_status_t status;
3519 
3520 	mutex_enter(nxgep->genlock);
3521 
3522 	/*
3523 	 * Make sure that nxge is initialized, if _start() has
3524 	 * not been called.
3525 	 */
3526 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3527 		status = nxge_init(nxgep);
3528 		if (status != NXGE_OK) {
3529 			mutex_exit(nxgep->genlock);
3530 			return (ENXIO);
3531 		}
3532 	}
3533 
3534 	mmac_info = &nxgep->nxge_mmac_info;
3535 	if (mmac_info->naddrfree == 0) {
3536 		mutex_exit(nxgep->genlock);
3537 		return (ENOSPC);
3538 	}
3539 
3540 	slot = maddr->mma_slot;
3541 	if (slot == -1) {  /* -1: Take the first available slot */
3542 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
3543 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3544 				break;
3545 		}
3546 		if (slot > mmac_info->num_factory_mmac) {
3547 			mutex_exit(nxgep->genlock);
3548 			return (ENOSPC);
3549 		}
3550 	}
3551 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
3552 		/*
3553 		 * Do not support factory MAC at a slot greater than
3554 		 * num_factory_mmac even when there are available factory
3555 		 * MAC addresses because the alternate MACs are bundled with
3556 		 * slot[1] through slot[num_factory_mmac]
3557 		 */
3558 		mutex_exit(nxgep->genlock);
3559 		return (EINVAL);
3560 	}
3561 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3562 		mutex_exit(nxgep->genlock);
3563 		return (EBUSY);
3564 	}
3565 	/* Verify the address to be reserved */
3566 	if (!mac_unicst_verify(nxgep->mach,
3567 		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
3568 		mutex_exit(nxgep->genlock);
3569 		return (EINVAL);
3570 	}
3571 	if ((err = nxge_altmac_set(nxgep,
3572 		mmac_info->factory_mac_pool[slot], slot)) != 0) {
3573 		mutex_exit(nxgep->genlock);
3574 		return (err);
3575 	}
3576 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
3577 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3578 	mmac_info->naddrfree--;
3579 
3580 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
3581 	mutex_exit(nxgep->genlock);
3582 
3583 	/* Pass info back to the caller */
3584 	maddr->mma_slot = slot;
3585 	maddr->mma_addrlen = ETHERADDRL;
3586 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3587 
3588 	return (0);
3589 }
3590 
3591 /*
3592  * Remove the specified mac address and update the HW not to filter
3593  * the mac address anymore.
3594  */
3595 static int
3596 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
3597 {
3598 	p_nxge_t nxgep = arg;
3599 	nxge_mmac_t *mmac_info;
3600 	uint8_t addrn;
3601 	uint8_t portn;
3602 	int err = 0;
3603 	nxge_status_t status;
3604 
3605 	mutex_enter(nxgep->genlock);
3606 
3607 	/*
3608 	 * Make sure that nxge is initialized, if _start() has
3609 	 * not been called.
3610 	 */
3611 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3612 		status = nxge_init(nxgep);
3613 		if (status != NXGE_OK) {
3614 			mutex_exit(nxgep->genlock);
3615 			return (ENXIO);
3616 		}
3617 	}
3618 
3619 	mmac_info = &nxgep->nxge_mmac_info;
3620 	if (slot < 1 || slot > mmac_info->num_mmac) {
3621 		mutex_exit(nxgep->genlock);
3622 		return (EINVAL);
3623 	}
3624 
3625 	portn = nxgep->mac.portnum;
3626 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3627 		addrn = (uint8_t)slot - 1;
3628 	else
3629 		addrn = (uint8_t)slot;
3630 
3631 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3632 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3633 				== NPI_SUCCESS) {
3634 			mmac_info->naddrfree++;
3635 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3636 			/*
3637 			 * Regardless of whether the MAC we just stopped
3638 			 * filtering is a user addr or a factory addr, we must set
3639 			 * the MMAC_VENDOR_ADDR flag if this slot has an
3640 			 * associated factory MAC to indicate that a factory
3641 			 * MAC is available.
3642 			 */
3643 			if (slot <= mmac_info->num_factory_mmac) {
3644 				mmac_info->mac_pool[slot].flags
3645 					|= MMAC_VENDOR_ADDR;
3646 			}
3647 			/*
3648 			 * Clear mac_pool[slot].addr so that kstat shows 0
3649 			 * alternate MAC address if the slot is not used.
3650 			 * (But nxge_m_mmac_get returns the factory MAC even
3651 			 * when the slot is not used!)
3652 			 */
3653 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3654 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3655 		} else {
3656 			err = EIO;
3657 		}
3658 	} else {
3659 		err = EINVAL;
3660 	}
3661 
3662 	mutex_exit(nxgep->genlock);
3663 	return (err);
3664 }
3665 
3666 
3667 /*
3668  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
3669  */
3670 static int
3671 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3672 {
3673 	p_nxge_t nxgep = arg;
3674 	mac_addr_slot_t slot;
3675 	nxge_mmac_t *mmac_info;
3676 	int err = 0;
3677 	nxge_status_t status;
3678 
3679 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3680 			maddr->mma_addrlen))
3681 		return (EINVAL);
3682 
3683 	slot = maddr->mma_slot;
3684 
3685 	mutex_enter(nxgep->genlock);
3686 
3687 	/*
3688 	 * Make sure that nxge is initialized if _start() has
3689 	 * not been called yet.
3690 	 */
3691 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3692 		status = nxge_init(nxgep);
3693 		if (status != NXGE_OK) {
3694 			mutex_exit(nxgep->genlock);
3695 			return (ENXIO);
3696 		}
3697 	}
3698 
3699 	mmac_info = &nxgep->nxge_mmac_info;
3700 	if (slot < 1 || slot > mmac_info->num_mmac) {
3701 		mutex_exit(nxgep->genlock);
3702 		return (EINVAL);
3703 	}
3704 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3705 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3706 			== 0) {
3707 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3708 				ETHERADDRL);
3709 			/*
3710 			 * Assume that the MAC passed down from the caller
3711 			 * is not a factory MAC address (The user should
3712 			 * call mmac_remove followed by mmac_reserve if
3713 			 * he wants to use the factory MAC for this slot).
3714 			 */
3715 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3716 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3717 		}
3718 	} else {
3719 		err = EINVAL;
3720 	}
3721 	mutex_exit(nxgep->genlock);
3722 	return (err);
3723 }
3724 
3725 /*
3726  * nxge_m_mmac_get() - Get the MAC address and other information
3727  * related to the slot.  mma_flags should be set to 0 in the call.
3728  * Note: although kstat shows MAC address as zero when a slot is
3729  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
3730  * to the caller as long as the slot is not using a user MAC address.
3731  * The following table shows the rules:
3732  *
3733  *				   USED    VENDOR    mma_addr
3734  * ------------------------------------------------------------
3735  * (1) Slot uses a user MAC:        yes      no     user MAC
3736  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
3737  * (3) Slot is not used but is
3738  *     factory MAC capable:         no       yes    factory MAC
3739  * (4) Slot is not used and is
3740  *     not factory MAC capable:     no       no        0
3741  * ------------------------------------------------------------
3742  */
3743 static int
3744 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3745 {
3746 	nxge_t *nxgep = arg;
3747 	mac_addr_slot_t slot;
3748 	nxge_mmac_t *mmac_info;
3749 	nxge_status_t status;
3750 
3751 	slot = maddr->mma_slot;
3752 
3753 	mutex_enter(nxgep->genlock);
3754 
3755 	/*
3756 	 * Make sure that nxge is initialized if _start() has
3757 	 * not been called yet.
3758 	 */
3759 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3760 		status = nxge_init(nxgep);
3761 		if (status != NXGE_OK) {
3762 			mutex_exit(nxgep->genlock);
3763 			return (ENXIO);
3764 		}
3765 	}
3766 
3767 	mmac_info = &nxgep->nxge_mmac_info;
3768 
3769 	if (slot < 1 || slot > mmac_info->num_mmac) {
3770 		mutex_exit(nxgep->genlock);
3771 		return (EINVAL);
3772 	}
3773 	maddr->mma_flags = 0;
3774 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3775 		maddr->mma_flags |= MMAC_SLOT_USED;
3776 
3777 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3778 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3779 		bcopy(mmac_info->factory_mac_pool[slot],
3780 			maddr->mma_addr, ETHERADDRL);
3781 		maddr->mma_addrlen = ETHERADDRL;
3782 	} else {
3783 		if (maddr->mma_flags & MMAC_SLOT_USED) {
3784 			bcopy(mmac_info->mac_pool[slot].addr,
3785 				maddr->mma_addr, ETHERADDRL);
3786 			maddr->mma_addrlen = ETHERADDRL;
3787 		} else {
3788 			bzero(maddr->mma_addr, ETHERADDRL);
3789 			maddr->mma_addrlen = 0;
3790 		}
3791 	}
3792 	mutex_exit(nxgep->genlock);
3793 	return (0);
3794 }
3795 
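/*
 * Illustrative sketch only (not driver code, hence the #ifdef guard):
 * how a caller of nxge_m_mmac_get() could decode the USED/VENDOR rule
 * table above.  The helper name is hypothetical.
 */
#ifdef	NXGE_MMAC_EXAMPLE
static const char *
nxge_example_slot_state(const mac_multi_addr_t *maddr)
{
	boolean_t used = (maddr->mma_flags & MMAC_SLOT_USED) != 0;
	boolean_t vendor = (maddr->mma_flags & MMAC_VENDOR_ADDR) != 0;

	if (used && !vendor)
		return ("slot uses a user MAC");		/* row (1) */
	if (used && vendor)
		return ("slot uses a factory MAC");		/* row (2) */
	if (vendor)
		return ("slot free, factory MAC capable");	/* row (3) */
	return ("slot free, mma_addr is zero");			/* row (4) */
}
#endif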
3796 
3797 static boolean_t
3798 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3799 {
3800 	nxge_t *nxgep = arg;
3801 	uint32_t *txflags = cap_data;
3802 	multiaddress_capab_t *mmacp = cap_data;
3803 
3804 	switch (cap) {
3805 	case MAC_CAPAB_HCKSUM:
3806 		*txflags = HCKSUM_INET_PARTIAL;
3807 		break;
3808 	case MAC_CAPAB_POLL:
3809 		/*
3810 		 * There's nothing for us to fill in; simply returning
3811 		 * B_TRUE to state that we support polling is sufficient.
3812 		 */
3813 		break;
3814 
3815 	case MAC_CAPAB_MULTIADDRESS:
3816 		mutex_enter(nxgep->genlock);
3817 
3818 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3819 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
3820 		mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */
3821 		/*
3822 		 * maddr_handle is driver's private data, passed back to
3823 		 * entry point functions as arg.
3824 		 */
3825 		mmacp->maddr_handle	= nxgep;
3826 		mmacp->maddr_add	= nxge_m_mmac_add;
3827 		mmacp->maddr_remove	= nxge_m_mmac_remove;
3828 		mmacp->maddr_modify	= nxge_m_mmac_modify;
3829 		mmacp->maddr_get	= nxge_m_mmac_get;
3830 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
3831 
3832 		mutex_exit(nxgep->genlock);
3833 		break;
3834 	case MAC_CAPAB_LSO: {
3835 		mac_capab_lso_t *cap_lso = cap_data;
3836 
3837 		if (nxgep->soft_lso_enable) {
3838 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3839 			if (nxge_lso_max > NXGE_LSO_MAXLEN) {
3840 				nxge_lso_max = NXGE_LSO_MAXLEN;
3841 			}
3842 			cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max;
3843 			break;
3844 		} else {
3845 			return (B_FALSE);
3846 		}
3847 	}
3848 
3849 	default:
3850 		return (B_FALSE);
3851 	}
3852 	return (B_TRUE);
3853 }
3854 
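/*
 * Sketch only: how a GLDv3-style consumer might probe the LSO
 * capability exposed by nxge_m_getcapab() above.  Guarded out of the
 * build; the helper name is hypothetical.
 */
#ifdef	NXGE_CAPAB_EXAMPLE
static uint64_t
nxge_example_query_lso_max(nxge_t *nxgep)
{
	mac_capab_lso_t lso;

	bzero(&lso, sizeof (lso));
	if (nxge_m_getcapab(nxgep, MAC_CAPAB_LSO, &lso) &&
	    (lso.lso_flags & LSO_TX_BASIC_TCP_IPV4))
		return (lso.lso_basic_tcp_ipv4.lso_max);
	return (0);	/* soft_lso_enable is off */
}
#endif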
3855 static boolean_t
3856 nxge_param_locked(mac_prop_id_t pr_num)
3857 {
3858 	/*
3859 	 * All adv_* parameters are locked (read-only) while
3860 	 * the device is in any sort of loopback mode ...
3861 	 */
3862 	switch (pr_num) {
3863 		case DLD_PROP_ADV_1000FDX_CAP:
3864 		case DLD_PROP_EN_1000FDX_CAP:
3865 		case DLD_PROP_ADV_1000HDX_CAP:
3866 		case DLD_PROP_EN_1000HDX_CAP:
3867 		case DLD_PROP_ADV_100FDX_CAP:
3868 		case DLD_PROP_EN_100FDX_CAP:
3869 		case DLD_PROP_ADV_100HDX_CAP:
3870 		case DLD_PROP_EN_100HDX_CAP:
3871 		case DLD_PROP_ADV_10FDX_CAP:
3872 		case DLD_PROP_EN_10FDX_CAP:
3873 		case DLD_PROP_ADV_10HDX_CAP:
3874 		case DLD_PROP_EN_10HDX_CAP:
3875 		case DLD_PROP_AUTONEG:
3876 		case DLD_PROP_FLOWCTRL:
3877 			return (B_TRUE);
3878 	}
3879 	return (B_FALSE);
3880 }
3881 
3882 /*
3883  * callback functions for set/get of properties
3884  */
3885 static int
3886 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3887     uint_t pr_valsize, const void *pr_val)
3888 {
3889 	nxge_t		*nxgep = barg;
3890 	p_nxge_param_t	param_arr;
3891 	p_nxge_stats_t	statsp;
3892 	int		err = 0;
3893 	uint8_t		val;
3894 	uint32_t	cur_mtu, new_mtu, old_framesize;
3895 	link_flowctrl_t	fl;
3896 
3897 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
3898 	param_arr = nxgep->param_arr;
3899 	statsp = nxgep->statsp;
3900 	mutex_enter(nxgep->genlock);
3901 	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
3902 	    nxge_param_locked(pr_num)) {
3903 		/*
3904 		 * All adv_* parameters are locked (read-only)
3905 		 * while the device is in any sort of loopback mode.
3906 		 */
3907 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3908 		    "==> nxge_m_setprop: loopback mode: read only"));
3909 		mutex_exit(nxgep->genlock);
3910 		return (EBUSY);
3911 	}
3912 
3913 	val = *(uint8_t *)pr_val;
3914 	switch (pr_num) {
3915 		case DLD_PROP_EN_1000FDX_CAP:
3916 			nxgep->param_en_1000fdx = val;
3917 			param_arr[param_anar_1000fdx].value = val;
3918 
3919 			goto reprogram;
3920 
3921 		case DLD_PROP_EN_100FDX_CAP:
3922 			nxgep->param_en_100fdx = val;
3923 			param_arr[param_anar_100fdx].value = val;
3924 
3925 			goto reprogram;
3926 
3927 		case DLD_PROP_EN_10FDX_CAP:
3928 			nxgep->param_en_10fdx = val;
3929 			param_arr[param_anar_10fdx].value = val;
3930 
3931 			goto reprogram;
3932 
3933 		case DLD_PROP_EN_1000HDX_CAP:
3934 		case DLD_PROP_EN_100HDX_CAP:
3935 		case DLD_PROP_EN_10HDX_CAP:
3936 		case DLD_PROP_ADV_1000FDX_CAP:
3937 		case DLD_PROP_ADV_1000HDX_CAP:
3938 		case DLD_PROP_ADV_100FDX_CAP:
3939 		case DLD_PROP_ADV_100HDX_CAP:
3940 		case DLD_PROP_ADV_10FDX_CAP:
3941 		case DLD_PROP_ADV_10HDX_CAP:
3942 		case DLD_PROP_STATUS:
3943 		case DLD_PROP_SPEED:
3944 		case DLD_PROP_DUPLEX:
3945 			err = EINVAL; /* cannot set read-only properties */
3946 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3947 			    "==> nxge_m_setprop:  read only property %d",
3948 			    pr_num));
3949 			break;
3950 
3951 		case DLD_PROP_AUTONEG:
3952 			param_arr[param_autoneg].value = val;
3953 
3954 			goto reprogram;
3955 
3956 		case DLD_PROP_DEFMTU:
3957 			if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3958 				err = EBUSY;
3959 				break;
3960 			}
3961 
3962 			cur_mtu = nxgep->mac.default_mtu;
3963 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3964 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3965 			    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
3966 			    new_mtu, nxgep->mac.is_jumbo));
3967 
3968 			if (new_mtu == cur_mtu) {
3969 				err = 0;
3970 				break;
3971 			}
3972 			if (new_mtu < NXGE_DEFAULT_MTU ||
3973 			    new_mtu > NXGE_MAXIMUM_MTU) {
3974 				err = EINVAL;
3975 				break;
3976 			}
3977 
3978 			if ((new_mtu > NXGE_DEFAULT_MTU) &&
3979 			    !nxgep->mac.is_jumbo) {
3980 				err = EINVAL;
3981 				break;
3982 			}
3983 
3984 			old_framesize = (uint32_t)nxgep->mac.maxframesize;
3985 			nxgep->mac.maxframesize = (uint16_t)
3986 			    (new_mtu + NXGE_EHEADER_VLAN_CRC);
3987 			if (nxge_mac_set_framesize(nxgep)) {
3988 				nxgep->mac.maxframesize =
3989 				    (uint16_t)old_framesize;
3990 				err = EINVAL;
3991 				break;
3992 			}
3993 
3994 			err = mac_maxsdu_update(nxgep->mach, new_mtu);
3995 			if (err) {
3996 				nxgep->mac.maxframesize =
3997 				    (uint16_t)old_framesize;
3998 				err = EINVAL;
3999 				break;
4000 			}
4001 
4002 			nxgep->mac.default_mtu = new_mtu;
4003 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4004 			    "==> nxge_m_setprop: set MTU: %d maxframe %d",
4005 			    new_mtu, nxgep->mac.maxframesize));
4006 			break;
4007 
4008 		case DLD_PROP_FLOWCTRL:
4009 			bcopy(pr_val, &fl, sizeof (fl));
4010 			switch (fl) {
4011 			default:
4012 				err = EINVAL;
4013 				break;
4014 
4015 			case LINK_FLOWCTRL_NONE:
4016 				param_arr[param_anar_pause].value = 0;
4017 				break;
4018 
4019 			case LINK_FLOWCTRL_RX:
4020 				param_arr[param_anar_pause].value = 1;
4021 				break;
4022 
4023 			case LINK_FLOWCTRL_TX:
4024 			case LINK_FLOWCTRL_BI:
4025 				err = EINVAL;
4026 				break;
4027 			}
4028 
4029 reprogram:
4030 			if (err == 0) {
4031 				if (!nxge_param_link_update(nxgep)) {
4032 					err = EINVAL;
4033 				}
4034 			}
4035 			break;
4036 
4037 		default:
4038 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4039 			    "==> nxge_m_setprop: private property"));
4040 			err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
4041 			    pr_val);
4042 			break;
4043 	}
4044 
4045 	mutex_exit(nxgep->genlock);
4046 
4047 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4048 	    "<== nxge_m_setprop (return %d)", err));
4049 	return (err);
4050 }
4051 
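/*
 * Worked example of the DLD_PROP_DEFMTU arithmetic above (sketch
 * only, guarded out of the build): the programmed frame size is the
 * MTU plus the Ethernet header, VLAN tag and CRC overhead that
 * NXGE_EHEADER_VLAN_CRC accounts for; e.g. an MTU of 1500 maps to a
 * 1522-byte maximum frame when that overhead is 22 bytes.
 */
#ifdef	NXGE_MTU_EXAMPLE
static uint16_t
nxge_example_mtu_to_framesize(uint32_t new_mtu)
{
	/* Mirrors the maxframesize computation in nxge_m_setprop() */
	return ((uint16_t)(new_mtu + NXGE_EHEADER_VLAN_CRC));
}
#endif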
4052 static int
4053 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4054     uint_t pr_valsize, void *pr_val)
4055 {
4056 	nxge_t 		*nxgep = barg;
4057 	p_nxge_param_t	param_arr = nxgep->param_arr;
4058 	p_nxge_stats_t	statsp = nxgep->statsp;
4059 	int		err = 0;
4060 	link_flowctrl_t	fl;
4061 	uint64_t	tmp = 0;
4062 
4063 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4064 	    "==> nxge_m_getprop: pr_num %d", pr_num));
4065 	bzero(pr_val, pr_valsize);
4066 	switch (pr_num) {
4067 		case DLD_PROP_DUPLEX:
4068 			if (pr_valsize < sizeof (uint8_t))
4069 				return (EINVAL);
4070 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4071 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4072 			    "==> nxge_m_getprop: duplex mode %d",
4073 			    *(uint8_t *)pr_val));
4074 			break;
4075 
4076 		case DLD_PROP_SPEED:
4077 			if (pr_valsize < sizeof (uint64_t))
4078 				return (EINVAL);
4079 			tmp = statsp->mac_stats.link_speed * 1000000ull;
4080 			bcopy(&tmp, pr_val, sizeof (tmp));
4081 			break;
4082 
4083 		case DLD_PROP_STATUS:
4084 			if (pr_valsize < sizeof (uint8_t))
4085 				return (EINVAL);
4086 			*(uint8_t *)pr_val = statsp->mac_stats.link_up;
4087 			break;
4088 
4089 		case DLD_PROP_AUTONEG:
4090 			if (pr_valsize < sizeof (uint8_t))
4091 				return (EINVAL);
4092 			*(uint8_t *)pr_val =
4093 			    param_arr[param_autoneg].value;
4094 			break;
4095 
4096 
4097 		case DLD_PROP_DEFMTU: {
4098 			if (pr_valsize < sizeof (uint64_t))
4099 				return (EINVAL);
4100 			tmp = nxgep->mac.default_mtu;
4101 			bcopy(&tmp, pr_val, sizeof (tmp));
4102 			break;
4103 		}
4104 
4105 		case DLD_PROP_FLOWCTRL:
4106 			if (pr_valsize < sizeof (link_flowctrl_t))
4107 				return (EINVAL);
4108 
4109 			fl = LINK_FLOWCTRL_NONE;
4110 			if (param_arr[param_anar_pause].value) {
4111 				fl = LINK_FLOWCTRL_RX;
4112 			}
4113 			bcopy(&fl, pr_val, sizeof (fl));
4114 			break;
4115 
4116 		case DLD_PROP_ADV_1000FDX_CAP:
4117 			if (pr_valsize < sizeof (uint8_t))
4118 				return (EINVAL);
4119 			*(uint8_t *)pr_val =
4120 			    param_arr[param_anar_1000fdx].value;
4121 			break;
4122 
4123 		case DLD_PROP_EN_1000FDX_CAP:
4124 			if (pr_valsize < sizeof (uint8_t))
4125 				return (EINVAL);
4126 			*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4127 			break;
4128 
4129 		case DLD_PROP_ADV_100FDX_CAP:
4130 			if (pr_valsize < sizeof (uint8_t))
4131 				return (EINVAL);
4132 			*(uint8_t *)pr_val =
4133 			    param_arr[param_anar_100fdx].value;
4134 			break;
4135 
4136 		case DLD_PROP_EN_100FDX_CAP:
4137 			if (pr_valsize < sizeof (uint8_t))
4138 				return (EINVAL);
4139 			*(uint8_t *)pr_val = nxgep->param_en_100fdx;
4140 			break;
4141 
4142 		case DLD_PROP_ADV_10FDX_CAP:
4143 			if (pr_valsize < sizeof (uint8_t))
4144 				return (EINVAL);
4145 			*(uint8_t *)pr_val =
4146 			    param_arr[param_anar_10fdx].value;
4147 			break;
4148 
4149 		case DLD_PROP_EN_10FDX_CAP:
4150 			if (pr_valsize < sizeof (uint8_t))
4151 				return (EINVAL);
4152 			*(uint8_t *)pr_val = nxgep->param_en_10fdx;
4153 			break;
4154 
4155 		case DLD_PROP_EN_1000HDX_CAP:
4156 		case DLD_PROP_EN_100HDX_CAP:
4157 		case DLD_PROP_EN_10HDX_CAP:
4158 		case DLD_PROP_ADV_1000HDX_CAP:
4159 		case DLD_PROP_ADV_100HDX_CAP:
4160 		case DLD_PROP_ADV_10HDX_CAP:
4161 			err = EINVAL;
4162 			break;
4163 
4164 		default:
4165 			err = nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4166 			    pr_val);
4167 	}
4168 
4169 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));
4170 
4171 	return (err);
4172 }
4173 
4174 /* ARGSUSED */
4175 static int
4176 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4177     const void *pr_val)
4178 {
4179 	p_nxge_param_t	param_arr = nxgep->param_arr;
4180 	int		err = 0;
4181 	long		result;
4182 
4183 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4184 	    "==> nxge_set_priv_prop: name %s", pr_name));
4185 
4186 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4187 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4188 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4189 		    "<== nxge_set_priv_prop: name %s "
4190 		    "pr_val %s result %d "
4191 		    "param %d is_jumbo %d",
4192 		    pr_name, pr_val, result,
4193 		    param_arr[param_accept_jumbo].value,
4194 		    nxgep->mac.is_jumbo));
4195 
4196 		if (result < 0 || result > 1)
4197 			return (EINVAL);
4198 
4199 		if (nxgep->mac.is_jumbo == (uint32_t)result) {
4200 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4201 			    "no change (%d %d)",
4202 			    nxgep->mac.is_jumbo,
4203 			    result));
4204 			return (0);
4205 		}
4206 
4207 		/* Apply the value only after it has been validated */
4208 		param_arr[param_accept_jumbo].value = result;
4209 		nxgep->mac.is_jumbo = result ? B_TRUE : B_FALSE;
4214 
4215 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4216 		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
4217 		    pr_name, result, nxgep->mac.is_jumbo));
4218 
4219 		return (err);
4220 	}
4221 
4222 	/* Blanking */
4223 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4224 		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4225 		    (char *)pr_val,
4226 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4227 		if (err) {
4228 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4229 			    "<== nxge_set_priv_prop: "
4230 			    "unable to set (%s)", pr_name));
4231 			err = EINVAL;
4232 		} else {
4233 			err = 0;
4234 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4235 			    "<== nxge_set_priv_prop: "
4236 			    "set (%s)", pr_name));
4237 		}
4238 
4239 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4240 		    "<== nxge_set_priv_prop: name %s (value %s)",
4241 		    pr_name, pr_val));
4242 
4243 		return (err);
4244 	}
4245 
4246 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4247 		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4248 		    (char *)pr_val,
4249 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4250 		if (err) {
4251 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4252 			    "<== nxge_set_priv_prop: "
4253 			    "unable to set (%s)", pr_name));
4254 			err = EINVAL;
4255 		} else {
4256 			err = 0;
4257 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4258 			    "<== nxge_set_priv_prop: "
4259 			    "set (%s)", pr_name));
4260 		}
4261 
4262 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4263 		    "<== nxge_set_priv_prop: name %s (value %s)",
4264 		    pr_name, pr_val));
4265 
4266 		return (err);
4267 	}
4268 
4269 	/* Classification */
4270 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4271 		if (pr_val == NULL) {
4272 			err = EINVAL;
4273 			return (err);
4274 		}
4275 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4276 
4277 		err = nxge_param_set_ip_opt(nxgep, NULL,
4278 		    NULL, (char *)pr_val,
4279 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4280 
4281 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4282 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4283 		    pr_name, result));
4284 
4285 		return (err);
4286 	}
4287 
4288 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4289 		if (pr_val == NULL) {
4290 			err = EINVAL;
4291 			return (err);
4292 		}
4293 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4294 
4295 		err = nxge_param_set_ip_opt(nxgep, NULL,
4296 		    NULL, (char *)pr_val,
4297 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4298 
4299 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4300 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4301 		    pr_name, result));
4302 
4303 		return (err);
4304 	}
4305 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4306 		if (pr_val == NULL) {
4307 			err = EINVAL;
4308 			return (err);
4309 		}
4310 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4311 
4312 		err = nxge_param_set_ip_opt(nxgep, NULL,
4313 		    NULL, (char *)pr_val,
4314 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4315 
4316 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4317 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4318 		    pr_name, result));
4319 
4320 		return (err);
4321 	}
4322 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4323 		if (pr_val == NULL) {
4324 			err = EINVAL;
4325 			return (err);
4326 		}
4327 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4328 
4329 		err = nxge_param_set_ip_opt(nxgep, NULL,
4330 		    NULL, (char *)pr_val,
4331 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
4332 
4333 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4334 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4335 		    pr_name, result));
4336 
4337 		return (err);
4338 	}
4339 
4340 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
4341 		if (pr_val == NULL) {
4342 			err = EINVAL;
4343 			return (err);
4344 		}
4345 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4346 
4347 		err = nxge_param_set_ip_opt(nxgep, NULL,
4348 		    NULL, (char *)pr_val,
4349 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
4350 
4351 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4352 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4353 		    pr_name, result));
4354 
4355 		return (err);
4356 	}
4357 
4358 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
4359 		if (pr_val == NULL) {
4360 			err = EINVAL;
4361 			return (err);
4362 		}
4363 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4364 
4365 		err = nxge_param_set_ip_opt(nxgep, NULL,
4366 		    NULL, (char *)pr_val,
4367 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
4368 
4369 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4370 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4371 		    pr_name, result));
4372 
4373 		return (err);
4374 	}
4375 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
4376 		if (pr_val == NULL) {
4377 			err = EINVAL;
4378 			return (err);
4379 		}
4380 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4381 
4382 		err = nxge_param_set_ip_opt(nxgep, NULL,
4383 		    NULL, (char *)pr_val,
4384 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
4385 
4386 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4387 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4388 		    pr_name, result));
4389 
4390 		return (err);
4391 	}
4392 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4393 		if (pr_val == NULL) {
4394 			err = EINVAL;
4395 			return (err);
4396 		}
4397 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4398 
4399 		err = nxge_param_set_ip_opt(nxgep, NULL,
4400 		    NULL, (char *)pr_val,
4401 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
4402 
4403 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4404 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4405 		    pr_name, result));
4406 
4407 		return (err);
4408 	}
4409 
4410 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4411 		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4412 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4413 			    "==> nxge_set_priv_prop: name %s (busy)", pr_name));
4414 			err = EBUSY;
4415 			return (err);
4416 		}
4417 		if (pr_val == NULL) {
4418 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4419 			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
4420 			err = EINVAL;
4421 			return (err);
4422 		}
4423 
4424 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4425 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4426 		    "<== nxge_set_priv_prop: name %s "
4427 		    "(lso %d pr_val %s value %d)",
4428 		    pr_name, nxgep->soft_lso_enable, pr_val, result));
4429 
4430 		if (result < 0 || result > 1)
4431 			return (EINVAL);
4432 
4433 		if (nxgep->soft_lso_enable == (uint32_t)result) {
4434 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4435 			    "no change (%d %d)",
4436 			    nxgep->soft_lso_enable, result));
4437 			return (0);
4438 		}
4439 
4440 		/* Apply the value only after it has been validated */
4441 		nxgep->soft_lso_enable = (int)result;
4442 
4443 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4444 		    "<== nxge_set_priv_prop: name %s (value %d)",
4445 		    pr_name, result));
4446 
4447 		return (err);
4448 	}
4449 
4450 	return (EINVAL);
4451 }
4452 
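/*
 * Sketch of the parse-and-range-check pattern that the boolean
 * private properties above (_accept_jumbo, _soft_lso_enable) follow.
 * Guarded out of the build; the helper name is hypothetical.
 */
#ifdef	NXGE_PRIV_PROP_EXAMPLE
static int
nxge_example_parse_bool(const void *pr_val, uint32_t *valp)
{
	long result;

	if (pr_val == NULL)
		return (EINVAL);
	(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
	if (result < 0 || result > 1)
		return (EINVAL);
	*valp = (uint32_t)result;
	return (0);
}
#endif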
4453 static int
4454 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4455     void *pr_val)
4456 {
4457 	p_nxge_param_t	param_arr = nxgep->param_arr;
4458 	char		valstr[MAXNAMELEN];
4459 	int		err = EINVAL;
4460 	uint_t		strsize;
4461 
4462 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4463 	    "==> nxge_get_priv_prop: property %s", pr_name));
4464 
4465 	/* function number */
4466 	if (strcmp(pr_name, "_function_number") == 0) {
4467 		(void) sprintf(valstr, "%d", nxgep->function_num);
4468 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4469 		    "==> nxge_get_priv_prop: name %s "
4470 		    "(value %d valstr %s)",
4471 		    pr_name, nxgep->function_num, valstr));
4472 
4473 		err = 0;
4474 		goto done;
4475 	}
4476 
4477 	/* Neptune firmware version */
4478 	if (strcmp(pr_name, "_fw_version") == 0) {
4479 		(void) sprintf(valstr, "%s", nxgep->vpd_info.ver);
4480 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4481 		    "==> nxge_get_priv_prop: name %s "
4482 		    "(value %s valstr %s)",
4483 		    pr_name, nxgep->vpd_info.ver, valstr));
4484 
4485 		err = 0;
4486 		goto done;
4487 	}
4488 
4489 	/* port PHY mode */
4490 	if (strcmp(pr_name, "_port_mode") == 0) {
4491 		switch (nxgep->mac.portmode) {
4492 		case PORT_1G_COPPER:
4493 			(void) sprintf(valstr, "1G copper %s",
4494 			    nxgep->hot_swappable_phy ?
4495 			    "[Hot Swappable]" : "");
4496 			break;
4497 		case PORT_1G_FIBER:
4498 			(void) sprintf(valstr, "1G fiber %s",
4499 			    nxgep->hot_swappable_phy ?
4500 			    "[hot swappable]" : "");
4501 			break;
4502 		case PORT_10G_COPPER:
4503 			(void) sprintf(valstr, "10G copper %s",
4504 			    nxgep->hot_swappable_phy ?
4505 			    "[hot swappable]" : "");
4506 			break;
4507 		case PORT_10G_FIBER:
4508 			(void) sprintf(valstr, "10G fiber %s",
4509 			    nxgep->hot_swappable_phy ?
4510 			    "[hot swappable]" : "");
4511 			break;
4512 		case PORT_10G_SERDES:
4513 			(void) sprintf(valstr, "10G serdes %s",
4514 			    nxgep->hot_swappable_phy ?
4515 			    "[hot swappable]" : "");
4516 			break;
4517 		case PORT_1G_SERDES:
4518 			(void) sprintf(valstr, "1G serdes %s",
4519 			    nxgep->hot_swappable_phy ?
4520 			    "[hot swappable]" : "");
4521 			break;
4522 		case PORT_1G_RGMII_FIBER:
4523 			(void) sprintf(valstr, "1G rgmii fiber %s",
4524 			    nxgep->hot_swappable_phy ?
4525 			    "[hot swappable]" : "");
4526 			break;
4527 		case PORT_HSP_MODE:
4528 			(void) sprintf(valstr,
4529 			    "phy not present[hot swappable]");
4530 			break;
4531 		default:
4532 			(void) sprintf(valstr, "unknown %s",
4533 			    nxgep->hot_swappable_phy ?
4534 			    "[hot swappable]" : "");
4535 			break;
4536 		}
4537 
4538 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4539 		    "==> nxge_get_priv_prop: name %s (value %s)",
4540 		    pr_name, valstr));
4541 
4542 		err = 0;
4543 		goto done;
4544 	}
4545 
4546 	/* Hot swappable PHY */
4547 	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
4548 		(void) sprintf(valstr, "%s",
4549 		    nxgep->hot_swappable_phy ?
4550 		    "yes" : "no");
4551 
4552 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4553 		    "==> nxge_get_priv_prop: name %s "
4554 		    "(value %d valstr %s)",
4555 		    pr_name, nxgep->hot_swappable_phy, valstr));
4556 
4557 		err = 0;
4558 		goto done;
4559 	}
4560 
4561 
4562 	/* accept jumbo */
4563 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4564 		(void) sprintf(valstr, "%d", nxgep->mac.is_jumbo);
4565 		err = 0;
4566 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4567 		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
4568 		    pr_name,
4569 		    (uint32_t)param_arr[param_accept_jumbo].value,
4570 		    nxgep->mac.is_jumbo,
4571 		    nxge_jumbo_enable));
4572 
4573 		goto done;
4574 	}
4575 
4576 	/* Receive Interrupt Blanking Parameters */
4577 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4578 		(void) sprintf(valstr, "%d", nxgep->intr_timeout);
4579 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4580 		    "==> nxge_get_priv_prop: name %s (value %d)",
4581 		    pr_name,
4582 		    (uint32_t)nxgep->intr_timeout));
4583 		err = 0;
4584 		goto done;
4585 	}
4586 
4587 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4588 		(void) sprintf(valstr, "%d", nxgep->intr_threshold);
4589 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4590 		    "==> nxge_get_priv_prop: name %s (value %d)",
4591 		    pr_name, (uint32_t)nxgep->intr_threshold));
4592 
4593 		err = 0;
4594 		goto done;
4595 	}
4596 
4597 	/* Classification and Load Distribution Configuration */
4598 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4599 		err = nxge_dld_get_ip_opt(nxgep,
4600 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4601 
4602 		(void) sprintf(valstr, "%x",
4603 		    (int)param_arr[param_class_opt_ipv4_tcp].value);
4604 
4605 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4606 		    "==> nxge_get_priv_prop: %s", valstr));
4607 		goto done;
4608 	}
4609 
4610 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4611 		err = nxge_dld_get_ip_opt(nxgep,
4612 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4613 
4614 		(void) sprintf(valstr, "%x",
4615 		    (int)param_arr[param_class_opt_ipv4_udp].value);
4616 
4617 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4618 		    "==> nxge_get_priv_prop: %s", valstr));
4619 		goto done;
4620 	}
4621 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4622 		err = nxge_dld_get_ip_opt(nxgep,
4623 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4624 
4625 		(void) sprintf(valstr, "%x",
4626 		    (int)param_arr[param_class_opt_ipv4_ah].value);
4627 
4628 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4629 		    "==> nxge_get_priv_prop: %s", valstr));
4630 		goto done;
4631 	}
4632 
4633 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4634 		err = nxge_dld_get_ip_opt(nxgep,
4635 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
4636 
4637 		(void) sprintf(valstr, "%x",
4638 		    (int)param_arr[param_class_opt_ipv4_sctp].value);
4639 
4640 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4641 		    "==> nxge_get_priv_prop: %s", valstr));
4642 		goto done;
4643 	}
4644 
4645 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
4646 		err = nxge_dld_get_ip_opt(nxgep,
4647 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
4648 
4649 		(void) sprintf(valstr, "%x",
4650 		    (int)param_arr[param_class_opt_ipv6_tcp].value);
4651 
4652 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4653 		    "==> nxge_get_priv_prop: %s", valstr));
4654 		goto done;
4656 	}
4657 
4658 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
4659 		err = nxge_dld_get_ip_opt(nxgep,
4660 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
4661 
4662 		(void) sprintf(valstr, "%x",
4663 		    (int)param_arr[param_class_opt_ipv6_udp].value);
4664 
4665 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4666 		    "==> nxge_get_priv_prop: %s", valstr));
4667 		goto done;
4668 	}
4669 
4670 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
4671 		err = nxge_dld_get_ip_opt(nxgep,
4672 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
4673 
4674 		(void) sprintf(valstr, "%x",
4675 		    (int)param_arr[param_class_opt_ipv6_ah].value);
4676 
4677 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4678 		    "==> nxge_get_priv_prop: %s", valstr));
4679 		goto done;
4680 	}
4681 
4682 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4683 		err = nxge_dld_get_ip_opt(nxgep,
4684 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
4685 
4686 		(void) sprintf(valstr, "%x",
4687 		    (int)param_arr[param_class_opt_ipv6_sctp].value);
4688 
4689 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4690 		    "==> nxge_get_priv_prop: %s", valstr));
4691 		goto done;
4692 	}
4693 
4694 	/* Software LSO */
4695 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4696 		(void) sprintf(valstr, "%d", nxgep->soft_lso_enable);
4697 		err = 0;
4698 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4699 		    "==> nxge_get_priv_prop: name %s (value %d)",
4700 		    pr_name, nxgep->soft_lso_enable));
4701 
4702 		goto done;
4703 	}
4704 
4705 done:
4706 	if (err == 0) {
4707 		strsize = (uint_t)strlen(valstr);
4708 		if (pr_valsize <= strsize) {	/* leave room for the NUL */
4709 			err = ENOBUFS;
4710 		} else {
4711 			(void) strlcpy(pr_val, valstr, pr_valsize);
4712 		}
4713 	}
4714 
4715 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4716 	    "<== nxge_get_priv_prop: return %d", err));
4717 	return (err);
4718 }
4719 
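/*
 * Sketch of the copy-out contract at the `done' label above: the
 * caller's buffer must hold the string plus its terminating NUL, or
 * ENOBUFS is returned and nothing is copied back.  Guarded out of
 * the build; the helper name is hypothetical.
 */
#ifdef	NXGE_PRIV_PROP_EXAMPLE
static int
nxge_example_copyout_str(const char *valstr, void *pr_val,
    uint_t pr_valsize)
{
	if (pr_valsize <= (uint_t)strlen(valstr))
		return (ENOBUFS);
	(void) strlcpy(pr_val, valstr, pr_valsize);
	return (0);
}
#endif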
4720 /*
4721  * Module loading and removing entry points.
4722  */
4723 
4724 static	struct cb_ops 	nxge_cb_ops = {
4725 	nodev,			/* cb_open */
4726 	nodev,			/* cb_close */
4727 	nodev,			/* cb_strategy */
4728 	nodev,			/* cb_print */
4729 	nodev,			/* cb_dump */
4730 	nodev,			/* cb_read */
4731 	nodev,			/* cb_write */
4732 	nodev,			/* cb_ioctl */
4733 	nodev,			/* cb_devmap */
4734 	nodev,			/* cb_mmap */
4735 	nodev,			/* cb_segmap */
4736 	nochpoll,		/* cb_chpoll */
4737 	ddi_prop_op,		/* cb_prop_op */
4738 	NULL,			/* cb_str */
4739 	D_MP, 			/* cb_flag */
4740 	CB_REV,			/* rev */
4741 	nodev,			/* int (*cb_aread)() */
4742 	nodev			/* int (*cb_awrite)() */
4743 };
4744 
4745 static struct dev_ops nxge_dev_ops = {
4746 	DEVO_REV,		/* devo_rev */
4747 	0,			/* devo_refcnt */
4748 	nulldev,		/* devo_getinfo */
4749 	nulldev,		/* devo_identify */
4750 	nulldev,		/* devo_probe */
4751 	nxge_attach,		/* devo_attach */
4752 	nxge_detach,		/* devo_detach */
4753 	nodev,			/* devo_reset */
4754 	&nxge_cb_ops,		/* devo_cb_ops */
4755 	(struct bus_ops *)NULL, /* devo_bus_ops	*/
4756 	ddi_power		/* devo_power */
4757 };
4758 
4759 extern	struct	mod_ops	mod_driverops;
4760 
4761 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
4762 
4763 /*
4764  * Module linkage information for the kernel.
4765  */
4766 static struct modldrv 	nxge_modldrv = {
4767 	&mod_driverops,
4768 	NXGE_DESC_VER,
4769 	&nxge_dev_ops
4770 };
4771 
4772 static struct modlinkage modlinkage = {
4773 	MODREV_1, (void *) &nxge_modldrv, NULL
4774 };
4775 
4776 int
4777 _init(void)
4778 {
4779 	int		status;
4780 
4781 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
4782 	mac_init_ops(&nxge_dev_ops, "nxge");
4783 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
4784 	if (status != 0) {
4785 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4786 			"failed to init device soft state"));
4787 		goto _init_exit;
4788 	}
4789 	status = mod_install(&modlinkage);
4790 	if (status != 0) {
4791 		ddi_soft_state_fini(&nxge_list);
4792 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
4793 		goto _init_exit;
4794 	}
4795 
4796 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
4797 
4798 _init_exit:
4799 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
4800 
4801 	return (status);
4802 }
4803 
4804 int
4805 _fini(void)
4806 {
4807 	int		status;
4808 
4809 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
4810 
4811 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
4812 
4813 	if (nxge_mblks_pending)
4814 		return (EBUSY);
4815 
4816 	status = mod_remove(&modlinkage);
4817 	if (status != DDI_SUCCESS) {
4818 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
4819 			    "Module removal failed 0x%08x",
4820 			    status));
4821 		goto _fini_exit;
4822 	}
4823 
4824 	mac_fini_ops(&nxge_dev_ops);
4825 
4826 	ddi_soft_state_fini(&nxge_list);
4827 
4828 	MUTEX_DESTROY(&nxge_common_lock);
4829 _fini_exit:
4830 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
4831 
4832 	return (status);
4833 }
4834 
4835 int
4836 _info(struct modinfo *modinfop)
4837 {
4838 	int		status;
4839 
4840 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
4841 	status = mod_info(&modlinkage, modinfop);
4842 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
4843 
4844 	return (status);
4845 }
4846 
4847 /*ARGSUSED*/
4848 static nxge_status_t
4849 nxge_add_intrs(p_nxge_t nxgep)
4850 {
4851 
4852 	int		intr_types;
4853 	int		type = 0;
4854 	int		ddi_status = DDI_SUCCESS;
4855 	nxge_status_t	status = NXGE_OK;
4856 
4857 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
4858 
4859 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
4860 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
4861 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
4862 	nxgep->nxge_intr_type.intr_added = 0;
4863 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
4864 	nxgep->nxge_intr_type.intr_type = 0;
4865 
4866 	if (nxgep->niu_type == N2_NIU) {
4867 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
4868 	} else if (nxge_msi_enable) {
4869 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
4870 	}
4871 
4872 	/* Get the supported interrupt types */
4873 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
4874 			!= DDI_SUCCESS) {
4875 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
4876 			"ddi_intr_get_supported_types failed: status 0x%08x",
4877 			ddi_status));
4878 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4879 	}
4880 	nxgep->nxge_intr_type.intr_types = intr_types;
4881 
4882 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4883 		"ddi_intr_get_supported_types: 0x%08x", intr_types));
4884 
4885 	/*
4886 	 * Solaris MSI-X is not supported yet; use MSI for now.
4887 	 * nxge_msi_enable:
4888 	 *	1 - MSI		2 - MSI-X	others - FIXED
4889 	 */
4890 	switch (nxge_msi_enable) {
4891 	default:
4892 		type = DDI_INTR_TYPE_FIXED;
4893 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4894 			"use fixed (intx emulation) type %08x",
4895 			type));
4896 		break;
4897 
4898 	case 2:
4899 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4900 			"ddi_intr_get_supported_types: 0x%08x", intr_types));
4901 		if (intr_types & DDI_INTR_TYPE_MSIX) {
4902 			type = DDI_INTR_TYPE_MSIX;
4903 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4904 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4905 				type));
4906 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
4907 			type = DDI_INTR_TYPE_MSI;
4908 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4909 				"ddi_intr_get_supported_types: MSI 0x%08x",
4910 				type));
4911 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4912 			type = DDI_INTR_TYPE_FIXED;
4913 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4914 				"ddi_intr_get_supported_types: FIXED 0x%08x",
4915 				type));
4916 		}
4917 		break;
4918 
4919 	case 1:
4920 		if (intr_types & DDI_INTR_TYPE_MSI) {
4921 			type = DDI_INTR_TYPE_MSI;
4922 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4923 				"ddi_intr_get_supported_types: MSI 0x%08x",
4924 				type));
4925 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
4926 			type = DDI_INTR_TYPE_MSIX;
4927 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4928 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4929 				type));
4930 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4931 			type = DDI_INTR_TYPE_FIXED;
4932 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4933 				"ddi_intr_get_supported_types: FIXED 0x%08x",
4934 				type));
4935 		}
4936 	}
4937 
4938 	nxgep->nxge_intr_type.intr_type = type;
4939 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
4940 		type == DDI_INTR_TYPE_FIXED) &&
4941 			nxgep->nxge_intr_type.niu_msi_enable) {
4942 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
4943 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4944 				    " nxge_add_intrs: "
4945 				    " nxge_add_intrs_adv failed: status 0x%08x",
4946 				    status));
4947 			return (status);
4948 		} else {
4949 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4950 			"interrupts registered : type %d", type));
4951 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
4952 
4953 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4954 				"\nAdded advanced nxge add_intr_adv "
4955 					"intr type 0x%x\n", type));
4956 
4957 			return (status);
4958 		}
4959 	}
4960 
4961 	if (!nxgep->nxge_intr_type.intr_registered) {
4962 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
4963 			"failed to register interrupts"));
4964 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4965 	}
4966 
4967 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
4968 	return (status);
4969 }
4970 
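/*
 * Sketch of the interrupt-type selection performed above: the value
 * from ddi_intr_get_supported_types() is a bitmask, and for
 * nxge_msi_enable == 1 the preference order is MSI, then MSI-X, then
 * fixed.  Guarded out of the build; the helper name is hypothetical.
 */
#ifdef	NXGE_INTR_EXAMPLE
static int
nxge_example_pick_intr_type(int intr_types)
{
	if (intr_types & DDI_INTR_TYPE_MSI)
		return (DDI_INTR_TYPE_MSI);
	if (intr_types & DDI_INTR_TYPE_MSIX)
		return (DDI_INTR_TYPE_MSIX);
	return (DDI_INTR_TYPE_FIXED);
}
#endif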
4971 /*ARGSUSED*/
4972 static nxge_status_t
4973 nxge_add_soft_intrs(p_nxge_t nxgep)
4974 {
4975 
4976 	int		ddi_status = DDI_SUCCESS;
4977 	nxge_status_t	status = NXGE_OK;
4978 
4979 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
4980 
4981 	nxgep->resched_id = NULL;
4982 	nxgep->resched_running = B_FALSE;
4983 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
4984 			&nxgep->resched_id,
4985 		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
4986 	if (ddi_status != DDI_SUCCESS) {
4987 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
4988 			"ddi_add_softintr failed: status 0x%08x",
4989 			ddi_status));
4990 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4991 	}
4992 
4993 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
4994 
4995 	return (status);
4996 }
4997 
4998 static nxge_status_t
4999 nxge_add_intrs_adv(p_nxge_t nxgep)
5000 {
5001 	int		intr_type;
5002 	p_nxge_intr_t	intrp;
5003 
5004 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
5005 
5006 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5007 	intr_type = intrp->intr_type;
5008 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
5009 		intr_type));
5010 
5011 	switch (intr_type) {
5012 	case DDI_INTR_TYPE_MSI: /* 0x2 */
5013 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
5014 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
5015 
5016 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
5017 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
5018 
5019 	default:
5020 		return (NXGE_ERROR);
5021 	}
5022 }
5023 
5024 
5025 /*ARGSUSED*/
5026 static nxge_status_t
5027 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
5028 {
5029 	dev_info_t		*dip = nxgep->dip;
5030 	p_nxge_ldg_t		ldgp;
5031 	p_nxge_intr_t		intrp;
5032 	uint_t			*inthandler;
5033 	void			*arg1, *arg2;
5034 	int			behavior;
5035 	int			nintrs, navail, nrequest;
5036 	int			nactual, nrequired;
5037 	int			inum = 0;
5038 	int			x, y;
5039 	int			ddi_status = DDI_SUCCESS;
5040 	nxge_status_t		status = NXGE_OK;
5041 
5042 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
5043 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5044 	intrp->start_inum = 0;
5045 
5046 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5047 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5048 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5049 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
5050 			    "nintrs: %d", ddi_status, nintrs));
5051 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5052 	}
5053 
5054 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5055 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5056 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5057 			"ddi_intr_get_navail() failed, status: 0x%x, "
5058 			    "navail: %d", ddi_status, navail));
5059 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5060 	}
5061 
5062 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5063 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
5064 		    nintrs, navail));
5065 
5066 	/* PSARC/2007/453 MSI-X interrupt limit override */
5067 	if (int_type == DDI_INTR_TYPE_MSIX) {
5068 		nrequest = nxge_create_msi_property(nxgep);
5069 		if (nrequest < navail) {
5070 			navail = nrequest;
5071 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5072 			    "nxge_add_intrs_adv_type: nintrs %d "
5073 			    "navail %d (nrequest %d)",
5074 			    nintrs, navail, nrequest));
5075 		}
5076 	}
5077 
5078 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
5079 		/* MSI vector count must be a power of 2; round down */
5080 		if ((navail & 16) == 16) {
5081 			navail = 16;
5082 		} else if ((navail & 8) == 8) {
5083 			navail = 8;
5084 		} else if ((navail & 4) == 4) {
5085 			navail = 4;
5086 		} else if ((navail & 2) == 2) {
5087 			navail = 2;
5088 		} else {
5089 			navail = 1;
5090 		}
5091 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5092 			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
5093 			"navail %d", nintrs, navail));
5094 	}
5095 
5096 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5097 			DDI_INTR_ALLOC_NORMAL);
5098 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5099 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5100 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5101 		    navail, &nactual, behavior);
5102 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5103 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5104 				    " ddi_intr_alloc() failed: %d",
5105 				    ddi_status));
5106 		kmem_free(intrp->htable, intrp->intr_size);
5107 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5108 	}
5109 
5110 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5111 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5112 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5113 				    " ddi_intr_get_pri() failed: %d",
5114 				    ddi_status));
5115 		/* Free already allocated interrupts */
5116 		for (y = 0; y < nactual; y++) {
5117 			(void) ddi_intr_free(intrp->htable[y]);
5118 		}
5119 
5120 		kmem_free(intrp->htable, intrp->intr_size);
5121 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5122 	}
5123 
5124 	nrequired = 0;
5125 	switch (nxgep->niu_type) {
5126 	default:
5127 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5128 		break;
5129 
5130 	case N2_NIU:
5131 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5132 		break;
5133 	}
5134 
5135 	if (status != NXGE_OK) {
5136 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5137 			"nxge_add_intrs_adv_type: nxge_ldgv_init "
5138 			"failed: 0x%x", status));
5139 		/* Free already allocated interrupts */
5140 		for (y = 0; y < nactual; y++) {
5141 			(void) ddi_intr_free(intrp->htable[y]);
5142 		}
5143 
5144 		kmem_free(intrp->htable, intrp->intr_size);
5145 		return (status);
5146 	}
5147 
5148 	ldgp = nxgep->ldgvp->ldgp;
5149 	for (x = 0; x < nrequired; x++, ldgp++) {
5150 		ldgp->vector = (uint8_t)x;
5151 		ldgp->intdata = SID_DATA(ldgp->func, x);
5152 		arg1 = ldgp->ldvp;
5153 		arg2 = nxgep;
5154 		if (ldgp->nldvs == 1) {
5155 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5156 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5157 				"nxge_add_intrs_adv_type: "
5158 				"arg1 0x%x arg2 0x%x: "
5159 				"1-1 int handler (entry %d intdata 0x%x)\n",
5160 				arg1, arg2,
5161 				x, ldgp->intdata));
5162 		} else if (ldgp->nldvs > 1) {
5163 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5164 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5165 				"nxge_add_intrs_adv_type: "
5166 				"arg1 0x%x arg2 0x%x: "
5167 				"nldvs %d int handler "
5168 				"(entry %d intdata 0x%x)\n",
5169 				arg1, arg2,
5170 				ldgp->nldvs, x, ldgp->intdata));
5171 		}
5172 
5173 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5174 			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
5175 			"htable 0x%llx", x, intrp->htable[x]));
5176 
5177 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
5178 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
5179 				!= DDI_SUCCESS) {
5180 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5181 				"==> nxge_add_intrs_adv_type: failed #%d "
5182 				"status 0x%x", x, ddi_status));
5183 			for (y = 0; y < intrp->intr_added; y++) {
5184 				(void) ddi_intr_remove_handler(
5185 						intrp->htable[y]);
5186 			}
5187 			/* Free already allocated intr */
5188 			for (y = 0; y < nactual; y++) {
5189 				(void) ddi_intr_free(intrp->htable[y]);
5190 			}
5191 			kmem_free(intrp->htable, intrp->intr_size);
5192 
5193 			(void) nxge_ldgv_uninit(nxgep);
5194 
5195 			return (NXGE_ERROR | NXGE_DDI_FAILED);
5196 		}
5197 		intrp->intr_added++;
5198 	}
5199 
5200 	intrp->msi_intx_cnt = nactual;
5201 
5202 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5203 		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
5204 		navail, nactual,
5205 		intrp->msi_intx_cnt,
5206 		intrp->intr_added));
5207 
5208 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
5209 
5210 	(void) nxge_intr_ldgv_init(nxgep);
5211 
5212 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
5213 
5214 	return (status);
5215 }
5216 
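/*
 * Sketch: for the 1..31 vector range seen with MSI here, the clamp
 * in nxge_add_intrs_adv_type() is equivalent to rounding navail down
 * to the largest power of two, capped at 16 just like the bit tests
 * above.  Guarded out of the build; the helper name is hypothetical.
 */
#ifdef	NXGE_INTR_EXAMPLE
static int
nxge_example_round_down_pow2(int navail)
{
	int n;

	for (n = 16; n > 1; n >>= 1) {
		if (navail >= n)
			return (n);
	}
	return (1);
}
#endif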
5217 /*ARGSUSED*/
5218 static nxge_status_t
5219 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
5220 {
5221 	dev_info_t		*dip = nxgep->dip;
5222 	p_nxge_ldg_t		ldgp;
5223 	p_nxge_intr_t		intrp;
5224 	uint_t			*inthandler;
5225 	void			*arg1, *arg2;
5226 	int			behavior;
5227 	int			nintrs, navail;
5228 	int			nactual, nrequired;
5229 	int			inum = 0;
5230 	int			x, y;
5231 	int			ddi_status = DDI_SUCCESS;
5232 	nxge_status_t		status = NXGE_OK;
5233 
5234 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
5235 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5236 	intrp->start_inum = 0;
5237 
5238 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5239 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5240 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5241 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
5242 			    "nintrs: %d", ddi_status, nintrs));
5243 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5244 	}
5245 
5246 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5247 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5248 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5249 			"ddi_intr_get_navail() failed, status: 0x%x, "
5250 			    "navail: %d", ddi_status, navail));
5251 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5252 	}
5253 
5254 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5255 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
5256 		    nintrs, navail));
5257 
5258 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5259 			DDI_INTR_ALLOC_NORMAL);
5260 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5261 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5262 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5263 		    navail, &nactual, behavior);
5264 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5265 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5266 			    " ddi_intr_alloc() failed: %d",
5267 			    ddi_status));
5268 		kmem_free(intrp->htable, intrp->intr_size);
5269 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5270 	}
5271 
5272 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5273 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5274 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5275 				    " ddi_intr_get_pri() failed: %d",
5276 				    ddi_status));
5277 		/* Free already allocated interrupts */
5278 		for (y = 0; y < nactual; y++) {
5279 			(void) ddi_intr_free(intrp->htable[y]);
5280 		}
5281 
5282 		kmem_free(intrp->htable, intrp->intr_size);
5283 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5284 	}
5285 
5286 	nrequired = 0;
5287 	switch (nxgep->niu_type) {
5288 	default:
5289 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5290 		break;
5291 
5292 	case N2_NIU:
5293 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5294 		break;
5295 	}
5296 
5297 	if (status != NXGE_OK) {
5298 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5299 			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
5300 			"failed: 0x%x", status));
5301 		/* Free already allocated interrupts */
5302 		for (y = 0; y < nactual; y++) {
5303 			(void) ddi_intr_free(intrp->htable[y]);
5304 		}
5305 
5306 		kmem_free(intrp->htable, intrp->intr_size);
5307 		return (status);
5308 	}
5309 
5310 	ldgp = nxgep->ldgvp->ldgp;
5311 	for (x = 0; x < nrequired; x++, ldgp++) {
5312 		ldgp->vector = (uint8_t)x;
5313 		if (nxgep->niu_type != N2_NIU) {
5314 			ldgp->intdata = SID_DATA(ldgp->func, x);
5315 		}
5316 
5317 		arg1 = ldgp->ldvp;
5318 		arg2 = nxgep;
5319 		if (ldgp->nldvs == 1) {
5320 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5321 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5322 				"nxge_add_intrs_adv_type_fix: "
5323 				"1-1 int handler(%d) ldg %d ldv %d "
5324 				"arg1 $%p arg2 $%p\n",
5325 				x, ldgp->ldg, ldgp->ldvp->ldv,
5326 				arg1, arg2));
5327 		} else if (ldgp->nldvs > 1) {
5328 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5329 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5330 				"nxge_add_intrs_adv_type_fix: "
5331 				"shared ldv %d int handler(%d) ldv %d ldg %d "
5332 				"arg1 0x%016llx arg2 0x%016llx\n",
5333 				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
5334 				arg1, arg2));
5335 		}
5336 
5337 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
5338 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
5339 				!= DDI_SUCCESS) {
5340 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5341 				"==> nxge_add_intrs_adv_type_fix: failed #%d "
5342 				"status 0x%x", x, ddi_status));
5343 			for (y = 0; y < intrp->intr_added; y++) {
5344 				(void) ddi_intr_remove_handler(
5345 						intrp->htable[y]);
5346 			}
5347 			for (y = 0; y < nactual; y++) {
5348 				(void) ddi_intr_free(intrp->htable[y]);
5349 			}
5350 			/* Free already allocated intr */
5351 			kmem_free(intrp->htable, intrp->intr_size);
5352 
5353 			(void) nxge_ldgv_uninit(nxgep);
5354 
5355 			return (NXGE_ERROR | NXGE_DDI_FAILED);
5356 		}
5357 		intrp->intr_added++;
5358 	}
5359 
5360 	intrp->msi_intx_cnt = nactual;
5361 
5362 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
5363 
5364 	status = nxge_intr_ldgv_init(nxgep);
5365 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
5366 
5367 	return (status);
5368 }
5369 
5370 static void
5371 nxge_remove_intrs(p_nxge_t nxgep)
5372 {
5373 	int		i, inum;
5374 	p_nxge_intr_t	intrp;
5375 
5376 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
5377 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5378 	if (!intrp->intr_registered) {
5379 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5380 			"<== nxge_remove_intrs: interrupts not registered"));
5381 		return;
5382 	}
5383 
5384 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
5385 
5386 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5387 		(void) ddi_intr_block_disable(intrp->htable,
5388 			intrp->intr_added);
5389 	} else {
5390 		for (i = 0; i < intrp->intr_added; i++) {
5391 			(void) ddi_intr_disable(intrp->htable[i]);
5392 		}
5393 	}
5394 
5395 	for (inum = 0; inum < intrp->intr_added; inum++) {
5396 		if (intrp->htable[inum]) {
5397 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
5398 		}
5399 	}
5400 
5401 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
5402 		if (intrp->htable[inum]) {
5403 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5404 				"nxge_remove_intrs: ddi_intr_free inum %d "
5405 				"msi_intx_cnt %d intr_added %d",
5406 				inum,
5407 				intrp->msi_intx_cnt,
5408 				intrp->intr_added));
5409 
5410 			(void) ddi_intr_free(intrp->htable[inum]);
5411 		}
5412 	}
5413 
5414 	kmem_free(intrp->htable, intrp->intr_size);
5415 	intrp->intr_registered = B_FALSE;
5416 	intrp->intr_enabled = B_FALSE;
5417 	intrp->msi_intx_cnt = 0;
5418 	intrp->intr_added = 0;
5419 
5420 	(void) nxge_ldgv_uninit(nxgep);
5421 
5422 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
5423 	    "#msix-request");
5424 
5425 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
5426 }
5427 
5428 /*ARGSUSED*/
5429 static void
5430 nxge_remove_soft_intrs(p_nxge_t nxgep)
5431 {
5432 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
5433 	if (nxgep->resched_id) {
5434 		ddi_remove_softintr(nxgep->resched_id);
5435 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5436 			"==> nxge_remove_soft_intrs: removed"));
5437 		nxgep->resched_id = NULL;
5438 	}
5439 
5440 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
5441 }
5442 
5443 /*ARGSUSED*/
5444 static void
5445 nxge_intrs_enable(p_nxge_t nxgep)
5446 {
5447 	p_nxge_intr_t	intrp;
5448 	int		i;
5449 	int		status;
5450 
5451 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
5452 
5453 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5454 
5455 	if (!intrp->intr_registered) {
5456 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
5457 			"interrupts are not registered"));
5458 		return;
5459 	}
5460 
5461 	if (intrp->intr_enabled) {
5462 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5463 			"<== nxge_intrs_enable: already enabled"));
5464 		return;
5465 	}
5466 
5467 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5468 		status = ddi_intr_block_enable(intrp->htable,
5469 			intrp->intr_added);
5470 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
5471 			"block enable - status 0x%x total inums #%d\n",
5472 			status, intrp->intr_added));
5473 	} else {
5474 		for (i = 0; i < intrp->intr_added; i++) {
5475 			status = ddi_intr_enable(intrp->htable[i]);
5476 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
5477 				"ddi_intr_enable:enable - status 0x%x "
5478 				"total inums %d enable inum #%d\n",
5479 				status, intrp->intr_added, i));
5480 			if (status == DDI_SUCCESS) {
5481 				intrp->intr_enabled = B_TRUE;
5482 			}
5483 		}
5484 	}
5485 
5486 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
5487 }
5488 
5489 /*ARGSUSED*/
5490 static void
5491 nxge_intrs_disable(p_nxge_t nxgep)
5492 {
5493 	p_nxge_intr_t	intrp;
5494 	int		i;
5495 
5496 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
5497 
5498 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5499 
5500 	if (!intrp->intr_registered) {
5501 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
5502 			"interrupts are not registered"));
5503 		return;
5504 	}
5505 
5506 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
5507 		(void) ddi_intr_block_disable(intrp->htable,
5508 			intrp->intr_added);
5509 	} else {
5510 		for (i = 0; i < intrp->intr_added; i++) {
5511 			(void) ddi_intr_disable(intrp->htable[i]);
5512 		}
5513 	}
5514 
5515 	intrp->intr_enabled = B_FALSE;
5516 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
5517 }
5518 
5519 static nxge_status_t
5520 nxge_mac_register(p_nxge_t nxgep)
5521 {
5522 	mac_register_t *macp;
5523 	int		status;
5524 
5525 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
5526 
5527 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
5528 		return (NXGE_ERROR);
5529 
5530 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
5531 	macp->m_driver = nxgep;
5532 	macp->m_dip = nxgep->dip;
5533 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
5534 	macp->m_callbacks = &nxge_m_callbacks;
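	/*
	 * The maximum SDU advertised to the MAC layer is the hardware
	 * maximum frame size minus the Ethernet header, VLAN tag and
	 * CRC (NXGE_EHEADER_VLAN_CRC).  m_margin reserves room for one
	 * VLAN tag beyond the SDU.
	 */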
5535 	macp->m_min_sdu = 0;
5536 	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
5537 	    NXGE_EHEADER_VLAN_CRC;
5538 	macp->m_max_sdu = nxgep->mac.default_mtu;
5539 	macp->m_margin = VLAN_TAGSZ;
5540 
5541 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
5542 	    "==> nxge_mac_register: instance %d "
5543 	    "max_sdu %d margin %d maxframe %d (header %d)",
5544 	    nxgep->instance,
5545 	    macp->m_max_sdu, macp->m_margin,
5546 	    nxgep->mac.maxframesize,
5547 	    NXGE_EHEADER_VLAN_CRC));
5548 
5549 	status = mac_register(macp, &nxgep->mach);
5550 	mac_free(macp);
5551 
5552 	if (status != 0) {
5553 		cmn_err(CE_WARN,
5554 			"!nxge_mac_register failed (status %d instance %d)",
5555 			status, nxgep->instance);
5556 		return (NXGE_ERROR);
5557 	}
5558 
5559 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
5560 		"(instance %d)", nxgep->instance));
5561 
5562 	return (NXGE_OK);
5563 }
5564 
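/*
 * Service the error-injection ioctl.  The continuation mblk carries an
 * err_inject_t selecting the hardware block, error id and channel.
 * Blocks without an injection routine fall through silently, and the
 * ioctl is always acknowledged with a fixed 1024-byte payload.
 */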
5565 void
5566 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
5567 {
5568 	ssize_t		size;
5569 	mblk_t		*nmp;
5570 	uint8_t		blk_id;
5571 	uint8_t		chan;
5572 	uint32_t	err_id;
5573 	err_inject_t	*eip;
5574 
5575 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
5576 
5577 	size = 1024;
5578 	nmp = mp->b_cont;
5579 	eip = (err_inject_t *)nmp->b_rptr;
5580 	blk_id = eip->blk_id;
5581 	err_id = eip->err_id;
5582 	chan = eip->chan;
5583 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
5584 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
5585 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
5586 	switch (blk_id) {
5587 	case MAC_BLK_ID:
5588 		break;
5589 	case TXMAC_BLK_ID:
5590 		break;
5591 	case RXMAC_BLK_ID:
5592 		break;
5593 	case MIF_BLK_ID:
5594 		break;
5595 	case IPP_BLK_ID:
5596 		nxge_ipp_inject_err(nxgep, err_id);
5597 		break;
5598 	case TXC_BLK_ID:
5599 		nxge_txc_inject_err(nxgep, err_id);
5600 		break;
5601 	case TXDMA_BLK_ID:
5602 		nxge_txdma_inject_err(nxgep, err_id, chan);
5603 		break;
5604 	case RXDMA_BLK_ID:
5605 		nxge_rxdma_inject_err(nxgep, err_id, chan);
5606 		break;
5607 	case ZCP_BLK_ID:
5608 		nxge_zcp_inject_err(nxgep, err_id);
5609 		break;
5610 	case ESPC_BLK_ID:
5611 		break;
5612 	case FFLP_BLK_ID:
5613 		break;
5614 	case PHY_BLK_ID:
5615 		break;
5616 	case ETHER_SERDES_BLK_ID:
5617 		break;
5618 	case PCIE_SERDES_BLK_ID:
5619 		break;
5620 	case VIR_BLK_ID:
5621 		break;
5622 	}
5623 
5624 	nmp->b_wptr = nmp->b_rptr + size;
5625 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
5626 
5627 	miocack(wq, mp, (int)size, 0);
5628 }
5629 
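/*
 * Attach this instance (one port/function) to the nxge_hw_list_t shared
 * by all functions of the same Neptune device, keyed by the parent
 * devinfo node.  The first function to attach allocates the shared
 * structure, initializes its locks and scans the port PHYs.
 */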
5630 static int
5631 nxge_init_common_dev(p_nxge_t nxgep)
5632 {
5633 	p_nxge_hw_list_t	hw_p;
5634 	dev_info_t 		*p_dip;
5635 
5636 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
5637 
5638 	p_dip = nxgep->p_dip;
5639 	MUTEX_ENTER(&nxge_common_lock);
5640 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5641 		"==> nxge_init_common_device:func # %d",
5642 		nxgep->function_num));
5643 	/*
5644 	 * Loop through the existing per-Neptune hardware list.
5645 	 */
5646 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
5647 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5648 			"==> nxge_init_common_device:func # %d "
5649 			"hw_p $%p parent dip $%p",
5650 			nxgep->function_num,
5651 			hw_p,
5652 			p_dip));
5653 		if (hw_p->parent_devp == p_dip) {
5654 			nxgep->nxge_hw_p = hw_p;
5655 			hw_p->ndevs++;
5656 			hw_p->nxge_p[nxgep->function_num] = nxgep;
5657 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5658 				"==> nxge_init_common_device:func # %d "
5659 				"hw_p $%p parent dip $%p "
5660 				"ndevs %d (found)",
5661 				nxgep->function_num,
5662 				hw_p,
5663 				p_dip,
5664 				hw_p->ndevs));
5665 			break;
5666 		}
5667 	}
5668 
5669 	if (hw_p == NULL) {
5670 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5671 			"==> nxge_init_common_device:func # %d "
5672 			"parent dip $%p (new)",
5673 			nxgep->function_num,
5674 			p_dip));
5675 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
5676 		hw_p->parent_devp = p_dip;
5677 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
5678 		nxgep->nxge_hw_p = hw_p;
5679 		hw_p->ndevs++;
5680 		hw_p->nxge_p[nxgep->function_num] = nxgep;
5681 		hw_p->next = nxge_hw_list;
5682 		if (nxgep->niu_type == N2_NIU) {
5683 			hw_p->niu_type = N2_NIU;
5684 			hw_p->platform_type = P_NEPTUNE_NIU;
5685 		} else {
5686 			hw_p->niu_type = NIU_TYPE_NONE;
5687 			hw_p->platform_type = P_NEPTUNE_NONE;
5688 		}
5689 
5690 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
5691 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
5692 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
5693 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
5694 
5695 		nxge_hw_list = hw_p;
5696 
5697 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
5698 	}
5699 
5700 	MUTEX_EXIT(&nxge_common_lock);
5701 
5702 	nxgep->platform_type = hw_p->platform_type;
5703 	if (nxgep->niu_type != N2_NIU) {
5704 		nxgep->niu_type = hw_p->niu_type;
5705 	}
5706 
5707 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5708 		"==> nxge_init_common_device (nxge_hw_list) $%p",
5709 		nxge_hw_list));
5710 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
5711 
5712 	return (NXGE_OK);
5713 }
5714 
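/*
 * Detach this instance from the shared hardware list.  The last
 * function to detach destroys the shared locks, unlinks the entry
 * from nxge_hw_list and frees it.
 */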
5715 static void
5716 nxge_uninit_common_dev(p_nxge_t nxgep)
5717 {
5718 	p_nxge_hw_list_t	hw_p, h_hw_p;
5719 	dev_info_t 		*p_dip;
5720 
5721 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
5722 	if (nxgep->nxge_hw_p == NULL) {
5723 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5724 			"<== nxge_uninit_common_device (no common)"));
5725 		return;
5726 	}
5727 
5728 	MUTEX_ENTER(&nxge_common_lock);
5729 	h_hw_p = nxge_hw_list;
5730 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
5731 		p_dip = hw_p->parent_devp;
5732 		if (nxgep->nxge_hw_p == hw_p &&
5733 			p_dip == nxgep->p_dip &&
5734 			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
5735 			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
5736 
5737 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5738 				"==> nxge_uninit_common_device:func # %d "
5739 				"hw_p $%p parent dip $%p "
5740 				"ndevs %d (found)",
5741 				nxgep->function_num,
5742 				hw_p,
5743 				p_dip,
5744 				hw_p->ndevs));
5745 
5746 			nxgep->nxge_hw_p = NULL;
5747 			if (hw_p->ndevs) {
5748 				hw_p->ndevs--;
5749 			}
5750 			hw_p->nxge_p[nxgep->function_num] = NULL;
5751 			if (!hw_p->ndevs) {
5752 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
5753 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
5754 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
5755 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
5756 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5757 					"==> nxge_uninit_common_device: "
5758 					"func # %d "
5759 					"hw_p $%p parent dip $%p "
5760 					"ndevs %d (last)",
5761 					nxgep->function_num,
5762 					hw_p,
5763 					p_dip,
5764 					hw_p->ndevs));
5765 
5766 				if (hw_p == nxge_hw_list) {
5767 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5768 						"==> nxge_uninit_common_device:"
5769 						"remove head func # %d "
5770 						"hw_p $%p parent dip $%p "
5771 						"ndevs %d (head)",
5772 						nxgep->function_num,
5773 						hw_p,
5774 						p_dip,
5775 						hw_p->ndevs));
5776 					nxge_hw_list = hw_p->next;
5777 				} else {
5778 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5779 						"==> nxge_uninit_common_device:"
5780 						"remove middle func # %d "
5781 						"hw_p $%p parent dip $%p "
5782 						"ndevs %d (middle)",
5783 						nxgep->function_num,
5784 						hw_p,
5785 						p_dip,
5786 						hw_p->ndevs));
5787 					h_hw_p->next = hw_p->next;
5788 				}
5789 
5790 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
5791 			}
5792 			break;
5793 		} else {
5794 			h_hw_p = hw_p;
5795 		}
5796 	}
5797 
5798 	MUTEX_EXIT(&nxge_common_lock);
5799 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5800 		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
5801 		nxge_hw_list));
5802 
5803 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
5804 }
5805 
5806 /*
5807  * Determine the number of ports from the niu_type or, failing that,
5808  * the platform type.  Returns the number of ports, or zero on failure.
5809  */
5811 int
5812 nxge_get_nports(p_nxge_t nxgep)
5813 {
5814 	int	nports = 0;
5815 
5816 	switch (nxgep->niu_type) {
5817 	case N2_NIU:
5818 	case NEPTUNE_2_10GF:
5819 		nports = 2;
5820 		break;
5821 	case NEPTUNE_4_1GC:
5822 	case NEPTUNE_2_10GF_2_1GC:
5823 	case NEPTUNE_1_10GF_3_1GC:
5824 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
5825 	case NEPTUNE_2_10GF_2_1GRF:
5826 		nports = 4;
5827 		break;
5828 	default:
5829 		switch (nxgep->platform_type) {
5830 		case P_NEPTUNE_NIU:
5831 		case P_NEPTUNE_ATLAS_2PORT:
5832 			nports = 2;
5833 			break;
5834 		case P_NEPTUNE_ATLAS_4PORT:
5835 		case P_NEPTUNE_MARAMBA_P0:
5836 		case P_NEPTUNE_MARAMBA_P1:
5837 		case P_NEPTUNE_ALONSO:
5838 			nports = 4;
5839 			break;
5840 		default:
5841 			break;
5842 		}
5843 		break;
5844 	}
5845 
5846 	return (nports);
5847 }
5848 
5849 /*
5850  * The following two functions support the
5851  * PSARC/2007/453 MSI-X interrupt limit override.
5852  */
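/*
 * Creating the "#msix-request" property on the device node appears to
 * be the mechanism (presumably consumed by the SPARC PCIe nexus driver)
 * by which the driver requests more MSI-X vectors than the platform
 * default allows; the property is removed again in nxge_remove_intrs().
 */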
5853 static int
5854 nxge_create_msi_property(p_nxge_t nxgep)
5855 {
5856 	int	nmsi;
5857 	extern	int ncpus;
5858 
5859 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));
5860 
5861 	switch (nxgep->mac.portmode) {
5862 	case PORT_10G_COPPER:
5863 	case PORT_10G_FIBER:
5864 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
5865 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
5866 		/*
5867 		 * Request at most NXGE_MSIX_REQUEST_10G (8) MSI-X
5868 		 * vectors.  If there are fewer CPUs than that,
5869 		 * request one MSI-X vector per CPU instead.
5870 		 */
5871 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
5872 			nmsi = NXGE_MSIX_REQUEST_10G;
5873 		} else {
5874 			nmsi = ncpus;
5875 		}
5876 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5877 		    "==> nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
5878 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
5879 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
5880 		break;
5881 
5882 	default:
5883 		nmsi = NXGE_MSIX_REQUEST_1G;
5884 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
5885 		    "==> nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
5886 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
5887 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
5888 		break;
5889 	}
5890 
5891 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
5892 	return (nmsi);
5893 }
5894