1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
30  */
31 #include	<sys/nxge/nxge_impl.h>
32 #include	<sys/pcie.h>
33 
34 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
35 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
36 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
37 /*
38  * PSARC/2007/453 MSI-X interrupt limit override
39  * (This PSARC case is limited to MSI-X vectors
40  *  and SPARC platforms only).
41  */
42 #if defined(_BIG_ENDIAN)
43 uint32_t	nxge_msi_enable = 2;
44 #else
45 uint32_t	nxge_msi_enable = 1;
46 #endif
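
/*
 * Illustrative only: these defaults could be overridden from
 * /etc/system. The value-to-type mapping assumed here (0 = fixed
 * interrupts, 1 = MSI, 2 = MSI-X) is inferred from the defaults
 * above, not a documented contract:
 *
 *	set nxge:nxge_msi_enable = 2
 */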
47 
48 /*
49  * Globals: tunable parameters (/etc/system or adb)
50  *
51  */
52 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
53 uint32_t 	nxge_rbr_spare_size = 0;
54 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
55 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
56 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
57 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
58 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
59 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
60 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
61 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
62 boolean_t	nxge_jumbo_enable = B_FALSE;
63 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
64 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
65 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
66 
67 /* MAX LSO size */
68 #define		NXGE_LSO_MAXLEN	65535
69 /* Enable Software LSO flag */
70 uint32_t	nxge_lso_enable = 1;
71 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
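
/*
 * Sketch: software LSO can likewise be tuned from /etc/system, e.g.
 * disabled outright or capped below NXGE_LSO_MAXLEN (the cap value
 * below is an arbitrary example):
 *
 *	set nxge:nxge_lso_enable = 0
 *	set nxge:nxge_lso_max = 32768
 */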
72 
73 /*
74  * Debugging flags:
75  *		nxge_no_tx_lb: 1 - disable transmit load balancing
76  *		nxge_tx_lb_policy: 0 - TCP port (default)
77  *				   3 - DEST MAC
78  */
79 uint32_t 	nxge_no_tx_lb = 0;
80 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
81 
82 /*
83  * Tunable to reduce the time spent in the ISR doing Rx
84  * processing by capping the packets handled per interrupt.
85  */
86 uint32_t nxge_max_rx_pkts = 1024;
87 
88 /*
89  * Tunables to manage the receive buffer blocks.
90  *
91  * nxge_rx_threshold_hi: copy all buffers.
92  * nxge_rx_buf_size_type: receive buffer block size type.
93  * nxge_rx_threshold_lo: copy only up to tunable block size type.
94  */
95 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
96 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
97 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
98 
99 rtrace_t npi_rtracebuf;
100 
101 #if	defined(sun4v)
102 /*
103  * Hypervisor N2/NIU services information.
104  */
105 static hsvc_info_t niu_hsvc = {
106 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
107 	NIU_MINOR_VER, "nxge"
108 };
109 #endif
110 
111 /*
112  * Function Prototypes
113  */
114 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
115 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
116 static void nxge_unattach(p_nxge_t);
117 
118 #if NXGE_PROPERTY
119 static void nxge_remove_hard_properties(p_nxge_t);
120 #endif
121 
122 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
123 
124 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
125 static void nxge_destroy_mutexes(p_nxge_t);
126 
127 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
128 static void nxge_unmap_regs(p_nxge_t nxgep);
129 #ifdef	NXGE_DEBUG
130 static void nxge_test_map_regs(p_nxge_t nxgep);
131 #endif
132 
133 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
134 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
135 static void nxge_remove_intrs(p_nxge_t nxgep);
136 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
137 
138 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
139 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
140 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
141 static void nxge_intrs_enable(p_nxge_t nxgep);
142 static void nxge_intrs_disable(p_nxge_t nxgep);
143 
144 static void nxge_suspend(p_nxge_t);
145 static nxge_status_t nxge_resume(p_nxge_t);
146 
147 static nxge_status_t nxge_setup_dev(p_nxge_t);
148 static void nxge_destroy_dev(p_nxge_t);
149 
150 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
151 static void nxge_free_mem_pool(p_nxge_t);
152 
153 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
154 static void nxge_free_rx_mem_pool(p_nxge_t);
155 
156 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
157 static void nxge_free_tx_mem_pool(p_nxge_t);
158 
159 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
160 	struct ddi_dma_attr *,
161 	size_t, ddi_device_acc_attr_t *, uint_t,
162 	p_nxge_dma_common_t);
163 
164 static void nxge_dma_mem_free(p_nxge_dma_common_t);
165 
166 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
167 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
168 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
169 
170 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
171 	p_nxge_dma_common_t *, size_t);
172 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
173 
174 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
175 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
176 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
177 
178 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
179 	p_nxge_dma_common_t *,
180 	size_t);
181 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
182 
183 static int nxge_init_common_dev(p_nxge_t);
184 static void nxge_uninit_common_dev(p_nxge_t);
185 
186 /*
187  * The next declarations are for the GLDv3 interface.
188  */
189 static int nxge_m_start(void *);
190 static void nxge_m_stop(void *);
191 static int nxge_m_unicst(void *, const uint8_t *);
192 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
193 static int nxge_m_promisc(void *, boolean_t);
194 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
195 static void nxge_m_resources(void *);
196 mblk_t *nxge_m_tx(void *arg, mblk_t *);
197 static nxge_status_t nxge_mac_register(p_nxge_t);
198 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
199 	mac_addr_slot_t slot);
200 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
201 	boolean_t factory);
202 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
203 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
204 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
205 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
206 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
207 
208 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
209 #define	MAX_DUMP_SZ 256
210 
211 #define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
212 
213 static	boolean_t	nxge_m_getcapab(void *, mac_capab_t, void *);
214 static mac_callbacks_t nxge_m_callbacks = {
215 	NXGE_M_CALLBACK_FLAGS,
216 	nxge_m_stat,
217 	nxge_m_start,
218 	nxge_m_stop,
219 	nxge_m_promisc,
220 	nxge_m_multicst,
221 	nxge_m_unicst,
222 	nxge_m_tx,
223 	nxge_m_resources,
224 	nxge_m_ioctl,
225 	nxge_m_getcapab
226 };
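
/*
 * For orientation, a minimal sketch of how a GLDv3 driver hands this
 * callback table to the MAC layer; nxge_mac_register(), defined later
 * in this file, performs the real registration (SDU and source-address
 * setup is elided here):
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = nxgep;
 *	macp->m_dip = nxgep->dip;
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	...
 *	(void) mac_register(macp, &nxgep->mach);
 *	mac_free(macp);
 */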
227 
228 void
229 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
230 
231 /* PSARC/2007/453 MSI-X interrupt limit override. */
232 #define	NXGE_MSIX_REQUEST_10G	8
233 #define	NXGE_MSIX_REQUEST_1G	2
234 static int nxge_create_msi_property(p_nxge_t);
235 
236 /*
237  * These global variables control the message
238  * output.
239  */
240 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
241 uint64_t nxge_debug_level = 0;
242 
243 /*
244  * This list contains the instance structures for the Neptune
245  * devices present in the system. The lock exists to guarantee
246  * mutually exclusive access to the list.
247  */
248 void 			*nxge_list = NULL;
249 
250 void			*nxge_hw_list = NULL;
251 nxge_os_mutex_t 	nxge_common_lock;
252 
253 extern uint64_t 	npi_debug_level;
254 
255 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
256 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
257 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
258 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
259 extern void		nxge_fm_init(p_nxge_t,
260 					ddi_device_acc_attr_t *,
261 					ddi_device_acc_attr_t *,
262 					ddi_dma_attr_t *);
263 extern void		nxge_fm_fini(p_nxge_t);
264 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
265 
266 /*
267  * Count of the buffers in use by Neptune instances and
268  * loaned up to the upper layers.
269  */
270 uint32_t nxge_mblks_pending = 0;
271 
272 /*
273  * Device register access attributes for PIO.
274  */
275 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
276 	DDI_DEVICE_ATTR_V0,
277 	DDI_STRUCTURE_LE_ACC,
278 	DDI_STRICTORDER_ACC,
279 };
280 
281 /*
282  * Device descriptor access attributes for DMA.
283  */
284 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
285 	DDI_DEVICE_ATTR_V0,
286 	DDI_STRUCTURE_LE_ACC,
287 	DDI_STRICTORDER_ACC
288 };
289 
290 /*
291  * Device buffer access attributes for DMA.
292  */
293 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
294 	DDI_DEVICE_ATTR_V0,
295 	DDI_STRUCTURE_BE_ACC,
296 	DDI_STRICTORDER_ACC
297 };
298 
299 ddi_dma_attr_t nxge_desc_dma_attr = {
300 	DMA_ATTR_V0,		/* version number. */
301 	0,			/* low address */
302 	0xffffffffffffffff,	/* high address */
303 	0xffffffffffffffff,	/* address counter max */
304 #ifndef NIU_PA_WORKAROUND
305 	0x100000,		/* alignment */
306 #else
307 	0x2000,
308 #endif
309 	0xfc00fc,		/* dlim_burstsizes */
310 	0x1,			/* minimum transfer size */
311 	0xffffffffffffffff,	/* maximum transfer size */
312 	0xffffffffffffffff,	/* maximum segment size */
313 	1,			/* scatter/gather list length */
314 	(unsigned int) 1,	/* granularity */
315 	0			/* attribute flags */
316 };
317 
318 ddi_dma_attr_t nxge_tx_dma_attr = {
319 	DMA_ATTR_V0,		/* version number. */
320 	0,			/* low address */
321 	0xffffffffffffffff,	/* high address */
322 	0xffffffffffffffff,	/* address counter max */
323 #if defined(_BIG_ENDIAN)
324 	0x2000,			/* alignment */
325 #else
326 	0x1000,			/* alignment */
327 #endif
328 	0xfc00fc,		/* dlim_burstsizes */
329 	0x1,			/* minimum transfer size */
330 	0xffffffffffffffff,	/* maximum transfer size */
331 	0xffffffffffffffff,	/* maximum segment size */
332 	5,			/* scatter/gather list length */
333 	(unsigned int) 1,	/* granularity */
334 	0			/* attribute flags */
335 };
336 
337 ddi_dma_attr_t nxge_rx_dma_attr = {
338 	DMA_ATTR_V0,		/* version number. */
339 	0,			/* low address */
340 	0xffffffffffffffff,	/* high address */
341 	0xffffffffffffffff,	/* address counter max */
342 	0x2000,			/* alignment */
343 	0xfc00fc,		/* dlim_burstsizes */
344 	0x1,			/* minimum transfer size */
345 	0xffffffffffffffff,	/* maximum transfer size */
346 	0xffffffffffffffff,	/* maximum segment size */
347 	1,			/* scatter/gather list length */
348 	(unsigned int) 1,	/* granularity */
349 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
350 };
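
/*
 * A typical consumer of these attribute structures (sketch only; see
 * nxge_dma_mem_alloc() for the real allocation path):
 *
 *	ddi_dma_handle_t dh;
 *
 *	if (ddi_dma_alloc_handle(nxgep->dip, &nxge_rx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &dh) == DDI_SUCCESS) {
 *		... ddi_dma_mem_alloc() / ddi_dma_addr_bind_handle() ...
 *		ddi_dma_free_handle(&dh);
 *	}
 */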
351 
352 ddi_dma_lim_t nxge_dma_limits = {
353 	(uint_t)0,		/* dlim_addr_lo */
354 	(uint_t)0xffffffff,	/* dlim_addr_hi */
355 	(uint_t)0xffffffff,	/* dlim_cntr_max */
356 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
357 	0x1,			/* dlim_minxfer */
358 	1024			/* dlim_speed */
359 };
360 
361 dma_method_t nxge_force_dma = DVMA;
362 
363 /*
364  * DMA chunk sizes.
365  *
366  * Try to allocate the largest possible size
367  * so that fewer DMA chunks need to be managed.
368  */
369 #ifdef NIU_PA_WORKAROUND
370 size_t alloc_sizes [] = {0x2000};
371 #else
372 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
373 		0x10000, 0x20000, 0x40000, 0x80000,
374 		0x100000, 0x200000, 0x400000, 0x800000,
375 		0x1000000, 0x2000000, 0x4000000};
376 #endif
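
/*
 * For reference: the table above covers powers of two from 4 KB
 * (0x1000) through 64 MB (0x4000000); under NIU_PA_WORKAROUND only the
 * single 8 KB (0x2000) chunk size is used.
 */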
377 
378 /*
379  * nxge_attach: DDI_ATTACH/DDI_RESUME/DDI_PM_RESUME entry point.
380  */
381 
382 static int
383 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
384 {
385 	p_nxge_t	nxgep = NULL;
386 	int		instance;
387 	int		status = DDI_SUCCESS;
388 	uint8_t		portn;
389 	nxge_mmac_t	*mmac_info;
390 
391 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
392 
393 	/*
394 	 * Get the device instance since we'll need to setup
395 	 * or retrieve a soft state for this instance.
396 	 */
397 	instance = ddi_get_instance(dip);
398 
399 	switch (cmd) {
400 	case DDI_ATTACH:
401 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
402 		break;
403 
404 	case DDI_RESUME:
405 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
406 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
407 		if (nxgep == NULL) {
408 			status = DDI_FAILURE;
409 			break;
410 		}
411 		if (nxgep->dip != dip) {
412 			status = DDI_FAILURE;
413 			break;
414 		}
415 		if (nxgep->suspended == DDI_PM_SUSPEND) {
416 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
417 		} else {
418 			status = nxge_resume(nxgep);
419 		}
420 		goto nxge_attach_exit;
421 
422 	case DDI_PM_RESUME:
423 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
424 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
425 		if (nxgep == NULL) {
426 			status = DDI_FAILURE;
427 			break;
428 		}
429 		if (nxgep->dip != dip) {
430 			status = DDI_FAILURE;
431 			break;
432 		}
433 		status = nxge_resume(nxgep);
434 		goto nxge_attach_exit;
435 
436 	default:
437 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
438 		status = DDI_FAILURE;
439 		goto nxge_attach_exit;
440 	}
441 
442 
443 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
444 		status = DDI_FAILURE;
445 		goto nxge_attach_exit;
446 	}
447 
448 	nxgep = ddi_get_soft_state(nxge_list, instance);
449 	if (nxgep == NULL) {
450 		status = NXGE_ERROR;
451 		goto nxge_attach_fail2;
452 	}
453 
454 	nxgep->nxge_magic = NXGE_MAGIC;
455 
456 	nxgep->drv_state = 0;
457 	nxgep->dip = dip;
458 	nxgep->instance = instance;
459 	nxgep->p_dip = ddi_get_parent(dip);
460 	nxgep->nxge_debug_level = nxge_debug_level;
461 	npi_debug_level = nxge_debug_level;
462 
463 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
464 				&nxge_rx_dma_attr);
465 
466 	status = nxge_map_regs(nxgep);
467 	if (status != NXGE_OK) {
468 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
469 		goto nxge_attach_fail3;
470 	}
471 
472 	status = nxge_init_common_dev(nxgep);
473 	if (status != NXGE_OK) {
474 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
475 			"nxge_init_common_dev failed"));
476 		goto nxge_attach_fail4;
477 	}
478 
479 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
480 		if (nxgep->function_num > 1) {
481 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
482 			    " function %d. Only functions 0 and 1 are "
483 			    "supported for this card.", nxgep->function_num));
484 			status = NXGE_ERROR;
485 			goto nxge_attach_fail4;
486 		}
487 	}
488 
489 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
490 	nxgep->mac.portnum = portn;
491 	if ((portn == 0) || (portn == 1))
492 		nxgep->mac.porttype = PORT_TYPE_XMAC;
493 	else
494 		nxgep->mac.porttype = PORT_TYPE_BMAC;
495 	/*
496 	 * Neptune has 4 ports; the first 2 ports use the XMAC (10G MAC)
497 	 * internally, while the remaining 2 ports use the BMAC (1G "Big"
498 	 * MAC). The two types of MACs have different characteristics.
499 	 */
500 	mmac_info = &nxgep->nxge_mmac_info;
501 	if (nxgep->function_num < 2) {
502 		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
503 		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
504 	} else {
505 		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
506 		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
507 	}
508 	/*
509 	 * Set up the NDD parameters for this instance.
510 	 */
511 	nxge_init_param(nxgep);
512 
513 	/*
514 	 * Setup Register Tracing Buffer.
515 	 */
516 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
517 
518 	/* init stats ptr */
519 	nxge_init_statsp(nxgep);
520 
521 	/*
522 	 * Read the VPD info from the EEPROM into a local data
523 	 * structure and check it for validity.
524 	 */
525 	nxge_vpd_info_get(nxgep);
526 
527 	status = nxge_xcvr_find(nxgep);
528 
529 	if (status != NXGE_OK) {
530 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
531 				    "couldn't determine card type"
532 				    " .... exit"));
533 		goto nxge_attach_fail5;
534 	}
535 
536 	status = nxge_get_config_properties(nxgep);
537 
538 	if (status != NXGE_OK) {
539 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get config properties failed"));
540 		goto nxge_attach_fail;
541 	}
542 
543 	/*
544 	 * Setup the Kstats for the driver.
545 	 */
546 	nxge_setup_kstats(nxgep);
547 
548 	nxge_setup_param(nxgep);
549 
550 	status = nxge_setup_system_dma_pages(nxgep);
551 	if (status != NXGE_OK) {
552 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
553 		goto nxge_attach_fail;
554 	}
555 
556 #if	defined(sun4v)
557 	if (nxgep->niu_type == N2_NIU) {
558 		nxgep->niu_hsvc_available = B_FALSE;
559 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
560 		if ((status =
561 			hsvc_register(&nxgep->niu_hsvc,
562 					&nxgep->niu_min_ver)) != 0) {
563 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
564 					"nxge_attach: "
565 					"%s: cannot negotiate "
566 					"hypervisor services "
567 					"revision %d "
568 					"group: 0x%lx "
569 					"major: 0x%lx minor: 0x%lx "
570 					"errno: %d",
571 					niu_hsvc.hsvc_modname,
572 					niu_hsvc.hsvc_rev,
573 					niu_hsvc.hsvc_group,
574 					niu_hsvc.hsvc_major,
575 					niu_hsvc.hsvc_minor,
576 					status));
577 				status = DDI_FAILURE;
578 				goto nxge_attach_fail;
579 		}
580 
581 		nxgep->niu_hsvc_available = B_TRUE;
582 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
583 			"NIU Hypervisor service enabled"));
584 	}
585 #endif
586 
587 	nxge_hw_id_init(nxgep);
588 	nxge_hw_init_niu_common(nxgep);
589 
590 	status = nxge_setup_mutexes(nxgep);
591 	if (status != NXGE_OK) {
592 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
593 		goto nxge_attach_fail;
594 	}
595 
596 	status = nxge_setup_dev(nxgep);
597 	if (status != DDI_SUCCESS) {
598 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
599 		goto nxge_attach_fail;
600 	}
601 
602 	status = nxge_add_intrs(nxgep);
603 	if (status != DDI_SUCCESS) {
604 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
605 		goto nxge_attach_fail;
606 	}
607 	status = nxge_add_soft_intrs(nxgep);
608 	if (status != DDI_SUCCESS) {
609 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
610 		goto nxge_attach_fail;
611 	}
612 
613 	/*
614 	 * Enable interrupts.
615 	 */
616 	nxge_intrs_enable(nxgep);
617 
618 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
619 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
620 			"unable to register to mac layer (%d)", status));
621 		goto nxge_attach_fail;
622 	}
623 
624 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
625 
626 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
627 		instance));
628 
629 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
630 
631 	goto nxge_attach_exit;
632 
633 nxge_attach_fail:
634 	nxge_unattach(nxgep);
635 	goto nxge_attach_fail1;
636 
637 nxge_attach_fail5:
638 	/*
639 	 * Tear down the ndd parameters setup.
640 	 */
641 	nxge_destroy_param(nxgep);
642 
643 	/*
644 	 * Tear down the kstat setup.
645 	 */
646 	nxge_destroy_kstats(nxgep);
647 
648 nxge_attach_fail4:
649 	if (nxgep->nxge_hw_p) {
650 		nxge_uninit_common_dev(nxgep);
651 		nxgep->nxge_hw_p = NULL;
652 	}
653 
654 nxge_attach_fail3:
655 	/*
656 	 * Unmap the register setup.
657 	 */
658 	nxge_unmap_regs(nxgep);
659 
660 	nxge_fm_fini(nxgep);
661 
662 nxge_attach_fail2:
663 	ddi_soft_state_free(nxge_list, instance);
664 
665 nxge_attach_fail1:
666 	if (status != NXGE_OK)
667 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
668 	nxgep = NULL;
669 
670 nxge_attach_exit:
671 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
672 		status));
673 
674 	return (status);
675 }
676 
677 static int
678 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
679 {
680 	int 		status = DDI_SUCCESS;
681 	int 		instance;
682 	p_nxge_t 	nxgep = NULL;
683 
684 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
685 	instance = ddi_get_instance(dip);
686 	nxgep = ddi_get_soft_state(nxge_list, instance);
687 	if (nxgep == NULL) {
688 		status = DDI_FAILURE;
689 		goto nxge_detach_exit;
690 	}
691 
692 	switch (cmd) {
693 	case DDI_DETACH:
694 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
695 		break;
696 
697 	case DDI_PM_SUSPEND:
698 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
699 		nxgep->suspended = DDI_PM_SUSPEND;
700 		nxge_suspend(nxgep);
701 		break;
702 
703 	case DDI_SUSPEND:
704 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
705 		if (nxgep->suspended != DDI_PM_SUSPEND) {
706 			nxgep->suspended = DDI_SUSPEND;
707 			nxge_suspend(nxgep);
708 		}
709 		break;
710 
711 	default:
712 		status = DDI_FAILURE;
713 	}
714 
715 	if (cmd != DDI_DETACH)
716 		goto nxge_detach_exit;
717 
718 	/*
719 	 * Stop the xcvr polling.
720 	 */
721 	nxgep->suspended = cmd;
722 
723 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
724 
725 	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
726 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
727 			"<== nxge_detach status = 0x%08X", status));
728 		return (DDI_FAILURE);
729 	}
730 
731 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
732 		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
733 
734 	nxge_unattach(nxgep);
735 	nxgep = NULL;
736 
737 nxge_detach_exit:
738 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
739 		status));
740 
741 	return (status);
742 }
743 
744 static void
745 nxge_unattach(p_nxge_t nxgep)
746 {
747 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
748 
749 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
750 		return;
751 	}
752 
753 	nxgep->nxge_magic = 0;
754 
755 	if (nxgep->nxge_timerid) {
756 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
757 		nxgep->nxge_timerid = 0;
758 	}
759 
760 	if (nxgep->nxge_hw_p) {
761 		nxge_uninit_common_dev(nxgep);
762 		nxgep->nxge_hw_p = NULL;
763 	}
764 
765 #if	defined(sun4v)
766 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
767 		(void) hsvc_unregister(&nxgep->niu_hsvc);
768 		nxgep->niu_hsvc_available = B_FALSE;
769 	}
770 #endif
771 	/*
772 	 * Stop any further interrupts.
773 	 */
774 	nxge_remove_intrs(nxgep);
775 
776 	/* remove soft interrupts */
777 	nxge_remove_soft_intrs(nxgep);
778 
779 	/*
780 	 * Stop the device and free resources.
781 	 */
782 	nxge_destroy_dev(nxgep);
783 
784 	/*
785 	 * Tear down the ndd parameters setup.
786 	 */
787 	nxge_destroy_param(nxgep);
788 
789 	/*
790 	 * Tear down the kstat setup.
791 	 */
792 	nxge_destroy_kstats(nxgep);
793 
794 	/*
795 	 * Destroy all mutexes.
796 	 */
797 	nxge_destroy_mutexes(nxgep);
798 
799 	/*
800 	 * Remove the list of ndd parameters which
801 	 * were setup during attach.
802 	 */
803 	if (nxgep->dip) {
804 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
805 				    " nxge_unattach: remove all properties"));
806 
807 		(void) ddi_prop_remove_all(nxgep->dip);
808 	}
809 
810 #if NXGE_PROPERTY
811 	nxge_remove_hard_properties(nxgep);
812 #endif
813 
814 	/*
815 	 * Unmap the register setup.
816 	 */
817 	nxge_unmap_regs(nxgep);
818 
819 	nxge_fm_fini(nxgep);
820 
821 	ddi_soft_state_free(nxge_list, nxgep->instance);
822 
823 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
824 }
825 
826 static char n2_siu_name[] = "niu";
827 
828 static nxge_status_t
829 nxge_map_regs(p_nxge_t nxgep)
830 {
831 	int		ddi_status = DDI_SUCCESS;
832 	p_dev_regs_t 	dev_regs;
833 	char		buf[MAXPATHLEN + 1];
834 	char 		*devname;
835 #ifdef	NXGE_DEBUG
836 	char 		*sysname;
837 #endif
838 	off_t		regsize;
839 	nxge_status_t	status = NXGE_OK;
840 #if !defined(_BIG_ENDIAN)
841 	off_t pci_offset;
842 	uint16_t pcie_devctl;
843 #endif
844 
845 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
846 	nxgep->dev_regs = NULL;
847 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
848 	dev_regs->nxge_regh = NULL;
849 	dev_regs->nxge_pciregh = NULL;
850 	dev_regs->nxge_msix_regh = NULL;
851 	dev_regs->nxge_vir_regh = NULL;
852 	dev_regs->nxge_vir2_regh = NULL;
853 	nxgep->niu_type = NIU_TYPE_NONE;
854 
855 	devname = ddi_pathname(nxgep->dip, buf);
856 	ASSERT(strlen(devname) > 0);
857 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
858 		"nxge_map_regs: pathname devname %s", devname));
859 
860 	if (strstr(devname, n2_siu_name)) {
861 		/* N2/NIU */
862 		nxgep->niu_type = N2_NIU;
863 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
864 			"nxge_map_regs: N2/NIU devname %s", devname));
865 		/* get function number */
866 		nxgep->function_num =
867 			(devname[strlen(devname) -1] == '1' ? 1 : 0);
868 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
869 			"nxge_map_regs: N2/NIU function number %d",
870 			nxgep->function_num));
871 	} else {
872 		int		*prop_val;
873 		uint_t 		prop_len;
874 		uint8_t 	func_num;
875 
876 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
877 				0, "reg",
878 				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
879 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
880 				"Reg property not found"));
881 			ddi_status = DDI_FAILURE;
882 			goto nxge_map_regs_fail0;
883 
884 		} else {
885 			func_num = (prop_val[0] >> 8) & 0x7;
886 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
887 				"Reg property found: fun # %d",
888 				func_num));
889 			nxgep->function_num = func_num;
890 			ddi_prop_free(prop_val);
891 		}
892 	}
893 
894 	switch (nxgep->niu_type) {
895 	default:
896 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
897 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
898 			"nxge_map_regs: pci config size 0x%x", regsize));
899 
900 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
901 			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
902 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
903 		if (ddi_status != DDI_SUCCESS) {
904 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
905 				"ddi_map_regs, nxge bus config regs failed"));
906 			goto nxge_map_regs_fail0;
907 		}
908 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
909 			"nxge_map_reg: PCI config addr 0x%0llx "
910 			" handle 0x%0llx", dev_regs->nxge_pciregp,
911 			dev_regs->nxge_pciregh));
912 		/*
913 		 * IMPORTANT:
914 		 * Workaround for a bit-swapping bug in the HW
915 		 * which leaves no-snoop enabled, resulting in
916 		 * DMA not being synched properly.
917 		 */
918 #if !defined(_BIG_ENDIAN)
919 		/* workarounds for x86 systems */
920 		pci_offset = 0x80 + PCIE_DEVCTL;
921 		pcie_devctl = 0x0;
922 		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
923 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
924 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
925 				    pcie_devctl);
926 #endif
927 
928 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
929 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
930 			"nxge_map_regs: pio size 0x%x", regsize));
931 		/* set up the device mapped register */
932 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
933 			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
934 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
935 		if (ddi_status != DDI_SUCCESS) {
936 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
937 				"ddi_map_regs for Neptune global reg failed"));
938 			goto nxge_map_regs_fail1;
939 		}
940 
941 		/* set up the msi/msi-x mapped register */
942 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
943 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
944 			"nxge_map_regs: msix size 0x%x", regsize));
945 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
946 			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
947 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
948 		if (ddi_status != DDI_SUCCESS) {
949 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
950 				"ddi_map_regs for msi reg failed"));
951 			goto nxge_map_regs_fail2;
952 		}
953 
954 		/* set up the vio region mapped register */
955 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
956 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
957 			"nxge_map_regs: vio size 0x%x", regsize));
958 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
959 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
960 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
961 
962 		if (ddi_status != DDI_SUCCESS) {
963 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
964 				"ddi_map_regs for nxge vio reg failed"));
965 			goto nxge_map_regs_fail3;
966 		}
967 		nxgep->dev_regs = dev_regs;
968 
969 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
970 		NPI_PCI_ADD_HANDLE_SET(nxgep,
971 			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
972 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
973 		NPI_MSI_ADD_HANDLE_SET(nxgep,
974 			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
975 
976 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
977 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
978 
979 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
980 		NPI_REG_ADD_HANDLE_SET(nxgep,
981 			(npi_reg_ptr_t)dev_regs->nxge_regp);
982 
983 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
984 		NPI_VREG_ADD_HANDLE_SET(nxgep,
985 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
986 
987 		break;
988 
989 	case N2_NIU:
990 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
991 		/*
992 		 * Set up the device mapped register (FWARC 2006/556)
993 		 * (changed back to 1: reg starts at 1!)
994 		 */
995 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
996 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
997 			"nxge_map_regs: dev size 0x%x", regsize));
998 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
999 				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1000 				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1001 
1002 		if (ddi_status != DDI_SUCCESS) {
1003 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1004 				"ddi_map_regs for N2/NIU, global reg failed "));
1005 			goto nxge_map_regs_fail1;
1006 		}
1007 
1008 		/* set up the vio region mapped register */
1009 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1010 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1011 			"nxge_map_regs: vio (1) size 0x%x", regsize));
1012 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1013 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1014 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1015 
1016 		if (ddi_status != DDI_SUCCESS) {
1017 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1018 				"ddi_map_regs for nxge vio reg failed"));
1019 			goto nxge_map_regs_fail2;
1020 		}
1021 		/* set up the vio region mapped register */
1022 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1023 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1024 			"nxge_map_regs: vio (3) size 0x%x", regsize));
1025 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1026 			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1027 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1028 
1029 		if (ddi_status != DDI_SUCCESS) {
1030 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1031 				"ddi_map_regs for nxge vio2 reg failed"));
1032 			goto nxge_map_regs_fail3;
1033 		}
1034 		nxgep->dev_regs = dev_regs;
1035 
1036 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1037 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1038 
1039 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1040 		NPI_REG_ADD_HANDLE_SET(nxgep,
1041 			(npi_reg_ptr_t)dev_regs->nxge_regp);
1042 
1043 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1044 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1045 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1046 
1047 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1048 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
1049 			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1050 
1051 		break;
1052 	}
1053 
1054 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1055 		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1056 
1057 	goto nxge_map_regs_exit;
1058 nxge_map_regs_fail3:
1059 	if (dev_regs->nxge_msix_regh) {
1060 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1061 	}
1062 	if (dev_regs->nxge_vir_regh) {
1063 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1064 	}
1065 nxge_map_regs_fail2:
1066 	if (dev_regs->nxge_regh) {
1067 		ddi_regs_map_free(&dev_regs->nxge_regh);
1068 	}
1069 nxge_map_regs_fail1:
1070 	if (dev_regs->nxge_pciregh) {
1071 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1072 	}
1073 nxge_map_regs_fail0:
1074 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1075 	kmem_free(dev_regs, sizeof (dev_regs_t));
1076 
1077 nxge_map_regs_exit:
1078 	if (ddi_status != DDI_SUCCESS)
1079 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1080 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1081 	return (status);
1082 }
1083 
1084 static void
1085 nxge_unmap_regs(p_nxge_t nxgep)
1086 {
1087 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1088 	if (nxgep->dev_regs) {
1089 		if (nxgep->dev_regs->nxge_pciregh) {
1090 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1091 				"==> nxge_unmap_regs: bus"));
1092 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1093 			nxgep->dev_regs->nxge_pciregh = NULL;
1094 		}
1095 		if (nxgep->dev_regs->nxge_regh) {
1096 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1097 				"==> nxge_unmap_regs: device registers"));
1098 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1099 			nxgep->dev_regs->nxge_regh = NULL;
1100 		}
1101 		if (nxgep->dev_regs->nxge_msix_regh) {
1102 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1103 				"==> nxge_unmap_regs: device interrupts"));
1104 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1105 			nxgep->dev_regs->nxge_msix_regh = NULL;
1106 		}
1107 		if (nxgep->dev_regs->nxge_vir_regh) {
1108 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1109 				"==> nxge_unmap_regs: vio region"));
1110 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1111 			nxgep->dev_regs->nxge_vir_regh = NULL;
1112 		}
1113 		if (nxgep->dev_regs->nxge_vir2_regh) {
1114 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1115 				"==> nxge_unmap_regs: vio2 region"));
1116 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1117 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1118 		}
1119 
1120 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1121 		nxgep->dev_regs = NULL;
1122 	}
1123 
1124 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1125 }
1126 
1127 static nxge_status_t
1128 nxge_setup_mutexes(p_nxge_t nxgep)
1129 {
1130 	int ddi_status = DDI_SUCCESS;
1131 	nxge_status_t status = NXGE_OK;
1132 	nxge_classify_t *classify_ptr;
1133 	int partition;
1134 
1135 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1136 
1137 	/*
1138 	 * Get the interrupt cookie so the mutexes can be
1139 	 * initialized.
1140 	 */
1141 	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1142 					&nxgep->interrupt_cookie);
1143 	if (ddi_status != DDI_SUCCESS) {
1144 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1145 			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
1146 		goto nxge_setup_mutexes_exit;
1147 	}
1148 
1149 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1150 	MUTEX_INIT(&nxgep->poll_lock, NULL,
1151 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1152 
1153 	/*
1154 	 * Initialize mutexes for this device.
1155 	 */
1156 	MUTEX_INIT(nxgep->genlock, NULL,
1157 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1158 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1159 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1160 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1161 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1162 	RW_INIT(&nxgep->filter_lock, NULL,
1163 		RW_DRIVER, (void *)nxgep->interrupt_cookie);
1164 
1165 	classify_ptr = &nxgep->classifier;
1166 	/*
1167 	 * FFLP mutexes are never used in interrupt context,
1168 	 * as FFLP operations can take a very long time to
1169 	 * complete and hence are not suitable to invoke from
1170 	 * interrupt handlers.
1171 	 */
1172 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1173 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1174 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1175 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1176 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1177 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1178 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1179 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1180 		}
1181 	}
1182 
1183 nxge_setup_mutexes_exit:
1184 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1185 	    "<== nxge_setup_mutexes status = %x", status));
1186 
1187 	if (ddi_status != DDI_SUCCESS)
1188 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1189 
1190 	return (status);
1191 }
1192 
1193 static void
1194 nxge_destroy_mutexes(p_nxge_t nxgep)
1195 {
1196 	int partition;
1197 	nxge_classify_t *classify_ptr;
1198 
1199 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1200 	RW_DESTROY(&nxgep->filter_lock);
1201 	MUTEX_DESTROY(&nxgep->mif_lock);
1202 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1203 	MUTEX_DESTROY(nxgep->genlock);
1204 
1205 	classify_ptr = &nxgep->classifier;
1206 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1207 
1208 	/* Destroy all polling resources. */
1209 	MUTEX_DESTROY(&nxgep->poll_lock);
1210 	cv_destroy(&nxgep->poll_cv);
1211 
1212 	/* free data structures, based on HW type */
1213 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1214 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1215 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1216 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1217 		}
1218 	}
1219 
1220 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1221 }
1222 
1223 nxge_status_t
1224 nxge_init(p_nxge_t nxgep)
1225 {
1226 	nxge_status_t	status = NXGE_OK;
1227 
1228 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1229 
1230 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1231 		return (status);
1232 	}
1233 
1234 	/*
1235 	 * Allocate system memory for the receive/transmit buffer blocks
1236 	 * and receive/transmit descriptor rings.
1237 	 */
1238 	status = nxge_alloc_mem_pool(nxgep);
1239 	if (status != NXGE_OK) {
1240 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1241 		goto nxge_init_fail1;
1242 	}
1243 
1244 	/*
1245 	 * Initialize and enable TXC registers
1246 	 * (Globally enable TX controller,
1247 	 *  enable a port, configure dma channel bitmap,
1248 	 *  configure the max burst size).
1249 	 */
1250 	status = nxge_txc_init(nxgep);
1251 	if (status != NXGE_OK) {
1252 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
1253 		goto nxge_init_fail2;
1254 	}
1255 
1256 	/*
1257 	 * Initialize and enable TXDMA channels.
1258 	 */
1259 	status = nxge_init_txdma_channels(nxgep);
1260 	if (status != NXGE_OK) {
1261 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1262 		goto nxge_init_fail3;
1263 	}
1264 
1265 	/*
1266 	 * Initialize and enable RXDMA channels.
1267 	 */
1268 	status = nxge_init_rxdma_channels(nxgep);
1269 	if (status != NXGE_OK) {
1270 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1271 		goto nxge_init_fail4;
1272 	}
1273 
1274 	/*
1275 	 * Initialize TCAM and FCRAM (Neptune).
1276 	 */
1277 	status = nxge_classify_init(nxgep);
1278 	if (status != NXGE_OK) {
1279 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1280 		goto nxge_init_fail5;
1281 	}
1282 
1283 	/*
1284 	 * Initialize ZCP
1285 	 */
1286 	status = nxge_zcp_init(nxgep);
1287 	if (status != NXGE_OK) {
1288 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1289 		goto nxge_init_fail5;
1290 	}
1291 
1292 	/*
1293 	 * Initialize IPP.
1294 	 */
1295 	status = nxge_ipp_init(nxgep);
1296 	if (status != NXGE_OK) {
1297 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1298 		goto nxge_init_fail5;
1299 	}
1300 
1301 	/*
1302 	 * Initialize the MAC block.
1303 	 */
1304 	status = nxge_mac_init(nxgep);
1305 	if (status != NXGE_OK) {
1306 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1307 		goto nxge_init_fail5;
1308 	}
1309 
1310 	nxge_intrs_enable(nxgep);
1311 
1312 	/*
1313 	 * Enable hardware interrupts.
1314 	 */
1315 	nxge_intr_hw_enable(nxgep);
1316 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1317 
1318 	goto nxge_init_exit;
1319 
1320 nxge_init_fail5:
1321 	nxge_uninit_rxdma_channels(nxgep);
1322 nxge_init_fail4:
1323 	nxge_uninit_txdma_channels(nxgep);
1324 nxge_init_fail3:
1325 	(void) nxge_txc_uninit(nxgep);
1326 nxge_init_fail2:
1327 	nxge_free_mem_pool(nxgep);
1328 nxge_init_fail1:
1329 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1330 		"<== nxge_init status (failed) = 0x%08x", status));
1331 	return (status);
1332 
1333 nxge_init_exit:
1334 
1335 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1336 		status));
1337 	return (status);
1338 }
1339 
1340 
1341 timeout_id_t
1342 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1343 {
1344 	if ((nxgep->suspended == 0) ||
1345 			(nxgep->suspended == DDI_RESUME)) {
1346 		return (timeout(func, (caddr_t)nxgep,
1347 			drv_usectohz(1000 * msec)));
1348 	}
1349 	return (NULL);
1350 }
1351 
1352 /*ARGSUSED*/
1353 void
1354 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1355 {
1356 	if (timerid) {
1357 		(void) untimeout(timerid);
1358 	}
1359 }
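
/*
 * Usage sketch (my_timer_func is hypothetical): arm a one-second
 * check and cancel it on teardown.
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, my_timer_func,
 *	    1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 */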
1360 
1361 void
1362 nxge_uninit(p_nxge_t nxgep)
1363 {
1364 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1365 
1366 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1367 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1368 			"==> nxge_uninit: not initialized"));
1369 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1370 			"<== nxge_uninit"));
1371 		return;
1372 	}
1373 
1374 	/* stop timer */
1375 	if (nxgep->nxge_timerid) {
1376 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1377 		nxgep->nxge_timerid = 0;
1378 	}
1379 
1380 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1381 	(void) nxge_intr_hw_disable(nxgep);
1382 
1383 	/*
1384 	 * Reset the receive MAC side.
1385 	 */
1386 	(void) nxge_rx_mac_disable(nxgep);
1387 
1388 	/* Disable and soft reset the IPP */
1389 	(void) nxge_ipp_disable(nxgep);
1390 
1391 	/* Free classification resources */
1392 	(void) nxge_classify_uninit(nxgep);
1393 
1394 	/*
1395 	 * Reset the transmit/receive DMA side.
1396 	 */
1397 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1398 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1399 
1400 	nxge_uninit_txdma_channels(nxgep);
1401 	nxge_uninit_rxdma_channels(nxgep);
1402 
1403 	/*
1404 	 * Reset the transmit MAC side.
1405 	 */
1406 	(void) nxge_tx_mac_disable(nxgep);
1407 
1408 	nxge_free_mem_pool(nxgep);
1409 
1410 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1411 
1412 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1413 
1414 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1415 		"nxge_mblks_pending %d", nxge_mblks_pending));
1416 }
1417 
1418 void
1419 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1420 {
1421 #if defined(__i386)
1422 	size_t		reg;
1423 #else
1424 	uint64_t	reg;
1425 #endif
1426 	uint64_t	regdata;
1427 	int		i, retry;
1428 
1429 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (reg));
1430 	regdata = 0;
1431 	retry = 1;
1432 
1433 	for (i = 0; i < retry; i++) {
1434 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1435 	}
1436 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1437 }
1438 
1439 void
1440 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1441 {
1442 #if defined(__i386)
1443 	size_t		reg;
1444 #else
1445 	uint64_t	reg;
1446 #endif
1447 	uint64_t	buf[2];
1448 
1449 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1450 #if defined(__i386)
1451 	reg = (size_t)buf[0];
1452 #else
1453 	reg = buf[0];
1454 #endif
1455 
1456 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1457 }
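
/*
 * Message layout implemented by the two routines above: nxge_get64()
 * expects the first 8 bytes of the mblk to hold a register offset and
 * overwrites them with the register value; nxge_put64() expects 16
 * bytes, the offset followed by the value to write.
 */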
1458 
1459 
1460 nxge_os_mutex_t nxgedebuglock;
1461 int nxge_debug_init = 0;
1462 
1463 /*ARGSUSED*/
1464 /*VARARGS*/
1465 void
1466 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1467 {
1468 	char msg_buffer[1048];
1469 	char prefix_buffer[32];
1470 	int instance;
1471 	uint64_t debug_level;
1472 	int cmn_level = CE_CONT;
1473 	va_list ap;
1474 
1475 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1476 		nxgep->nxge_debug_level;
1477 
1478 	if ((level & debug_level) ||
1479 		(level == NXGE_NOTE) ||
1480 		(level == NXGE_ERR_CTL)) {
1481 		/* do the msg processing */
1482 		if (nxge_debug_init == 0) {
1483 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1484 			nxge_debug_init = 1;
1485 		}
1486 
1487 		MUTEX_ENTER(&nxgedebuglock);
1488 
1489 		if ((level & NXGE_NOTE)) {
1490 			cmn_level = CE_NOTE;
1491 		}
1492 
1493 		if (level & NXGE_ERR_CTL) {
1494 			cmn_level = CE_WARN;
1495 		}
1496 
1497 		va_start(ap, fmt);
1498 		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
1499 		va_end(ap);
1500 		if (nxgep == NULL) {
1501 			instance = -1;
1502 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1503 		} else {
1504 			instance = nxgep->instance;
1505 			(void) sprintf(prefix_buffer,
1506 						    "%s%d :", "nxge", instance);
1507 		}
1508 
1509 		MUTEX_EXIT(&nxgedebuglock);
1510 		cmn_err(cmn_level, "!%s %s\n",
1511 				prefix_buffer, msg_buffer);
1512 
1513 	}
1514 }
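
/*
 * Example (sketch): message output can be widened at boot time via
 * /etc/system; the bit assignments for the level mask live in the
 * nxge headers, so the value below is only a placeholder:
 *
 *	set nxge:nxge_debug_level = 0xf
 */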
1515 
1516 char *
1517 nxge_dump_packet(char *addr, int size)
1518 {
1519 	uchar_t *ap = (uchar_t *)addr;
1520 	int i;
1521 	static char etherbuf[1024];
1522 	char *cp = etherbuf;
1523 	char digits[] = "0123456789abcdef";
1524 
1525 	if (!size)
1526 		size = 60;
1527 
1528 	if (size > MAX_DUMP_SZ) {
1529 		/* Dump the leading bytes */
1530 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1531 			if (*ap > 0x0f)
1532 				*cp++ = digits[*ap >> 4];
1533 			*cp++ = digits[*ap++ & 0xf];
1534 			*cp++ = ':';
1535 		}
1536 		for (i = 0; i < 20; i++)
1537 			*cp++ = '.';
1538 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1539 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1540 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1541 			if (*ap > 0x0f)
1542 				*cp++ = digits[*ap >> 4];
1543 			*cp++ = digits[*ap++ & 0xf];
1544 			*cp++ = ':';
1545 		}
1546 	} else {
1547 		for (i = 0; i < size; i++) {
1548 			if (*ap > 0x0f)
1549 				*cp++ = digits[*ap >> 4];
1550 			*cp++ = digits[*ap++ & 0xf];
1551 			*cp++ = ':';
1552 		}
1553 	}
1554 	*--cp = 0;
1555 	return (etherbuf);
1556 }
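
/*
 * Usage sketch: dump the leading bytes of a received frame (passing
 * size 0 defaults to 60 bytes, per the check above):
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 0)));
 */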
1557 
1558 #ifdef	NXGE_DEBUG
1559 static void
1560 nxge_test_map_regs(p_nxge_t nxgep)
1561 {
1562 	ddi_acc_handle_t cfg_handle;
1563 	p_pci_cfg_t	cfg_ptr;
1564 	ddi_acc_handle_t dev_handle;
1565 	char		*dev_ptr;
1566 	ddi_acc_handle_t pci_config_handle;
1567 	uint32_t	regval;
1568 	int		i;
1569 
1570 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1571 
1572 	dev_handle = nxgep->dev_regs->nxge_regh;
1573 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1574 
1575 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1576 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1577 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1578 
1579 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1580 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1581 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1582 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1583 		    &cfg_ptr->vendorid));
1584 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1585 		    "\tvendorid 0x%x devid 0x%x",
1586 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1587 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1588 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1589 		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1590 		    "bar1c 0x%x",
1591 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1592 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1593 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1594 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1595 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1596 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1597 		    "base 28 0x%x bar2c 0x%x\n",
1598 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1599 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1600 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1601 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1602 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1603 		    "\nNeptune PCI BAR: base30 0x%x\n",
1604 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1605 
1606 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1607 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1608 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1609 		    "first  0x%llx second 0x%llx third 0x%llx "
1610 		    "last 0x%llx ",
1611 		    NXGE_PIO_READ64(dev_handle,
1612 		    (uint64_t *)(dev_ptr + 0),  0),
1613 		    NXGE_PIO_READ64(dev_handle,
1614 		    (uint64_t *)(dev_ptr + 8),  0),
1615 		    NXGE_PIO_READ64(dev_handle,
1616 		    (uint64_t *)(dev_ptr + 16), 0),
1617 		    NXGE_PIO_READ64(dev_handle,
1618 		    (uint64_t *)(dev_ptr + 24), 0)));
1619 	}
1620 }
1621 
1622 #endif
1623 
1624 static void
1625 nxge_suspend(p_nxge_t nxgep)
1626 {
1627 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1628 
1629 	nxge_intrs_disable(nxgep);
1630 	nxge_destroy_dev(nxgep);
1631 
1632 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1633 }
1634 
1635 static nxge_status_t
1636 nxge_resume(p_nxge_t nxgep)
1637 {
1638 	nxge_status_t status = NXGE_OK;
1639 
1640 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1641 
1642 	nxgep->suspended = DDI_RESUME;
1643 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1644 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1645 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1646 	(void) nxge_rx_mac_enable(nxgep);
1647 	(void) nxge_tx_mac_enable(nxgep);
1648 	nxge_intrs_enable(nxgep);
1649 	nxgep->suspended = 0;
1650 
1651 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1652 			"<== nxge_resume status = 0x%x", status));
1653 	return (status);
1654 }
1655 
1656 static nxge_status_t
1657 nxge_setup_dev(p_nxge_t nxgep)
1658 {
1659 	nxge_status_t	status = NXGE_OK;
1660 
1661 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1662 	    nxgep->mac.portnum));
1663 
1664 	status = nxge_link_init(nxgep);
1665 
1666 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1667 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1668 			"port%d Bad register acc handle", nxgep->mac.portnum));
1669 		status = NXGE_ERROR;
1670 	}
1671 
1672 	if (status != NXGE_OK) {
1673 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1674 			    " nxge_setup_dev status "
1675 			    "(xcvr init 0x%08x)", status));
1676 		goto nxge_setup_dev_exit;
1677 	}
1678 
1679 nxge_setup_dev_exit:
1680 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1681 		"<== nxge_setup_dev port %d status = 0x%08x",
1682 		nxgep->mac.portnum, status));
1683 
1684 	return (status);
1685 }
1686 
1687 static void
1688 nxge_destroy_dev(p_nxge_t nxgep)
1689 {
1690 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
1691 
1692 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1693 
1694 	(void) nxge_hw_stop(nxgep);
1695 
1696 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
1697 }
1698 
1699 static nxge_status_t
1700 nxge_setup_system_dma_pages(p_nxge_t nxgep)
1701 {
1702 	int 			ddi_status = DDI_SUCCESS;
1703 	uint_t 			count;
1704 	ddi_dma_cookie_t 	cookie;
1705 	uint_t 			iommu_pagesize;
1706 	nxge_status_t		status = NXGE_OK;
1707 
1708 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
1709 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
1710 	if (nxgep->niu_type != N2_NIU) {
1711 		iommu_pagesize = dvma_pagesize(nxgep->dip);
1712 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1713 			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1714 			" default_block_size %d iommu_pagesize %d",
1715 			nxgep->sys_page_sz,
1716 			ddi_ptob(nxgep->dip, (ulong_t)1),
1717 			nxgep->rx_default_block_size,
1718 			iommu_pagesize));
1719 
1720 		if (iommu_pagesize != 0) {
1721 			if (nxgep->sys_page_sz == iommu_pagesize) {
1722 				if (iommu_pagesize > 0x4000)
1723 					nxgep->sys_page_sz = 0x4000;
1724 			} else {
1725 				if (nxgep->sys_page_sz > iommu_pagesize)
1726 					nxgep->sys_page_sz = iommu_pagesize;
1727 			}
1728 		}
1729 	}
1730 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1731 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1732 		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1733 		"default_block_size %d page mask %d",
1734 		nxgep->sys_page_sz,
1735 		ddi_ptob(nxgep->dip, (ulong_t)1),
1736 		nxgep->rx_default_block_size,
1737 		nxgep->sys_page_mask));
1738 
1739 
1740 	switch (nxgep->sys_page_sz) {
1741 	default:
1742 		nxgep->sys_page_sz = 0x1000;
1743 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
1744 		nxgep->rx_default_block_size = 0x1000;
1745 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1746 		break;
1747 	case 0x1000:
1748 		nxgep->rx_default_block_size = 0x1000;
1749 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
1750 		break;
1751 	case 0x2000:
1752 		nxgep->rx_default_block_size = 0x2000;
1753 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1754 		break;
1755 	case 0x4000:
1756 		nxgep->rx_default_block_size = 0x4000;
1757 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
1758 		break;
1759 	case 0x8000:
1760 		nxgep->rx_default_block_size = 0x8000;
1761 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
1762 		break;
1763 	}
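
	/*
	 * Worked example: with a sys_page_sz of 0x2000, the mask
	 * computed above is ~(0x2000 - 1) = 0xffffffffffffe000, i.e.
	 * applying the mask clears the page-offset bits.
	 */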
1764 
1765 #ifndef USE_RX_BIG_BUF
1766 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
1767 #else
1768 	nxgep->rx_default_block_size = 0x2000;
1769 	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
1770 #endif
1771 	/*
1772 	 * Get the system DMA burst size.
1773 	 */
1774 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
1775 			DDI_DMA_DONTWAIT, 0,
1776 			&nxgep->dmasparehandle);
1777 	if (ddi_status != DDI_SUCCESS) {
1778 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1779 			"ddi_dma_alloc_handle: failed "
1780 			" status 0x%x", ddi_status));
1781 		goto nxge_get_soft_properties_exit;
1782 	}
1783 
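	/*
	 * Bind an arbitrary kernel address (the spare handle itself
	 * serves as a dummy buffer) purely so that
	 * ddi_dma_burstsizes() below can report the system burst size.
	 */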
1784 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
1785 				(caddr_t)nxgep->dmasparehandle,
1786 				sizeof (nxgep->dmasparehandle),
1787 				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1788 				DDI_DMA_DONTWAIT, 0,
1789 				&cookie, &count);
1790 	if (ddi_status != DDI_DMA_MAPPED) {
1791 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1792 			"Binding spare handle to find system"
1793 			" burstsize failed."));
1794 		ddi_status = DDI_FAILURE;
1795 		goto nxge_get_soft_properties_fail1;
1796 	}
1797 
1798 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
1799 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
1800 
1801 nxge_get_soft_properties_fail1:
1802 	ddi_dma_free_handle(&nxgep->dmasparehandle);
1803 
1804 nxge_get_soft_properties_exit:
1805 
1806 	if (ddi_status != DDI_SUCCESS)
1807 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1808 
1809 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1810 		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
1811 	return (status);
1812 }
1813 
1814 static nxge_status_t
1815 nxge_alloc_mem_pool(p_nxge_t nxgep)
1816 {
1817 	nxge_status_t	status = NXGE_OK;
1818 
1819 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
1820 
1821 	status = nxge_alloc_rx_mem_pool(nxgep);
1822 	if (status != NXGE_OK) {
1823 		return (NXGE_ERROR);
1824 	}
1825 
1826 	status = nxge_alloc_tx_mem_pool(nxgep);
1827 	if (status != NXGE_OK) {
1828 		nxge_free_rx_mem_pool(nxgep);
1829 		return (NXGE_ERROR);
1830 	}
1831 
1832 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
1833 	return (NXGE_OK);
1834 }
1835 
1836 static void
1837 nxge_free_mem_pool(p_nxge_t nxgep)
1838 {
1839 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
1840 
1841 	nxge_free_rx_mem_pool(nxgep);
1842 	nxge_free_tx_mem_pool(nxgep);
1843 
1844 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
1845 }
1846 
1847 static nxge_status_t
1848 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
1849 {
1850 	int			i, j;
1851 	uint32_t		ndmas, st_rdc;
1852 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
1853 	p_nxge_hw_pt_cfg_t	p_cfgp;
1854 	p_nxge_dma_pool_t	dma_poolp;
1855 	p_nxge_dma_common_t	*dma_buf_p;
1856 	p_nxge_dma_pool_t	dma_cntl_poolp;
1857 	p_nxge_dma_common_t	*dma_cntl_p;
1858 	size_t			rx_buf_alloc_size;
1859 	size_t			rx_cntl_alloc_size;
1860 	uint32_t 		*num_chunks; /* per dma */
1861 	nxge_status_t		status = NXGE_OK;
1862 
1863 	uint32_t		nxge_port_rbr_size;
1864 	uint32_t		nxge_port_rbr_spare_size;
1865 	uint32_t		nxge_port_rcr_size;
1866 
1867 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
1868 
1869 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1870 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1871 	st_rdc = p_cfgp->start_rdc;
1872 	ndmas = p_cfgp->max_rdcs;
1873 
1874 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1875 		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1876 
1877 	/*
1878 	 * Allocate memory for each receive DMA channel.
1879 	 */
1880 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
1881 			KM_SLEEP);
1882 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1883 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1884 
1885 	dma_cntl_poolp = (p_nxge_dma_pool_t)
1886 				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
1887 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
1888 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
1889 
1890 	num_chunks = (uint32_t *)KMEM_ZALLOC(
1891 			sizeof (uint32_t) * ndmas, KM_SLEEP);
1892 
1893 	/*
1894 	 * Assume that each DMA channel will be configured with default
1895 	 * block size.
1896 	 * RBR block counts are rounded up to a multiple of the batch count (16).
1897 	 */
1898 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
1899 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
1900 
1901 	if (!nxge_port_rbr_size) {
1902 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
1903 	}
1904 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
1905 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
1906 			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
1907 	}
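	/*
	 * Example (hypothetical tunable value): an rbr size of 1000 is
	 * not a multiple of NXGE_RXDMA_POST_BATCH (16) and is rounded
	 * up to 16 * (1000 / 16 + 1) = 1008 blocks.
	 */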
1908 
1909 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
1910 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
1911 
1912 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
1913 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
1914 			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
1915 	}
1916 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
1917 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1918 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
1919 		    "set to default %d",
1920 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
1921 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
1922 	}
1923 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
1924 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1925 		    "nxge_alloc_rx_mem_pool: RCR size too high %d, "
1926 		    "set to default %d",
1927 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
1928 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
1929 	}
1930 
1931 	/*
1932 	 * N2/NIU limits the descriptor sizes: contiguous memory allocation
1933 	 * (contig_mem_alloc) for data buffers is capped at 4M, and control
1934 	 * buffers must be little endian and allocated with the ddi/dki
1935 	 * memory allocation functions.
1936 	 */
1937 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1938 	if (nxgep->niu_type == N2_NIU) {
1939 		nxge_port_rbr_spare_size = 0;
1940 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
1941 				(!ISP2(nxge_port_rbr_size))) {
1942 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
1943 		}
1944 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
1945 				(!ISP2(nxge_port_rcr_size))) {
1946 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
1947 		}
1948 	}
1949 #endif
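	/*
	 * The N2/NIU ring sizes above are clamped to power-of-2 maxima;
	 * e.g. a (hypothetical) tunable rbr size of 3000 is not a power
	 * of 2 and would be replaced by NXGE_NIU_CONTIG_RBR_MAX.
	 */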
1950 
1951 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
1952 		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
1953 
1954 	/*
1955 	 * Addresses of receive block ring, receive completion ring and the
1956 	 * mailbox must be all cache-aligned (64 bytes).
1957 	 */
1958 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
1959 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1960 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
1961 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
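	/*
	 * That is: one rx_desc_t per RBR entry (including spares), one
	 * rcr_entry_t per RCR entry, plus a single mailbox, all carved
	 * out of one control allocation per channel.
	 */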
1962 
1963 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
1964 		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
1965 		"nxge_port_rcr_size = %d "
1966 		"rx_cntl_alloc_size = %d",
1967 		nxge_port_rbr_size, nxge_port_rbr_spare_size,
1968 		nxge_port_rcr_size,
1969 		rx_cntl_alloc_size));
1970 
1971 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
1972 	if (nxgep->niu_type == N2_NIU) {
1973 		if (!ISP2(rx_buf_alloc_size)) {
1974 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1975 				"==> nxge_alloc_rx_mem_pool: "
1976 				"buffer size must be a power of 2"));
1977 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1978 			goto nxge_alloc_rx_mem_pool_exit;
1979 		}
1980 
1981 		if (rx_buf_alloc_size > (1 << 22)) {
1982 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1983 				"==> nxge_alloc_rx_mem_pool: "
1984 				"buffer size limited to 4M"));
1985 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1986 			goto nxge_alloc_rx_mem_pool_exit;
1987 		}
1988 
1989 		if (rx_cntl_alloc_size < 0x2000) {
1990 			rx_cntl_alloc_size = 0x2000;
1991 		}
1992 	}
1993 #endif
1994 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
1995 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
1996 
1997 	/*
1998 	 * Allocate memory for receive buffers and descriptor rings.
1999 	 * Replace allocation functions with interface functions provided
2000 	 * by the partition manager when it is available.
2001 	 */
2002 	/*
2003 	 * Allocate memory for the receive buffer blocks.
2004 	 */
2005 	for (i = 0; i < ndmas; i++) {
2006 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2007 			" nxge_alloc_rx_mem_pool to alloc mem: "
2008 			" dma %d dma_buf_p %llx &dma_buf_p %llx",
2009 			i, dma_buf_p[i], &dma_buf_p[i]));
2010 		num_chunks[i] = 0;
2011 		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
2012 				rx_buf_alloc_size,
2013 				nxgep->rx_default_block_size, &num_chunks[i]);
2014 		if (status != NXGE_OK) {
2015 			break;
2016 		}
2017 		st_rdc++;
2018 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2019 			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
2020 			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
2021 			dma_buf_p[i], &dma_buf_p[i]));
2022 	}
2023 	if (i < ndmas) {
2024 		goto nxge_alloc_rx_mem_fail1;
2025 	}
2026 	/*
2027 	 * Allocate memory for descriptor rings and mailbox.
2028 	 */
2029 	st_rdc = p_cfgp->start_rdc;
2030 	for (j = 0; j < ndmas; j++) {
2031 		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
2032 					rx_cntl_alloc_size);
2033 		if (status != NXGE_OK) {
2034 			break;
2035 		}
2036 		st_rdc++;
2037 	}
2038 	if (j < ndmas) {
2039 		goto nxge_alloc_rx_mem_fail2;
2040 	}
2041 
2042 	dma_poolp->ndmas = ndmas;
2043 	dma_poolp->num_chunks = num_chunks;
2044 	dma_poolp->buf_allocated = B_TRUE;
2045 	nxgep->rx_buf_pool_p = dma_poolp;
2046 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2047 
2048 	dma_cntl_poolp->ndmas = ndmas;
2049 	dma_cntl_poolp->buf_allocated = B_TRUE;
2050 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2051 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2052 
2053 	goto nxge_alloc_rx_mem_pool_exit;
2054 
2055 nxge_alloc_rx_mem_fail2:
2056 	/* Free control buffers */
2057 	j--;
2058 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2059 		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
2060 	for (; j >= 0; j--) {
2061 		nxge_free_rx_cntl_dma(nxgep,
2062 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2063 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2064 			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
2065 			j));
2066 	}
2067 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2068 		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
2069 
2070 nxge_alloc_rx_mem_fail1:
2071 	/* Free data buffers */
2072 	i--;
2073 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2074 		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
2075 	for (; i >= 0; i--) {
2076 		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2077 			num_chunks[i]);
2078 	}
2079 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2080 		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
2081 
2082 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2083 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2084 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2085 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2086 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2087 
2088 nxge_alloc_rx_mem_pool_exit:
2089 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2090 		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2091 
2092 	return (status);
2093 }
2094 
2095 static void
2096 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2097 {
2098 	uint32_t		i, ndmas;
2099 	p_nxge_dma_pool_t	dma_poolp;
2100 	p_nxge_dma_common_t	*dma_buf_p;
2101 	p_nxge_dma_pool_t	dma_cntl_poolp;
2102 	p_nxge_dma_common_t	*dma_cntl_p;
2103 	uint32_t 		*num_chunks;
2104 
2105 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2106 
2107 	dma_poolp = nxgep->rx_buf_pool_p;
2108 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2109 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2110 			"<== nxge_free_rx_mem_pool "
2111 			"(null rx buf pool or buf not allocated)"));
2112 		return;
2113 	}
2114 
2115 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
2116 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2117 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2118 			"<== nxge_free_rx_mem_pool "
2119 			"(null rx cntl buf pool or cntl buf not allocated)"));
2120 		return;
2121 	}
2122 
2123 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2124 	num_chunks = dma_poolp->num_chunks;
2125 
2126 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2127 	ndmas = dma_cntl_poolp->ndmas;
2128 
2129 	for (i = 0; i < ndmas; i++) {
2130 		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2131 	}
2132 
2133 	for (i = 0; i < ndmas; i++) {
2134 		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
2135 	}
2136 
2137 	for (i = 0; i < ndmas; i++) {
2138 		KMEM_FREE(dma_buf_p[i],
2139 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2140 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2141 	}
2142 
2143 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2144 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2145 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2146 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2147 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2148 
2149 	nxgep->rx_buf_pool_p = NULL;
2150 	nxgep->rx_cntl_pool_p = NULL;
2151 
2152 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2153 }
2154 
2155 
2156 static nxge_status_t
2157 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2158 	p_nxge_dma_common_t *dmap,
2159 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2160 {
2161 	p_nxge_dma_common_t 	rx_dmap;
2162 	nxge_status_t		status = NXGE_OK;
2163 	size_t			total_alloc_size;
2164 	size_t			allocated = 0;
2165 	int			i, size_index, array_size;
2166 
2167 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2168 
2169 	rx_dmap = (p_nxge_dma_common_t)
2170 			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2171 			KM_SLEEP);
2172 
2173 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2174 		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2175 		dma_channel, alloc_size, block_size, dmap));
2176 
2177 	total_alloc_size = alloc_size;
2178 
2179 #if defined(RX_USE_RECLAIM_POST)
2180 	total_alloc_size = alloc_size + alloc_size/4;
2181 #endif
2182 
2183 	i = 0;
2184 	size_index = 0;
2185 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2186 	while ((size_index < array_size) &&
2187 			(alloc_sizes[size_index] < alloc_size))
2188 			size_index++;
2189 	if (size_index >= array_size) {
2190 		size_index = array_size - 1;
2191 	}
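	/*
	 * Allocate the buffer area as up to NXGE_DMA_BLOCK chunks:
	 * start with the smallest alloc_sizes[] entry that covers the
	 * request and step down to smaller chunk sizes whenever an
	 * allocation fails.
	 */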
2192 
2193 	while ((allocated < total_alloc_size) &&
2194 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2195 		rx_dmap[i].dma_chunk_index = i;
2196 		rx_dmap[i].block_size = block_size;
2197 		rx_dmap[i].alength = alloc_sizes[size_index];
2198 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2199 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2200 		rx_dmap[i].dma_channel = dma_channel;
2201 		rx_dmap[i].contig_alloc_type = B_FALSE;
2202 
2203 		/*
2204 		 * N2/NIU: data buffers must be contiguous as the driver
2205 		 *	   needs to call Hypervisor api to set up
2206 		 *	   logical pages.
2207 		 */
2208 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2209 			rx_dmap[i].contig_alloc_type = B_TRUE;
2210 		}
2211 
2212 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2213 			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2214 			"i %d nblocks %d alength %d",
2215 			dma_channel, i, &rx_dmap[i], block_size,
2216 			i, rx_dmap[i].nblocks,
2217 			rx_dmap[i].alength));
2218 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2219 			&nxge_rx_dma_attr,
2220 			rx_dmap[i].alength,
2221 			&nxge_dev_buf_dma_acc_attr,
2222 			DDI_DMA_READ | DDI_DMA_STREAMING,
2223 			(p_nxge_dma_common_t)(&rx_dmap[i]));
2224 		if (status != NXGE_OK) {
2225 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2226 				" nxge_alloc_rx_buf_dma: Alloc Failed "));
2227 			size_index--;
2228 		} else {
2229 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2230 				" alloc_rx_buf_dma allocated rdc %d "
2231 				"chunk %d size %x dvma %x bufp %llx ",
2232 				dma_channel, i, rx_dmap[i].alength,
2233 				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
2234 			i++;
2235 			allocated += alloc_sizes[size_index];
2236 		}
2237 	}
2238 
2239 
2240 	if (allocated < total_alloc_size) {
2241 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2242 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2243 		    "allocated 0x%x requested 0x%x",
2244 		    dma_channel,
2245 		    allocated, total_alloc_size));
2246 		status = NXGE_ERROR;
2247 		goto nxge_alloc_rx_mem_fail1;
2248 	}
2249 
2250 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2251 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2252 	    "allocated 0x%x requested 0x%x",
2253 	    dma_channel,
2254 	    allocated, total_alloc_size));
2255 
2256 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2257 		" alloc_rx_buf_dma rdc %d allocated %d chunks",
2258 		dma_channel, i));
2259 	*num_chunks = i;
2260 	*dmap = rx_dmap;
2261 
2262 	goto nxge_alloc_rx_mem_exit;
2263 
2264 nxge_alloc_rx_mem_fail1:
2265 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2266 
2267 nxge_alloc_rx_mem_exit:
2268 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2269 		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2270 
2271 	return (status);
2272 }
2273 
2274 /*ARGSUSED*/
2275 static void
2276 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2277     uint32_t num_chunks)
2278 {
2279 	int		i;
2280 
2281 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2282 		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2283 
2284 	for (i = 0; i < num_chunks; i++) {
2285 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2286 			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2287 				i, dmap));
2288 		nxge_dma_mem_free(dmap++);
2289 	}
2290 
2291 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2292 }
2293 
2294 /*ARGSUSED*/
2295 static nxge_status_t
2296 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2297     p_nxge_dma_common_t *dmap, size_t size)
2298 {
2299 	p_nxge_dma_common_t 	rx_dmap;
2300 	nxge_status_t		status = NXGE_OK;
2301 
2302 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2303 
2304 	rx_dmap = (p_nxge_dma_common_t)
2305 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2306 
2307 	rx_dmap->contig_alloc_type = B_FALSE;
2308 
2309 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2310 			&nxge_desc_dma_attr,
2311 			size,
2312 			&nxge_dev_desc_dma_acc_attr,
2313 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2314 			rx_dmap);
2315 	if (status != NXGE_OK) {
2316 		goto nxge_alloc_rx_cntl_dma_fail1;
2317 	}
2318 
2319 	*dmap = rx_dmap;
2320 	goto nxge_alloc_rx_cntl_dma_exit;
2321 
2322 nxge_alloc_rx_cntl_dma_fail1:
2323 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2324 
2325 nxge_alloc_rx_cntl_dma_exit:
2326 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2327 		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2328 
2329 	return (status);
2330 }
2331 
2332 /*ARGSUSED*/
2333 static void
2334 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2335 {
2336 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2337 
2338 	nxge_dma_mem_free(dmap);
2339 
2340 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2341 }
2342 
2343 static nxge_status_t
2344 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2345 {
2346 	nxge_status_t		status = NXGE_OK;
2347 	int			i, j;
2348 	uint32_t		ndmas, st_tdc;
2349 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2350 	p_nxge_hw_pt_cfg_t	p_cfgp;
2351 	p_nxge_dma_pool_t	dma_poolp;
2352 	p_nxge_dma_common_t	*dma_buf_p;
2353 	p_nxge_dma_pool_t	dma_cntl_poolp;
2354 	p_nxge_dma_common_t	*dma_cntl_p;
2355 	size_t			tx_buf_alloc_size;
2356 	size_t			tx_cntl_alloc_size;
2357 	uint32_t		*num_chunks; /* per dma */
2358 	uint32_t		bcopy_thresh;
2359 
2360 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2361 
2362 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2363 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2364 	st_tdc = p_cfgp->start_tdc;
2365 	ndmas = p_cfgp->max_tdcs;
2366 
2367 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2368 		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2369 		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2370 	/*
2371 	 * Allocate memory for each transmit DMA channel.
2372 	 */
2373 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2374 			KM_SLEEP);
2375 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2376 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2377 
2378 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2379 			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2380 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2381 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2382 
2383 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2384 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2385 		    "nxge_alloc_tx_mem_pool: TX ring size too high %d, "
2386 		    "set to default %d",
2387 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2388 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2389 	}
2390 
2391 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2392 	/*
2393 	 * N2/NIU limits the descriptor sizes: contiguous memory allocation
2394 	 * (contig_mem_alloc) for data buffers is capped at 4M, and control
2395 	 * buffers must be little endian and allocated with the ddi/dki
2396 	 * memory allocation functions.  The transmit ring is limited to
2397 	 * 8K (including the mailbox).
2398 	 */
2399 	if (nxgep->niu_type == N2_NIU) {
2400 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2401 			(!ISP2(nxge_tx_ring_size))) {
2402 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2403 		}
2404 	}
2405 #endif
2406 
2407 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2408 
2409 	/*
2410 	 * Assume that each DMA channel will be configured with default
2411 	 * transmit buffer size for copying transmit data.
2412 	 * (For packet payload over this limit, packets will not be
2413 	 *  copied.)
2414 	 */
2415 	if (nxgep->niu_type == N2_NIU) {
2416 		bcopy_thresh = TX_BCOPY_SIZE;
2417 	} else {
2418 		bcopy_thresh = nxge_bcopy_thresh;
2419 	}
2420 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2421 
2422 	/*
2423 	 * Addresses of transmit descriptor ring and the
2424 	 * mailbox must be all cache-aligned (64 bytes).
2425 	 */
2426 	tx_cntl_alloc_size = nxge_tx_ring_size;
2427 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2428 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
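	/*
	 * Example (assuming a hypothetical 1024-entry ring and an
	 * 8-byte tx_desc_t): 1024 * 8 + sizeof (txdma_mailbox_t) bytes
	 * of 64-byte-aligned control memory per channel.
	 */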
2429 
2430 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2431 	if (nxgep->niu_type == N2_NIU) {
2432 		if (!ISP2(tx_buf_alloc_size)) {
2433 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2434 				"==> nxge_alloc_tx_mem_pool: "
2435 				"buffer size must be a power of 2"));
2436 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2437 			goto nxge_alloc_tx_mem_pool_exit;
2438 		}
2439 
2440 		if (tx_buf_alloc_size > (1 << 22)) {
2441 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2442 				"==> nxge_alloc_tx_mem_pool: "
2443 				"buffer size limited to 4M"));
2444 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2445 			goto nxge_alloc_tx_mem_pool_exit;
2446 		}
2447 
2448 		if (tx_cntl_alloc_size < 0x2000) {
2449 			tx_cntl_alloc_size = 0x2000;
2450 		}
2451 	}
2452 #endif
2453 
2454 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2455 			sizeof (uint32_t) * ndmas, KM_SLEEP);
2456 
2457 	/*
2458 	 * Allocate memory for transmit buffers and descriptor rings.
2459 	 * Replace allocation functions with interface functions provided
2460 	 * by the partition manager when it is available.
2461 	 *
2462 	 * Allocate memory for the transmit buffer pool.
2463 	 */
2464 	for (i = 0; i < ndmas; i++) {
2465 		num_chunks[i] = 0;
2466 		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
2467 					tx_buf_alloc_size,
2468 					bcopy_thresh, &num_chunks[i]);
2469 		if (status != NXGE_OK) {
2470 			break;
2471 		}
2472 		st_tdc++;
2473 	}
2474 	if (i < ndmas) {
2475 		goto nxge_alloc_tx_mem_pool_fail1;
2476 	}
2477 
2478 	st_tdc = p_cfgp->start_tdc;
2479 	/*
2480 	 * Allocate memory for descriptor rings and mailbox.
2481 	 */
2482 	for (j = 0; j < ndmas; j++) {
2483 		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
2484 					tx_cntl_alloc_size);
2485 		if (status != NXGE_OK) {
2486 			break;
2487 		}
2488 		st_tdc++;
2489 	}
2490 	if (j < ndmas) {
2491 		goto nxge_alloc_tx_mem_pool_fail2;
2492 	}
2493 
2494 	dma_poolp->ndmas = ndmas;
2495 	dma_poolp->num_chunks = num_chunks;
2496 	dma_poolp->buf_allocated = B_TRUE;
2497 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2498 	nxgep->tx_buf_pool_p = dma_poolp;
2499 
2500 	dma_cntl_poolp->ndmas = ndmas;
2501 	dma_cntl_poolp->buf_allocated = B_TRUE;
2502 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2503 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2504 
2505 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2506 		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
2507 		"ndmas %d poolp->ndmas %d",
2508 		st_tdc, ndmas, dma_poolp->ndmas));
2509 
2510 	goto nxge_alloc_tx_mem_pool_exit;
2511 
2512 nxge_alloc_tx_mem_pool_fail2:
2513 	/* Free control buffers */
2514 	j--;
2515 	for (; j >= 0; j--) {
2516 		nxge_free_tx_cntl_dma(nxgep,
2517 			(p_nxge_dma_common_t)dma_cntl_p[j]);
2518 	}
2519 
2520 nxge_alloc_tx_mem_pool_fail1:
2521 	/* Free data buffers */
2522 	i--;
2523 	for (; i >= 0; i--) {
2524 		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2525 			num_chunks[i]);
2526 	}
2527 
2528 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2529 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2530 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2531 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2532 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2533 
2534 nxge_alloc_tx_mem_pool_exit:
2535 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2536 		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
2537 
2538 	return (status);
2539 }
2540 
2541 static nxge_status_t
2542 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2543     p_nxge_dma_common_t *dmap, size_t alloc_size,
2544     size_t block_size, uint32_t *num_chunks)
2545 {
2546 	p_nxge_dma_common_t 	tx_dmap;
2547 	nxge_status_t		status = NXGE_OK;
2548 	size_t			total_alloc_size;
2549 	size_t			allocated = 0;
2550 	int			i, size_index, array_size;
2551 
2552 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
2553 
2554 	tx_dmap = (p_nxge_dma_common_t)
2555 		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2556 			KM_SLEEP);
2557 
2558 	total_alloc_size = alloc_size;
2559 	i = 0;
2560 	size_index = 0;
2561 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2562 	while ((size_index < array_size) &&
2563 		(alloc_sizes[size_index] < alloc_size))
2564 		size_index++;
2565 	if (size_index >= array_size) {
2566 		size_index = array_size - 1;
2567 	}
2568 
2569 	while ((allocated < total_alloc_size) &&
2570 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2571 
2572 		tx_dmap[i].dma_chunk_index = i;
2573 		tx_dmap[i].block_size = block_size;
2574 		tx_dmap[i].alength = alloc_sizes[size_index];
2575 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2576 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2577 		tx_dmap[i].dma_channel = dma_channel;
2578 		tx_dmap[i].contig_alloc_type = B_FALSE;
2579 
2580 		/*
2581 		 * N2/NIU: data buffers must be contiguous as the driver
2582 		 *	   needs to call Hypervisor api to set up
2583 		 *	   logical pages.
2584 		 */
2585 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2586 			tx_dmap[i].contig_alloc_type = B_TRUE;
2587 		}
2588 
2589 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2590 			&nxge_tx_dma_attr,
2591 			tx_dmap[i].alength,
2592 			&nxge_dev_buf_dma_acc_attr,
2593 			DDI_DMA_WRITE | DDI_DMA_STREAMING,
2594 			(p_nxge_dma_common_t)(&tx_dmap[i]));
2595 		if (status != NXGE_OK) {
2596 			size_index--;
2597 		} else {
2598 			i++;
2599 			allocated += alloc_sizes[size_index];
2600 		}
2601 	}
2602 
2603 	if (allocated < total_alloc_size) {
2604 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2605 		    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
2606 		    "allocated 0x%x requested 0x%x",
2607 		    dma_channel,
2608 		    allocated, total_alloc_size));
2609 		status = NXGE_ERROR;
2610 		goto nxge_alloc_tx_mem_fail1;
2611 	}
2612 
2613 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2614 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
2615 	    "allocated 0x%x requested 0x%x",
2616 	    dma_channel,
2617 	    allocated, total_alloc_size));
2618 
2619 	*num_chunks = i;
2620 	*dmap = tx_dmap;
2621 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2622 		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2623 		*dmap, i));
2624 	goto nxge_alloc_tx_mem_exit;
2625 
2626 nxge_alloc_tx_mem_fail1:
2627 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2628 
2629 nxge_alloc_tx_mem_exit:
2630 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2631 		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
2632 
2633 	return (status);
2634 }
2635 
2636 /*ARGSUSED*/
2637 static void
2638 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2639     uint32_t num_chunks)
2640 {
2641 	int		i;
2642 
2643 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
2644 
2645 	for (i = 0; i < num_chunks; i++) {
2646 		nxge_dma_mem_free(dmap++);
2647 	}
2648 
2649 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
2650 }
2651 
2652 /*ARGSUSED*/
2653 static nxge_status_t
2654 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2655     p_nxge_dma_common_t *dmap, size_t size)
2656 {
2657 	p_nxge_dma_common_t 	tx_dmap;
2658 	nxge_status_t		status = NXGE_OK;
2659 
2660 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
2661 	tx_dmap = (p_nxge_dma_common_t)
2662 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2663 
2664 	tx_dmap->contig_alloc_type = B_FALSE;
2665 
2666 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2667 			&nxge_desc_dma_attr,
2668 			size,
2669 			&nxge_dev_desc_dma_acc_attr,
2670 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2671 			tx_dmap);
2672 	if (status != NXGE_OK) {
2673 		goto nxge_alloc_tx_cntl_dma_fail1;
2674 	}
2675 
2676 	*dmap = tx_dmap;
2677 	goto nxge_alloc_tx_cntl_dma_exit;
2678 
2679 nxge_alloc_tx_cntl_dma_fail1:
2680 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2681 
2682 nxge_alloc_tx_cntl_dma_exit:
2683 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2684 		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2685 
2686 	return (status);
2687 }
2688 
2689 /*ARGSUSED*/
2690 static void
2691 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2692 {
2693 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2694 
2695 	nxge_dma_mem_free(dmap);
2696 
2697 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2698 }
2699 
2700 static void
2701 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2702 {
2703 	uint32_t		i, ndmas;
2704 	p_nxge_dma_pool_t	dma_poolp;
2705 	p_nxge_dma_common_t	*dma_buf_p;
2706 	p_nxge_dma_pool_t	dma_cntl_poolp;
2707 	p_nxge_dma_common_t	*dma_cntl_p;
2708 	uint32_t 		*num_chunks;
2709 
2710 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2711 
2712 	dma_poolp = nxgep->tx_buf_pool_p;
2713 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2714 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2715 			"<== nxge_free_tx_mem_pool "
2716 			"(null tx buf pool or buf not allocated)"));
2717 		return;
2718 	}
2719 
2720 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2721 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2722 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2723 			"<== nxge_free_tx_mem_pool "
2724 			"(null tx cntl buf pool or cntl buf not allocated)"));
2725 		return;
2726 	}
2727 
2728 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2729 	num_chunks = dma_poolp->num_chunks;
2730 
2731 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2732 	ndmas = dma_cntl_poolp->ndmas;
2733 
2734 	for (i = 0; i < ndmas; i++) {
2735 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2736 	}
2737 
2738 	for (i = 0; i < ndmas; i++) {
2739 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2740 	}
2741 
2742 	for (i = 0; i < ndmas; i++) {
2743 		KMEM_FREE(dma_buf_p[i],
2744 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2745 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2746 	}
2747 
2748 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2749 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2750 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2751 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2752 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2753 
2754 	nxgep->tx_buf_pool_p = NULL;
2755 	nxgep->tx_cntl_pool_p = NULL;
2756 
2757 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2758 }
2759 
2760 /*ARGSUSED*/
2761 static nxge_status_t
2762 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2763 	struct ddi_dma_attr *dma_attrp,
2764 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2765 	p_nxge_dma_common_t dma_p)
2766 {
2767 	caddr_t 		kaddrp;
2768 	int			ddi_status = DDI_SUCCESS;
2769 	boolean_t		contig_alloc_type;
2770 
2771 	contig_alloc_type = dma_p->contig_alloc_type;
2772 
2773 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2774 		/*
2775 		 * contig_alloc_type for contiguous memory only allowed
2776 		 * for N2/NIU.
2777 		 */
2778 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2779 			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
2780 			dma_p->contig_alloc_type));
2781 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2782 	}
2783 
2784 	dma_p->dma_handle = NULL;
2785 	dma_p->acc_handle = NULL;
2786 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2787 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2788 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2789 		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2790 	if (ddi_status != DDI_SUCCESS) {
2791 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2792 			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2793 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2794 	}
2795 
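	/*
	 * Two allocation paths below: ddi_dma_mem_alloc() for the
	 * normal case (B_FALSE), and contig_mem_alloc() (B_TRUE) on
	 * sun4v N2/NIU, which returns physically contiguous memory
	 * suitable for setting up hypervisor logical pages.
	 */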
2796 	switch (contig_alloc_type) {
2797 	case B_FALSE:
2798 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2799 			acc_attr_p,
2800 			xfer_flags,
2801 			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2802 			&dma_p->acc_handle);
2803 		if (ddi_status != DDI_SUCCESS) {
2804 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2805 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2806 			ddi_dma_free_handle(&dma_p->dma_handle);
2807 			dma_p->dma_handle = NULL;
2808 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2809 		}
2810 		if (dma_p->alength < length) {
2811 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2812 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2813 				"< length."));
2814 			ddi_dma_mem_free(&dma_p->acc_handle);
2815 			ddi_dma_free_handle(&dma_p->dma_handle);
2816 			dma_p->acc_handle = NULL;
2817 			dma_p->dma_handle = NULL;
2818 			return (NXGE_ERROR);
2819 		}
2820 
2821 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2822 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2823 			&dma_p->dma_cookie, &dma_p->ncookies);
2824 		if (ddi_status != DDI_DMA_MAPPED) {
2825 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2826 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2827 				"(status 0x%x ncookies %d.)", ddi_status,
2828 				dma_p->ncookies));
2829 			if (dma_p->acc_handle) {
2830 				ddi_dma_mem_free(&dma_p->acc_handle);
2831 				dma_p->acc_handle = NULL;
2832 			}
2833 			ddi_dma_free_handle(&dma_p->dma_handle);
2834 			dma_p->dma_handle = NULL;
2835 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2836 		}
2837 
2838 		if (dma_p->ncookies != 1) {
2839 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2840 				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
2841 				"> 1 cookie "
2842 				"(status 0x%x ncookies %d.)", ddi_status,
2843 				dma_p->ncookies));
2844 			if (dma_p->acc_handle) {
2845 				ddi_dma_mem_free(&dma_p->acc_handle);
2846 				dma_p->acc_handle = NULL;
2847 			}
2848 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2849 			ddi_dma_free_handle(&dma_p->dma_handle);
2850 			dma_p->dma_handle = NULL;
2851 			return (NXGE_ERROR);
2852 		}
2853 		break;
2854 
2855 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2856 	case B_TRUE:
2857 		kaddrp = (caddr_t)contig_mem_alloc(length);
2858 		if (kaddrp == NULL) {
2859 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2860 				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
2861 			ddi_dma_free_handle(&dma_p->dma_handle);
2862 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2863 		}
2864 
2865 		dma_p->alength = length;
2866 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2867 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2868 			&dma_p->dma_cookie, &dma_p->ncookies);
2869 		if (ddi_status != DDI_DMA_MAPPED) {
2870 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2871 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2872 				"(status 0x%x ncookies %d.)", ddi_status,
2873 				dma_p->ncookies));
2874 
2875 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2876 				"==> nxge_dma_mem_alloc: (not mapped) "
2877 				"length %lu (0x%x) "
2878 				"free contig kaddrp $%p "
2879 				"va_to_pa $%p",
2880 				length, length,
2881 				kaddrp,
2882 				va_to_pa(kaddrp)));
2883 
2884 
2885 			contig_mem_free((void *)kaddrp, length);
2886 			ddi_dma_free_handle(&dma_p->dma_handle);
2887 
2888 			dma_p->dma_handle = NULL;
2889 			dma_p->acc_handle = NULL;
2890 			dma_p->alength = 0;
2891 			dma_p->kaddrp = NULL;
2892 
2893 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2894 		}
2895 
2896 		if (dma_p->ncookies != 1 ||
2897 			(dma_p->dma_cookie.dmac_laddress == 0)) {
2898 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2899 				"nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
2900 				"cookie or "
2901 				"dmac_laddress is NULL $%p size %d "
2902 				" (status 0x%x ncookies %d.)",
2903 				ddi_status,
2904 				dma_p->dma_cookie.dmac_laddress,
2905 				dma_p->dma_cookie.dmac_size,
2906 				dma_p->ncookies));
2907 
2908 			contig_mem_free((void *)kaddrp, length);
2909 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2910 			ddi_dma_free_handle(&dma_p->dma_handle);
2911 
2912 			dma_p->alength = 0;
2913 			dma_p->dma_handle = NULL;
2914 			dma_p->acc_handle = NULL;
2915 			dma_p->kaddrp = NULL;
2916 
2917 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2918 		}
2919 		break;
2920 
2921 #else
2922 	case B_TRUE:
2923 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2924 			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2925 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2926 #endif
2927 	}
2928 
2929 	dma_p->kaddrp = kaddrp;
2930 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
2931 			dma_p->alength - RXBUF_64B_ALIGNED;
2932 #if defined(__i386)
2933 	dma_p->ioaddr_pp =
2934 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2935 #else
2936 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2937 #endif
2938 	dma_p->last_ioaddr_pp =
2939 #if defined(__i386)
2940 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
2941 #else
2942 		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
2943 #endif
2944 				dma_p->alength - RXBUF_64B_ALIGNED;
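	/*
	 * last_kaddrp and last_ioaddr_pp point RXBUF_64B_ALIGNED bytes
	 * before the end of the allocation, i.e. at the start of its
	 * last 64-byte block, in kernel virtual and I/O address space
	 * respectively.
	 */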
2945 
2946 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2947 
2948 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2949 	dma_p->orig_ioaddr_pp =
2950 		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
2951 	dma_p->orig_alength = length;
2952 	dma_p->orig_kaddrp = kaddrp;
2953 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2954 #endif
2955 
2956 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2957 		"dma buffer allocated: dma_p $%p "
2958 		"return dmac_laddress from cookie $%p cookie dmac_size %d "
2959 		"dma_p->ioaddr_p $%p "
2960 		"dma_p->orig_ioaddr_p $%p "
2961 		"orig_vatopa $%p "
2962 		"alength %d (0x%x) "
2963 		"kaddrp $%p "
2964 		"length %d (0x%x)",
2965 		dma_p,
2966 		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2967 		dma_p->ioaddr_pp,
2968 		dma_p->orig_ioaddr_pp,
2969 		dma_p->orig_vatopa,
2970 		dma_p->alength, dma_p->alength,
2971 		kaddrp,
2972 		length, length));
2973 
2974 	return (NXGE_OK);
2975 }
2976 
2977 static void
2978 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2979 {
2980 	if (dma_p->dma_handle != NULL) {
2981 		if (dma_p->ncookies) {
2982 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2983 			dma_p->ncookies = 0;
2984 		}
2985 		ddi_dma_free_handle(&dma_p->dma_handle);
2986 		dma_p->dma_handle = NULL;
2987 	}
2988 
2989 	if (dma_p->acc_handle != NULL) {
2990 		ddi_dma_mem_free(&dma_p->acc_handle);
2991 		dma_p->acc_handle = NULL;
2992 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2993 	}
2994 
2995 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2996 	if (dma_p->contig_alloc_type &&
2997 			dma_p->orig_kaddrp && dma_p->orig_alength) {
2998 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
2999 			"kaddrp $%p (orig_kaddrp $%p) "
3000 			"mem type %d "
3001 			"orig_alength %d "
3002 			"alength 0x%x (%d)",
3003 			dma_p->kaddrp,
3004 			dma_p->orig_kaddrp,
3005 			dma_p->contig_alloc_type,
3006 			dma_p->orig_alength,
3007 			dma_p->alength, dma_p->alength));
3008 
3009 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3010 		dma_p->orig_alength = 0;
3011 		dma_p->orig_kaddrp = NULL;
3012 		dma_p->contig_alloc_type = B_FALSE;
3013 	}
3014 #endif
3015 	dma_p->kaddrp = NULL;
3016 	dma_p->alength = 0;
3017 }
3018 
3019 /*
3020  *	nxge_m_start() -- start transmitting and receiving.
3021  *
3022  *	This function is called by the MAC layer when the first
3023  *	stream is opened, to prepare the hardware for transmitting
3024  *	and receiving packets.
3025  */
3026 static int
3027 nxge_m_start(void *arg)
3028 {
3029 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3030 
3031 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3032 
3033 	MUTEX_ENTER(nxgep->genlock);
3034 	if (nxge_init(nxgep) != NXGE_OK) {
3035 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3036 			"<== nxge_m_start: initialization failed"));
3037 		MUTEX_EXIT(nxgep->genlock);
3038 		return (EIO);
3039 	}
3040 
3041 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3042 		goto nxge_m_start_exit;
3043 	/*
3044 	 * Start timer to check for system errors and tx hangs
3045 	 */
3046 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3047 		NXGE_CHECK_TIMER);
3048 
3049 	nxgep->link_notify = B_TRUE;
3050 
3051 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3052 
3053 nxge_m_start_exit:
3054 	MUTEX_EXIT(nxgep->genlock);
3055 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3056 	return (0);
3057 }
3058 
3059 /*
3060  *	nxge_m_stop(): stop transmitting and receiving.
3061  */
3062 static void
3063 nxge_m_stop(void *arg)
3064 {
3065 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3066 
3067 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3068 
3069 	if (nxgep->nxge_timerid) {
3070 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3071 		nxgep->nxge_timerid = 0;
3072 	}
3073 
3074 	MUTEX_ENTER(nxgep->genlock);
3075 	nxge_uninit(nxgep);
3076 
3077 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3078 
3079 	MUTEX_EXIT(nxgep->genlock);
3080 
3081 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3082 }
3083 
3084 static int
3085 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3086 {
3087 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3088 	struct 		ether_addr addrp;
3089 
3090 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3091 
3092 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3093 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3094 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3095 			"<== nxge_m_unicst: set unicast failed"));
3096 		return (EINVAL);
3097 	}
3098 
3099 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3100 
3101 	return (0);
3102 }
3103 
3104 static int
3105 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3106 {
3107 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3108 	struct 		ether_addr addrp;
3109 
3110 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3111 		"==> nxge_m_multicst: add %d", add));
3112 
3113 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3114 	if (add) {
3115 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3116 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3117 				"<== nxge_m_multicst: add multicast failed"));
3118 			return (EINVAL);
3119 		}
3120 	} else {
3121 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3122 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3123 				"<== nxge_m_multicst: del multicast failed"));
3124 			return (EINVAL);
3125 		}
3126 	}
3127 
3128 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3129 
3130 	return (0);
3131 }
3132 
3133 static int
3134 nxge_m_promisc(void *arg, boolean_t on)
3135 {
3136 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3137 
3138 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3139 		"==> nxge_m_promisc: on %d", on));
3140 
3141 	if (nxge_set_promisc(nxgep, on)) {
3142 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3143 			"<== nxge_m_promisc: set promisc failed"));
3144 		return (EINVAL);
3145 	}
3146 
3147 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3148 		"<== nxge_m_promisc: on %d", on));
3149 
3150 	return (0);
3151 }
3152 
3153 static void
3154 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3155 {
3156 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3157 	struct 		iocblk *iocp;
3158 	boolean_t 	need_privilege;
3159 	int 		err;
3160 	int 		cmd;
3161 
3162 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3163 
3164 	iocp = (struct iocblk *)mp->b_rptr;
3165 	iocp->ioc_error = 0;
3166 	need_privilege = B_TRUE;
3167 	cmd = iocp->ioc_cmd;
3168 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3169 	switch (cmd) {
3170 	default:
3171 		miocnak(wq, mp, 0, EINVAL);
3172 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3173 		return;
3174 
3175 	case LB_GET_INFO_SIZE:
3176 	case LB_GET_INFO:
3177 	case LB_GET_MODE:
3178 		need_privilege = B_FALSE;
3179 		break;
3180 	case LB_SET_MODE:
3181 		break;
3182 
3183 	case ND_GET:
3184 		need_privilege = B_FALSE;
3185 		break;
3186 	case ND_SET:
3187 		break;
3188 
3189 	case NXGE_GET_MII:
3190 	case NXGE_PUT_MII:
3191 	case NXGE_GET64:
3192 	case NXGE_PUT64:
3193 	case NXGE_GET_TX_RING_SZ:
3194 	case NXGE_GET_TX_DESC:
3195 	case NXGE_TX_SIDE_RESET:
3196 	case NXGE_RX_SIDE_RESET:
3197 	case NXGE_GLOBAL_RESET:
3198 	case NXGE_RESET_MAC:
3199 	case NXGE_TX_REGS_DUMP:
3200 	case NXGE_RX_REGS_DUMP:
3201 	case NXGE_INT_REGS_DUMP:
3202 	case NXGE_VIR_INT_REGS_DUMP:
3203 	case NXGE_PUT_TCAM:
3204 	case NXGE_GET_TCAM:
3205 	case NXGE_RTRACE:
3206 	case NXGE_RDUMP:
3207 
3208 		need_privilege = B_FALSE;
3209 		break;
3210 	case NXGE_INJECT_ERR:
3211 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3212 		nxge_err_inject(nxgep, wq, mp);
3213 		break;
3214 	}
3215 
3216 	if (need_privilege) {
3217 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3218 		if (err != 0) {
3219 			miocnak(wq, mp, 0, err);
3220 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3221 				"<== nxge_m_ioctl: no priv"));
3222 			return;
3223 		}
3224 	}
3225 
3226 	switch (cmd) {
3227 	case ND_GET:
3228 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
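		/* FALLTHROUGH: ND_GET is handled by the same param ioctl path */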
3229 	case ND_SET:
3230 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
3231 		nxge_param_ioctl(nxgep, wq, mp, iocp);
3232 		break;
3233 
3234 	case LB_GET_MODE:
3235 	case LB_SET_MODE:
3236 	case LB_GET_INFO_SIZE:
3237 	case LB_GET_INFO:
3238 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3239 		break;
3240 
3241 	case NXGE_GET_MII:
3242 	case NXGE_PUT_MII:
3243 	case NXGE_PUT_TCAM:
3244 	case NXGE_GET_TCAM:
3245 	case NXGE_GET64:
3246 	case NXGE_PUT64:
3247 	case NXGE_GET_TX_RING_SZ:
3248 	case NXGE_GET_TX_DESC:
3249 	case NXGE_TX_SIDE_RESET:
3250 	case NXGE_RX_SIDE_RESET:
3251 	case NXGE_GLOBAL_RESET:
3252 	case NXGE_RESET_MAC:
3253 	case NXGE_TX_REGS_DUMP:
3254 	case NXGE_RX_REGS_DUMP:
3255 	case NXGE_INT_REGS_DUMP:
3256 	case NXGE_VIR_INT_REGS_DUMP:
3257 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3258 			"==> nxge_m_ioctl: cmd 0x%x", cmd));
3259 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3260 		break;
3261 	}
3262 
3263 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3264 }
3265 
3266 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3267 
3268 static void
3269 nxge_m_resources(void *arg)
3270 {
3271 	p_nxge_t		nxgep = arg;
3272 	mac_rx_fifo_t 		mrf;
3273 	p_rx_rcr_rings_t	rcr_rings;
3274 	p_rx_rcr_ring_t		*rcr_p;
3275 	uint32_t		i, ndmas;
3276 	nxge_status_t		status;
3277 
3278 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3279 
3280 	MUTEX_ENTER(nxgep->genlock);
3281 
3282 	/*
3283 	 * CR 6492541 Check to see if the drv_state has been initialized,
3284 	 * if not, call nxge_init().
3285 	 */
3286 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3287 		status = nxge_init(nxgep);
3288 		if (status != NXGE_OK)
3289 			goto nxge_m_resources_exit;
3290 	}
3291 
3292 	mrf.mrf_type = MAC_RX_FIFO;
3293 	mrf.mrf_blank = nxge_rx_hw_blank;
3294 	mrf.mrf_arg = (void *)nxgep;
3295 
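	/*
	 * Default interrupt blanking parameters exported to the MAC
	 * layer: the blank time (in ticks) and packet count that the
	 * mrf_blank callback (nxge_rx_hw_blank) is driven with.
	 */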
3296 	mrf.mrf_normal_blank_time = 128;
3297 	mrf.mrf_normal_pkt_count = 8;
3298 	rcr_rings = nxgep->rx_rcr_rings;
3299 	rcr_p = rcr_rings->rcr_rings;
3300 	ndmas = rcr_rings->ndmas;
3301 
3302 	/*
3303 	 * Export our receive resources to the MAC layer.
3304 	 */
3305 	for (i = 0; i < ndmas; i++) {
3306 		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
3307 				mac_resource_add(nxgep->mach,
3308 				    (mac_resource_t *)&mrf);
3309 
3310 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3311 			"==> nxge_m_resources: vdma %d dma %d "
3312 			"rcrptr 0x%016llx mac_handle 0x%016llx",
3313 			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
3314 			rcr_p[i],
3315 			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
3316 	}
3317 
3318 nxge_m_resources_exit:
3319 	MUTEX_EXIT(nxgep->genlock);
3320 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3321 }
3322 
3323 static void
3324 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3325 {
3326 	p_nxge_mmac_stats_t mmac_stats;
3327 	int i;
3328 	nxge_mmac_t *mmac_info;
3329 
3330 	mmac_info = &nxgep->nxge_mmac_info;
3331 
3332 	mmac_stats = &nxgep->statsp->mmac_stats;
3333 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3334 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3335 
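	/*
	 * The address pools keep the octets in reverse order relative
	 * to ether_addr_octet[], so copy them out back to front.
	 */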
3336 	for (i = 0; i < ETHERADDRL; i++) {
3337 		if (factory) {
3338 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3339 			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
3340 		} else {
3341 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3342 			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
3343 		}
3344 	}
3345 }
3346 
3347 /*
3348  * nxge_altmac_set() -- Set an alternate MAC address
3349  */
3350 static int
3351 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
3352 {
3353 	uint8_t addrn;
3354 	uint8_t portn;
3355 	npi_mac_addr_t altmac;
3356 	hostinfo_t mac_rdc;
3357 	p_nxge_class_pt_cfg_t clscfgp;
3358 
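	/*
	 * Pack the 48-bit MAC address into three 16-bit register words,
	 * most significant bytes in w2.  For example, the (hypothetical)
	 * address 00:14:4f:a8:b2:c4 packs as w2 = 0x0014, w1 = 0x4fa8,
	 * w0 = 0xb2c4.
	 */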
3359 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
3360 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
3361 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
3362 
3363 	portn = nxgep->mac.portnum;
3364 	addrn = (uint8_t)slot - 1;
3365 
3366 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
3367 		addrn, &altmac) != NPI_SUCCESS)
3368 		return (EIO);
3369 
3370 	/*
3371 	 * Set the rdc table number for the host info entry
3372 	 * for this mac address slot.
3373 	 */
3374 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
3375 	mac_rdc.value = 0;
3376 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
3377 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
3378 
3379 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
3380 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
3381 		return (EIO);
3382 	}
3383 
3384 	/*
3385 	 * Enable comparison with the alternate MAC address.
3386 	 * While the first alternate addr is enabled by bit 1 of register
3387 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
3388 	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
3389 	 * accordingly before calling npi_mac_altaddr_entry.
3390 	 */
3391 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3392 		addrn = (uint8_t)slot - 1;
3393 	else
3394 		addrn = (uint8_t)slot;
3395 
3396 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3397 		!= NPI_SUCCESS)
3398 		return (EIO);
3399 
3400 	return (0);
3401 }
3402 
3403 /*
3404  * nxge_m_mmac_add() - find an unused address slot, set the address
3405  * value to the one specified, enable the port to start filtering on
3406  * the new MAC address.  Returns 0 on success.
3407  */
3408 static int
3409 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3410 {
3411 	p_nxge_t nxgep = arg;
3412 	mac_addr_slot_t slot;
3413 	nxge_mmac_t *mmac_info;
3414 	int err;
3415 	nxge_status_t status;
3416 
3417 	mutex_enter(nxgep->genlock);
3418 
3419 	/*
3420 	 * Make sure that nxge is initialized, in case _start() has
3421 	 * not been called.
3422 	 */
3423 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3424 		status = nxge_init(nxgep);
3425 		if (status != NXGE_OK) {
3426 			mutex_exit(nxgep->genlock);
3427 			return (ENXIO);
3428 		}
3429 	}
3430 
3431 	mmac_info = &nxgep->nxge_mmac_info;
3432 	if (mmac_info->naddrfree == 0) {
3433 		mutex_exit(nxgep->genlock);
3434 		return (ENOSPC);
3435 	}
3436 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3437 		maddr->mma_addrlen)) {
3438 		mutex_exit(nxgep->genlock);
3439 		return (EINVAL);
3440 	}
3441 	/*
3442 	 * 	Search for the first available slot. Because naddrfree
3443 	 * is not zero, we are guaranteed to find one.
3444 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
3445 	 * MAC slot is slot 1.
3446 	 *	Each of the first two ports of Neptune has 16 alternate
3447 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
3448 	 * MAC addresses. We first search among the slots without bundled
3449 	 * factory MACs. If we fail to find one in that range, then we
3450 	 * search the slots with bundled factory MACs.  A factory MAC
3451 	 * will be wasted while the slot is used with a user MAC address.
3452 	 * But the slot could be used by factory MAC again after calling
3453 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3454 	 */
3455 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3456 		for (slot = mmac_info->num_factory_mmac + 1;
3457 			slot <= mmac_info->num_mmac; slot++) {
3458 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3459 				break;
3460 		}
3461 		if (slot > mmac_info->num_mmac) {
3462 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3463 				slot++) {
3464 				if (!(mmac_info->mac_pool[slot].flags
3465 					& MMAC_SLOT_USED))
3466 					break;
3467 			}
3468 		}
3469 	} else {
3470 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3471 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3472 				break;
3473 		}
3474 	}
3475 	ASSERT(slot <= mmac_info->num_mmac);
3476 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3477 		mutex_exit(nxgep->genlock);
3478 		return (err);
3479 	}
3480 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3481 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3482 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3483 	mmac_info->naddrfree--;
3484 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3485 
3486 	maddr->mma_slot = slot;
3487 
3488 	mutex_exit(nxgep->genlock);
3489 	return (0);
3490 }
3491 
3492 /*
3493  * This function reserves an unused slot and programs the slot and the HW
3494  * with a factory mac address.
3495  */
3496 static int
3497 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
3498 {
3499 	p_nxge_t nxgep = arg;
3500 	mac_addr_slot_t slot;
3501 	nxge_mmac_t *mmac_info;
3502 	int err;
3503 	nxge_status_t status;
3504 
3505 	mutex_enter(nxgep->genlock);
3506 
3507 	/*
3508 	 * Make sure that nxge is initialized, in case _start() has
3509 	 * not been called.
3510 	 */
3511 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3512 		status = nxge_init(nxgep);
3513 		if (status != NXGE_OK) {
3514 			mutex_exit(nxgep->genlock);
3515 			return (ENXIO);
3516 		}
3517 	}
3518 
3519 	mmac_info = &nxgep->nxge_mmac_info;
3520 	if (mmac_info->naddrfree == 0) {
3521 		mutex_exit(nxgep->genlock);
3522 		return (ENOSPC);
3523 	}
3524 
3525 	slot = maddr->mma_slot;
3526 	if (slot == -1) {  /* -1: Take the first available slot */
3527 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
3528 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3529 				break;
3530 		}
3531 		if (slot > mmac_info->num_factory_mmac) {
3532 			mutex_exit(nxgep->genlock);
3533 			return (ENOSPC);
3534 		}
3535 	}
3536 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
3537 		/*
3538 		 * Do not support factory MAC at a slot greater than
3539 		 * num_factory_mmac even when there are available factory
3540 		 * MAC addresses because the alternate MACs are bundled with
3541 		 * slot[1] through slot[num_factory_mmac]
3542 		 */
3543 		mutex_exit(nxgep->genlock);
3544 		return (EINVAL);
3545 	}
3546 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3547 		mutex_exit(nxgep->genlock);
3548 		return (EBUSY);
3549 	}
3550 	/* Verify the address to be reserved */
3551 	if (!mac_unicst_verify(nxgep->mach,
3552 		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
3553 		mutex_exit(nxgep->genlock);
3554 		return (EINVAL);
3555 	}
3556 	if (err = nxge_altmac_set(nxgep,
3557 		mmac_info->factory_mac_pool[slot], slot)) {
3558 		mutex_exit(nxgep->genlock);
3559 		return (err);
3560 	}
3561 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
3562 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3563 	mmac_info->naddrfree--;
3564 
3565 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
3566 	mutex_exit(nxgep->genlock);
3567 
3568 	/* Pass info back to the caller */
3569 	maddr->mma_slot = slot;
3570 	maddr->mma_addrlen = ETHERADDRL;
3571 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
3572 
3573 	return (0);
3574 }
3575 
3576 /*
3577  * Remove the specified mac address and update the HW not to filter
3578  * the mac address anymore.
3579  */
3580 static int
3581 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
3582 {
3583 	p_nxge_t nxgep = arg;
3584 	nxge_mmac_t *mmac_info;
3585 	uint8_t addrn;
3586 	uint8_t portn;
3587 	int err = 0;
3588 	nxge_status_t status;
3589 
3590 	mutex_enter(nxgep->genlock);
3591 
3592 	/*
3593 	 * Make sure that nxge is initialized, in case _start() has
3594 	 * not been called.
3595 	 */
3596 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3597 		status = nxge_init(nxgep);
3598 		if (status != NXGE_OK) {
3599 			mutex_exit(nxgep->genlock);
3600 			return (ENXIO);
3601 		}
3602 	}
3603 
3604 	mmac_info = &nxgep->nxge_mmac_info;
3605 	if (slot < 1 || slot > mmac_info->num_mmac) {
3606 		mutex_exit(nxgep->genlock);
3607 		return (EINVAL);
3608 	}
3609 
3610 	portn = nxgep->mac.portnum;
3611 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3612 		addrn = (uint8_t)slot - 1;
3613 	else
3614 		addrn = (uint8_t)slot;
3615 
3616 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3617 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3618 				== NPI_SUCCESS) {
3619 			mmac_info->naddrfree++;
3620 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3621 			/*
3622 			 * Regardless of whether the MAC we just stopped
3623 			 * filtering is a user addr or a factory addr, we must set
3624 			 * the MMAC_VENDOR_ADDR flag if this slot has an
3625 			 * associated factory MAC to indicate that a factory
3626 			 * MAC is available.
3627 			 */
3628 			if (slot <= mmac_info->num_factory_mmac) {
3629 				mmac_info->mac_pool[slot].flags
3630 					|= MMAC_VENDOR_ADDR;
3631 			}
3632 			/*
3633 			 * Clear mac_pool[slot].addr so that kstat shows 0
3634 			 * alternate MAC address if the slot is not used.
3635 			 * (But nxge_m_mmac_get returns the factory MAC even
3636 			 * when the slot is not used!)
3637 			 */
3638 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3639 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3640 		} else {
3641 			err = EIO;
3642 		}
3643 	} else {
3644 		err = EINVAL;
3645 	}
3646 
3647 	mutex_exit(nxgep->genlock);
3648 	return (err);
3649 }
3650 
3651 
3652 /*
3653  * Modify a MAC address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
3654  */
3655 static int
3656 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3657 {
3658 	p_nxge_t nxgep = arg;
3659 	mac_addr_slot_t slot;
3660 	nxge_mmac_t *mmac_info;
3661 	int err = 0;
3662 	nxge_status_t status;
3663 
3664 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3665 			maddr->mma_addrlen))
3666 		return (EINVAL);
3667 
3668 	slot = maddr->mma_slot;
3669 
3670 	mutex_enter(nxgep->genlock);
3671 
3672 	/*
3673 	 * Make sure that nxge is initialized if _start()
3674 	 * has not been called yet.
3675 	 */
3676 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3677 		status = nxge_init(nxgep);
3678 		if (status != NXGE_OK) {
3679 			mutex_exit(nxgep->genlock);
3680 			return (ENXIO);
3681 		}
3682 	}
3683 
3684 	mmac_info = &nxgep->nxge_mmac_info;
3685 	if (slot < 1 || slot > mmac_info->num_mmac) {
3686 		mutex_exit(nxgep->genlock);
3687 		return (EINVAL);
3688 	}
3689 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3690 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3691 			== 0) {
3692 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3693 				ETHERADDRL);
3694 			/*
3695 			 * Assume that the MAC passed down from the caller
3696 			 * is not a factory MAC address (the caller should
3697 			 * invoke mmac_remove followed by mmac_reserve to
3698 			 * use the factory MAC for this slot).
3699 			 */
3700 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3701 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3702 		}
3703 	} else {
3704 		err = EINVAL;
3705 	}
3706 	mutex_exit(nxgep->genlock);
3707 	return (err);
3708 }
3709 
3710 /*
3711  * nxge_m_mmac_get() - Get the MAC address and other information
3712  * related to the slot.  mma_flags should be set to 0 in the call.
3713  * Note: although kstat shows MAC address as zero when a slot is
3714  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
3715  * to the caller as long as the slot is not using a user MAC address.
3716  * The following table shows the rules:
3717  *
3718  *				   USED    VENDOR    mma_addr
3719  * ------------------------------------------------------------
3720  * (1) Slot uses a user MAC:        yes      no     user MAC
3721  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
3722  * (3) Slot is not used but is
3723  *     factory MAC capable:         no       yes    factory MAC
3724  * (4) Slot is not used and is
3725  *     not factory MAC capable:     no       no        0
3726  * ------------------------------------------------------------
3727  */
3728 static int
3729 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3730 {
3731 	nxge_t *nxgep = arg;
3732 	mac_addr_slot_t slot;
3733 	nxge_mmac_t *mmac_info;
3734 	nxge_status_t status;
3735 
3736 	slot = maddr->mma_slot;
3737 
3738 	mutex_enter(nxgep->genlock);
3739 
3740 	/*
3741 	 * Make sure that nxge is initialized if _start()
3742 	 * has not been called yet.
3743 	 */
3744 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3745 		status = nxge_init(nxgep);
3746 		if (status != NXGE_OK) {
3747 			mutex_exit(nxgep->genlock);
3748 			return (ENXIO);
3749 		}
3750 	}
3751 
3752 	mmac_info = &nxgep->nxge_mmac_info;
3753 
3754 	if (slot < 1 || slot > mmac_info->num_mmac) {
3755 		mutex_exit(nxgep->genlock);
3756 		return (EINVAL);
3757 	}
3758 	maddr->mma_flags = 0;
3759 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3760 		maddr->mma_flags |= MMAC_SLOT_USED;
3761 
3762 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3763 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3764 		bcopy(mmac_info->factory_mac_pool[slot],
3765 			maddr->mma_addr, ETHERADDRL);
3766 		maddr->mma_addrlen = ETHERADDRL;
3767 	} else {
3768 		if (maddr->mma_flags & MMAC_SLOT_USED) {
3769 			bcopy(mmac_info->mac_pool[slot].addr,
3770 				maddr->mma_addr, ETHERADDRL);
3771 			maddr->mma_addrlen = ETHERADDRL;
3772 		} else {
3773 			bzero(maddr->mma_addr, ETHERADDRL);
3774 			maddr->mma_addrlen = 0;
3775 		}
3776 	}
3777 	mutex_exit(nxgep->genlock);
3778 	return (0);
3779 }
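
/*
 * For illustration only: how a hypothetical caller would decode the
 * mma_flags that nxge_m_mmac_get() returns, per the table above
 * (the variable ma is made up):
 *
 *	ma.mma_flags = 0;
 *	(void) nxge_m_mmac_get(nxgep, &ma);
 *	if (ma.mma_flags & MMAC_SLOT_USED)
 *		(rows 1-2: in use; factory MAC iff MMAC_VENDOR_ADDR)
 *	else if (ma.mma_flags & MMAC_VENDOR_ADDR)
 *		(row 3: idle, factory MAC returned in mma_addr)
 *	else
 *		(row 4: idle, mma_addr zeroed and mma_addrlen == 0)
 */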
3780 
3781 
3782 static boolean_t
3783 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3784 {
3785 	nxge_t *nxgep = arg;
3786 	uint32_t *txflags = cap_data;
3787 	multiaddress_capab_t *mmacp = cap_data;
3788 
3789 	switch (cap) {
3790 	case MAC_CAPAB_HCKSUM:
3791 		*txflags = HCKSUM_INET_PARTIAL;
3792 		break;
3793 	case MAC_CAPAB_POLL:
3794 		/*
3795 		 * There's nothing for us to fill in; simply returning
3796 		 * B_TRUE to state that we support polling is sufficient.
3797 		 */
3798 		break;
3799 
3800 	case MAC_CAPAB_MULTIADDRESS:
3801 		mutex_enter(nxgep->genlock);
3802 
3803 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3804 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
3805 		mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */
3806 		/*
3807 		 * maddr_handle is driver's private data, passed back to
3808 		 * entry point functions as arg.
3809 		 */
3810 		mmacp->maddr_handle	= nxgep;
3811 		mmacp->maddr_add	= nxge_m_mmac_add;
3812 		mmacp->maddr_remove	= nxge_m_mmac_remove;
3813 		mmacp->maddr_modify	= nxge_m_mmac_modify;
3814 		mmacp->maddr_get	= nxge_m_mmac_get;
3815 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
3816 
3817 		mutex_exit(nxgep->genlock);
3818 		break;
3819 	case MAC_CAPAB_LSO: {
3820 		mac_capab_lso_t *cap_lso = cap_data;
3821 
3822 		if (nxge_lso_enable) {
3823 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3824 			if (nxge_lso_max > NXGE_LSO_MAXLEN) {
3825 				nxge_lso_max = NXGE_LSO_MAXLEN;
3826 			}
3827 			cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max;
3828 			break;
3829 		} else {
3830 			return (B_FALSE);
3831 		}
3832 	}
3833 
3834 	default:
3835 		return (B_FALSE);
3836 	}
3837 	return (B_TRUE);
3838 }
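
/*
 * For illustration only: a hedged approximation of how the GLDv3
 * framework consumes the capability callback above when it wants the
 * checksum offload flags (this is not the framework's actual code):
 *
 *	uint32_t txflags = 0;
 *
 *	if (nxge_m_getcapab(nxgep, MAC_CAPAB_HCKSUM, &txflags))
 *		(txflags is now HCKSUM_INET_PARTIAL)
 */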
3839 
3840 /*
3841  * Module loading and removal entry points.
3842  */
3843 
3844 static	struct cb_ops 	nxge_cb_ops = {
3845 	nodev,			/* cb_open */
3846 	nodev,			/* cb_close */
3847 	nodev,			/* cb_strategy */
3848 	nodev,			/* cb_print */
3849 	nodev,			/* cb_dump */
3850 	nodev,			/* cb_read */
3851 	nodev,			/* cb_write */
3852 	nodev,			/* cb_ioctl */
3853 	nodev,			/* cb_devmap */
3854 	nodev,			/* cb_mmap */
3855 	nodev,			/* cb_segmap */
3856 	nochpoll,		/* cb_chpoll */
3857 	ddi_prop_op,		/* cb_prop_op */
3858 	NULL,			/* cb_str */
3859 	D_MP, 			/* cb_flag */
3860 	CB_REV,			/* rev */
3861 	nodev,			/* int (*cb_aread)() */
3862 	nodev			/* int (*cb_awrite)() */
3863 };
3864 
3865 static struct dev_ops nxge_dev_ops = {
3866 	DEVO_REV,		/* devo_rev */
3867 	0,			/* devo_refcnt */
3868 	nulldev,		/* devo_getinfo */
3869 	nulldev,		/* devo_identify */
3870 	nulldev,		/* devo_probe */
3871 	nxge_attach,		/* devo_attach */
3872 	nxge_detach,		/* devo_detach */
3873 	nodev,			/* devo_reset */
3874 	&nxge_cb_ops,		/* devo_cb_ops */
3875 	(struct bus_ops *)NULL, /* devo_bus_ops	*/
3876 	ddi_power		/* devo_power */
3877 };
3878 
3879 extern	struct	mod_ops	mod_driverops;
3880 
3881 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
3882 
3883 /*
3884  * Module linkage information for the kernel.
3885  */
3886 static struct modldrv 	nxge_modldrv = {
3887 	&mod_driverops,
3888 	NXGE_DESC_VER,
3889 	&nxge_dev_ops
3890 };
3891 
3892 static struct modlinkage modlinkage = {
3893 	MODREV_1, (void *) &nxge_modldrv, NULL
3894 };
3895 
3896 int
3897 _init(void)
3898 {
3899 	int		status;
3900 
3901 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3902 	mac_init_ops(&nxge_dev_ops, "nxge");
3903 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
3904 	if (status != 0) {
3905 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
3906 			"failed to init device soft state"));
3907 		goto _init_exit;
3908 	}
3909 	status = mod_install(&modlinkage);
3910 	if (status != 0) {
3911 		ddi_soft_state_fini(&nxge_list);
3912 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
3913 		goto _init_exit;
3914 	}
3915 
3916 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3917 
3918 _init_exit:
3919 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3920 
3921 	return (status);
3922 }
3923 
3924 int
3925 _fini(void)
3926 {
3927 	int		status;
3928 
3929 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3930 
3931 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3932 
3933 	if (nxge_mblks_pending)
3934 		return (EBUSY);
3935 
3936 	status = mod_remove(&modlinkage);
3937 	if (status != DDI_SUCCESS) {
3938 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
3939 			    "Module removal failed 0x%08x",
3940 			    status));
3941 		goto _fini_exit;
3942 	}
3943 
3944 	mac_fini_ops(&nxge_dev_ops);
3945 
3946 	ddi_soft_state_fini(&nxge_list);
3947 
3948 	MUTEX_DESTROY(&nxge_common_lock);
3949 _fini_exit:
3950 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3951 
3952 	return (status);
3953 }
3954 
3955 int
3956 _info(struct modinfo *modinfop)
3957 {
3958 	int		status;
3959 
3960 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3961 	status = mod_info(&modlinkage, modinfop);
3962 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3963 
3964 	return (status);
3965 }
3966 
3967 /*ARGSUSED*/
3968 static nxge_status_t
3969 nxge_add_intrs(p_nxge_t nxgep)
3970 {
3971 
3972 	int		intr_types;
3973 	int		type = 0;
3974 	int		ddi_status = DDI_SUCCESS;
3975 	nxge_status_t	status = NXGE_OK;
3976 
3977 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
3978 
3979 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
3980 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
3981 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
3982 	nxgep->nxge_intr_type.intr_added = 0;
3983 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
3984 	nxgep->nxge_intr_type.intr_type = 0;
3985 
3986 	if (nxgep->niu_type == N2_NIU || nxge_msi_enable) {
3987 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3988 	}
3991 
3992 	/* Get the supported interrupt types */
3993 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
3994 			!= DDI_SUCCESS) {
3995 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
3996 			"ddi_intr_get_supported_types failed: status 0x%08x",
3997 			ddi_status));
3998 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3999 	}
4000 	nxgep->nxge_intr_type.intr_types = intr_types;
4001 
4002 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4003 		"ddi_intr_get_supported_types: 0x%08x", intr_types));
4004 
4005 	/*
4006 	 * Solaris MSI-X is not supported yet; use MSI for now.
4007 	 * nxge_msi_enable:
4008 	 *	1 - MSI		2 - MSI-X	others - FIXED
4009 	 */
4010 	switch (nxge_msi_enable) {
4011 	default:
4012 		type = DDI_INTR_TYPE_FIXED;
4013 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4014 			"use fixed (INTx emulation) type 0x%08x",
4015 			type));
4016 		break;
4017 
4018 	case 2:
4019 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4020 			"ddi_intr_get_supported_types: 0x%08x", intr_types));
4021 		if (intr_types & DDI_INTR_TYPE_MSIX) {
4022 			type = DDI_INTR_TYPE_MSIX;
4023 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4024 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4025 				type));
4026 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
4027 			type = DDI_INTR_TYPE_MSI;
4028 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4029 				"ddi_intr_get_supported_types: MSI 0x%08x",
4030 				type));
4031 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4032 			type = DDI_INTR_TYPE_FIXED;
4033 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4034 				"ddi_intr_get_supported_types: FIXED 0x%08x",
4035 				type));
4036 		}
4037 		break;
4038 
4039 	case 1:
4040 		if (intr_types & DDI_INTR_TYPE_MSI) {
4041 			type = DDI_INTR_TYPE_MSI;
4042 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4043 				"ddi_intr_get_supported_types: MSI 0x%08x",
4044 				type));
4045 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
4046 			type = DDI_INTR_TYPE_MSIX;
4047 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4048 				"ddi_intr_get_supported_types: MSIX 0x%08x",
4049 				type));
4050 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
4051 			type = DDI_INTR_TYPE_FIXED;
4052 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4053 				"ddi_intr_get_supported_types: FIXED 0x%08x",
4054 				type));
4055 		}
4056 	}
4057 
4058 	nxgep->nxge_intr_type.intr_type = type;
4059 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
4060 		type == DDI_INTR_TYPE_FIXED) &&
4061 			nxgep->nxge_intr_type.niu_msi_enable) {
4062 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
4063 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4064 				    " nxge_add_intrs: "
4065 				    " nxge_add_intrs_adv failed: status 0x%08x",
4066 				    status));
4067 			return (status);
4068 		} else {
4069 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4070 			"interrupts registered : type %d", type));
4071 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
4072 
4073 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4074 				"\nAdded advanced nxge add_intr_adv "
4075 					"intr type 0x%x\n", type));
4076 
4077 			return (status);
4078 		}
4079 	}
4080 
4081 	if (!nxgep->nxge_intr_type.intr_registered) {
4082 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
4083 			"failed to register interrupts"));
4084 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4085 	}
4086 
4087 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
4088 	return (status);
4089 }
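
/*
 * The interrupt type chosen above can be steered from /etc/system
 * through the nxge_msi_enable tunable, with the values decoded by
 * the switch in this function, e.g.:
 *
 *	set nxge:nxge_msi_enable = 2	(prefer MSI-X)
 *	set nxge:nxge_msi_enable = 1	(prefer MSI)
 *	set nxge:nxge_msi_enable = 0	(fixed/INTx emulation)
 */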
4090 
4091 /*ARGSUSED*/
4092 static nxge_status_t
4093 nxge_add_soft_intrs(p_nxge_t nxgep)
4094 {
4095 
4096 	int		ddi_status = DDI_SUCCESS;
4097 	nxge_status_t	status = NXGE_OK;
4098 
4099 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
4100 
4101 	nxgep->resched_id = NULL;
4102 	nxgep->resched_running = B_FALSE;
4103 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
4104 		&nxgep->resched_id, NULL, NULL, nxge_reschedule,
4105 		(caddr_t)nxgep);
4106 	if (ddi_status != DDI_SUCCESS) {
4107 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
4108 			"ddi_add_softintr failed: status 0x%08x",
4109 			ddi_status));
4110 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4111 	}
4112 
4113 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
4114 
4115 	return (status);
4116 }
4117 
4118 static nxge_status_t
4119 nxge_add_intrs_adv(p_nxge_t nxgep)
4120 {
4121 	int		intr_type;
4122 	p_nxge_intr_t	intrp;
4123 
4124 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
4125 
4126 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4127 	intr_type = intrp->intr_type;
4128 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
4129 		intr_type));
4130 
4131 	switch (intr_type) {
4132 	case DDI_INTR_TYPE_MSI: /* 0x2 */
4133 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
4134 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
4135 
4136 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
4137 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
4138 
4139 	default:
4140 		return (NXGE_ERROR);
4141 	}
4142 }
4143 
4144 
4145 /*ARGSUSED*/
4146 static nxge_status_t
4147 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
4148 {
4149 	dev_info_t		*dip = nxgep->dip;
4150 	p_nxge_ldg_t		ldgp;
4151 	p_nxge_intr_t		intrp;
4152 	uint_t			*inthandler;
4153 	void			*arg1, *arg2;
4154 	int			behavior;
4155 	int			nintrs, navail, nrequest;
4156 	int			nactual, nrequired;
4157 	int			inum = 0;
4158 	int			x, y;
4159 	int			ddi_status = DDI_SUCCESS;
4160 	nxge_status_t		status = NXGE_OK;
4161 
4162 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
4163 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4164 	intrp->start_inum = 0;
4165 
4166 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4167 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4168 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4169 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
4170 			    "nintrs: %d", ddi_status, nintrs));
4171 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4172 	}
4173 
4174 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4175 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4176 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4177 			"ddi_intr_get_navail() failed, status: 0x%x, "
4178 			    "navail: %d", ddi_status, navail));
4179 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4180 	}
4181 
4182 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
4183 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
4184 		    nintrs, navail));
4185 
4186 	/* PSARC/2007/453 MSI-X interrupt limit override */
4187 	if (int_type == DDI_INTR_TYPE_MSIX) {
4188 		nrequest = nxge_create_msi_property(nxgep);
4189 		if (nrequest < navail) {
4190 			navail = nrequest;
4191 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4192 			    "nxge_add_intrs_adv_type: nintrs %d "
4193 			    "navail %d (nrequest %d)",
4194 			    nintrs, navail, nrequest));
4195 		}
4196 	}
4197 
4198 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
4199 		/* MSI count must be a power of 2; round navail down */
4200 		if ((navail & 16) == 16) {
4201 			navail = 16;
4202 		} else if ((navail & 8) == 8) {
4203 			navail = 8;
4204 		} else if ((navail & 4) == 4) {
4205 			navail = 4;
4206 		} else if ((navail & 2) == 2) {
4207 			navail = 2;
4208 		} else {
4209 			navail = 1;
4210 		}
4211 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4212 			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
4213 			"navail %d", nintrs, navail));
4214 	}
4215 
4216 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4217 			DDI_INTR_ALLOC_NORMAL);
4218 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4219 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4220 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4221 		    navail, &nactual, behavior);
4222 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4223 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4224 				    " ddi_intr_alloc() failed: %d",
4225 				    ddi_status));
4226 		kmem_free(intrp->htable, intrp->intr_size);
4227 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4228 	}
4229 
4230 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4231 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4232 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4233 				    " ddi_intr_get_pri() failed: %d",
4234 				    ddi_status));
4235 		/* Free already allocated interrupts */
4236 		for (y = 0; y < nactual; y++) {
4237 			(void) ddi_intr_free(intrp->htable[y]);
4238 		}
4239 
4240 		kmem_free(intrp->htable, intrp->intr_size);
4241 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4242 	}
4243 
4244 	nrequired = 0;
4245 	switch (nxgep->niu_type) {
4246 	default:
4247 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4248 		break;
4249 
4250 	case N2_NIU:
4251 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4252 		break;
4253 	}
4254 
4255 	if (status != NXGE_OK) {
4256 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4257 			"nxge_add_intrs_adv_type: nxge_ldgv_init "
4258 			"failed: 0x%x", status));
4259 		/* Free already allocated interrupts */
4260 		for (y = 0; y < nactual; y++) {
4261 			(void) ddi_intr_free(intrp->htable[y]);
4262 		}
4263 
4264 		kmem_free(intrp->htable, intrp->intr_size);
4265 		return (status);
4266 	}
4267 
4268 	ldgp = nxgep->ldgvp->ldgp;
4269 	for (x = 0; x < nrequired; x++, ldgp++) {
4270 		ldgp->vector = (uint8_t)x;
4271 		ldgp->intdata = SID_DATA(ldgp->func, x);
4272 		arg1 = ldgp->ldvp;
4273 		arg2 = nxgep;
4274 		if (ldgp->nldvs == 1) {
4275 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4276 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4277 				"nxge_add_intrs_adv_type: "
4278 				"arg1 0x%x arg2 0x%x: "
4279 				"1-1 int handler (entry %d intdata 0x%x)\n",
4280 				arg1, arg2,
4281 				x, ldgp->intdata));
4282 		} else if (ldgp->nldvs > 1) {
4283 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4284 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4285 				"nxge_add_intrs_adv_type: "
4286 				"arg1 0x%x arg2 0x%x: "
4287 				"nldvs %d int handler "
4288 				"(entry %d intdata 0x%x)\n",
4289 				arg1, arg2,
4290 				ldgp->nldvs, x, ldgp->intdata));
4291 		}
4292 
4293 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4294 			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
4295 			"htable 0x%llx", x, intrp->htable[x]));
4296 
4297 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4298 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
4299 				!= DDI_SUCCESS) {
4300 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4301 				"==> nxge_add_intrs_adv_type: failed #%d "
4302 				"status 0x%x", x, ddi_status));
4303 			for (y = 0; y < intrp->intr_added; y++) {
4304 				(void) ddi_intr_remove_handler(
4305 						intrp->htable[y]);
4306 			}
4307 			/* Free already allocated intr */
4308 			for (y = 0; y < nactual; y++) {
4309 				(void) ddi_intr_free(intrp->htable[y]);
4310 			}
4311 			kmem_free(intrp->htable, intrp->intr_size);
4312 
4313 			(void) nxge_ldgv_uninit(nxgep);
4314 
4315 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4316 		}
4317 		intrp->intr_added++;
4318 	}
4319 
4320 	intrp->msi_intx_cnt = nactual;
4321 
4322 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4323 		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
4324 		navail, nactual,
4325 		intrp->msi_intx_cnt,
4326 		intrp->intr_added));
4327 
4328 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4329 
4330 	(void) nxge_intr_ldgv_init(nxgep);
4331 
4332 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
4333 
4334 	return (status);
4335 }
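
/*
 * The function above follows the standard DDI interrupt allocation
 * pattern.  A minimal sketch of that pattern, with error handling
 * elided and "my_isr", "arg1", "arg2" and "dip" standing in for the
 * real handler and arguments:
 *
 *	int navail, nactual;
 *	ddi_intr_handle_t *htable;
 *
 *	(void) ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &navail);
 *	htable = kmem_alloc(navail * sizeof (ddi_intr_handle_t),
 *	    KM_SLEEP);
 *	(void) ddi_intr_alloc(dip, htable, DDI_INTR_TYPE_MSI, 0,
 *	    navail, &nactual, DDI_INTR_ALLOC_NORMAL);
 *	(void) ddi_intr_add_handler(htable[0], my_isr, arg1, arg2);
 *	(void) ddi_intr_enable(htable[0]);
 */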
4336 
4337 /*ARGSUSED*/
4338 static nxge_status_t
4339 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
4340 {
4341 	dev_info_t		*dip = nxgep->dip;
4342 	p_nxge_ldg_t		ldgp;
4343 	p_nxge_intr_t		intrp;
4344 	uint_t			*inthandler;
4345 	void			*arg1, *arg2;
4346 	int			behavior;
4347 	int			nintrs, navail;
4348 	int			nactual, nrequired;
4349 	int			inum = 0;
4350 	int			x, y;
4351 	int			ddi_status = DDI_SUCCESS;
4352 	nxge_status_t		status = NXGE_OK;
4353 
4354 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
4355 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4356 	intrp->start_inum = 0;
4357 
4358 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4359 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4360 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4361 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
4362 			    "nintrs: %d", ddi_status, nintrs));
4363 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4364 	}
4365 
4366 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4367 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4368 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4369 			"ddi_intr_get_navail() failed, status: 0x%x, "
4370 			    "navail: %d", ddi_status, navail));
4371 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4372 	}
4373 
4374 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
4375 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
4376 		    nintrs, navail));
4377 
4378 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4379 			DDI_INTR_ALLOC_NORMAL);
4380 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4381 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4382 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4383 		    navail, &nactual, behavior);
4384 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4385 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4386 			    " ddi_intr_alloc() failed: %d",
4387 			    ddi_status));
4388 		kmem_free(intrp->htable, intrp->intr_size);
4389 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4390 	}
4391 
4392 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4393 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4394 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4395 				    " ddi_intr_get_pri() failed: %d",
4396 				    ddi_status));
4397 		/* Free already allocated interrupts */
4398 		for (y = 0; y < nactual; y++) {
4399 			(void) ddi_intr_free(intrp->htable[y]);
4400 		}
4401 
4402 		kmem_free(intrp->htable, intrp->intr_size);
4403 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4404 	}
4405 
4406 	nrequired = 0;
4407 	switch (nxgep->niu_type) {
4408 	default:
4409 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4410 		break;
4411 
4412 	case N2_NIU:
4413 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4414 		break;
4415 	}
4416 
4417 	if (status != NXGE_OK) {
4418 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4419 			"nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
4420 			"failed: 0x%x", status));
4421 		/* Free already allocated interrupts */
4422 		for (y = 0; y < nactual; y++) {
4423 			(void) ddi_intr_free(intrp->htable[y]);
4424 		}
4425 
4426 		kmem_free(intrp->htable, intrp->intr_size);
4427 		return (status);
4428 	}
4429 
4430 	ldgp = nxgep->ldgvp->ldgp;
4431 	for (x = 0; x < nrequired; x++, ldgp++) {
4432 		ldgp->vector = (uint8_t)x;
4433 		if (nxgep->niu_type != N2_NIU) {
4434 			ldgp->intdata = SID_DATA(ldgp->func, x);
4435 		}
4436 
4437 		arg1 = ldgp->ldvp;
4438 		arg2 = nxgep;
4439 		if (ldgp->nldvs == 1) {
4440 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4441 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4442 				"nxge_add_intrs_adv_type_fix: "
4443 				"1-1 int handler(%d) ldg %d ldv %d "
4444 				"arg1 $%p arg2 $%p\n",
4445 				x, ldgp->ldg, ldgp->ldvp->ldv,
4446 				arg1, arg2));
4447 		} else if (ldgp->nldvs > 1) {
4448 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4449 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4450 				"nxge_add_intrs_adv_type_fix: "
4451 				"shared int handler(%d) nldvs %d ldg %d "
4452 				"ldv %d arg1 0x%016llx arg2 0x%016llx\n",
4453 				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4454 				arg1, arg2));
4455 		}
4456 
4457 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4458 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
4459 				!= DDI_SUCCESS) {
4460 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4461 				"==> nxge_add_intrs_adv_type_fix: failed #%d "
4462 				"status 0x%x", x, ddi_status));
4463 			for (y = 0; y < intrp->intr_added; y++) {
4464 				(void) ddi_intr_remove_handler(
4465 						intrp->htable[y]);
4466 			}
4467 			/* Free already allocated interrupts */
4468 			for (y = 0; y < nactual; y++) {
4469 				(void) ddi_intr_free(intrp->htable[y]);
4470 			}
4471 			kmem_free(intrp->htable, intrp->intr_size);
4472 
4473 			(void) nxge_ldgv_uninit(nxgep);
4474 
4475 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4476 		}
4477 		intrp->intr_added++;
4478 	}
4479 
4480 	intrp->msi_intx_cnt = nactual;
4481 
4482 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4483 
4484 	status = nxge_intr_ldgv_init(nxgep);
4485 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
4486 
4487 	return (status);
4488 }
4489 
4490 static void
4491 nxge_remove_intrs(p_nxge_t nxgep)
4492 {
4493 	int		i, inum;
4494 	p_nxge_intr_t	intrp;
4495 
4496 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
4497 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4498 	if (!intrp->intr_registered) {
4499 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4500 			"<== nxge_remove_intrs: interrupts not registered"));
4501 		return;
4502 	}
4503 
4504 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
4505 
4506 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4507 		(void) ddi_intr_block_disable(intrp->htable,
4508 			intrp->intr_added);
4509 	} else {
4510 		for (i = 0; i < intrp->intr_added; i++) {
4511 			(void) ddi_intr_disable(intrp->htable[i]);
4512 		}
4513 	}
4514 
4515 	for (inum = 0; inum < intrp->intr_added; inum++) {
4516 		if (intrp->htable[inum]) {
4517 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
4518 		}
4519 	}
4520 
4521 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4522 		if (intrp->htable[inum]) {
4523 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4524 				"nxge_remove_intrs: ddi_intr_free inum %d "
4525 				"msi_intx_cnt %d intr_added %d",
4526 				inum,
4527 				intrp->msi_intx_cnt,
4528 				intrp->intr_added));
4529 
4530 			(void) ddi_intr_free(intrp->htable[inum]);
4531 		}
4532 	}
4533 
4534 	kmem_free(intrp->htable, intrp->intr_size);
4535 	intrp->intr_registered = B_FALSE;
4536 	intrp->intr_enabled = B_FALSE;
4537 	intrp->msi_intx_cnt = 0;
4538 	intrp->intr_added = 0;
4539 
4540 	(void) nxge_ldgv_uninit(nxgep);
4541 
4542 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
4543 	    "#msix-request");
4544 
4545 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
4546 }
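
/*
 * Note that the teardown above mirrors the setup in
 * nxge_add_intrs_adv_type() in reverse order: disable the vectors
 * (block or individually), remove the handlers, free the vectors,
 * then release the handle table and the logical device group state.
 */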
4547 
4548 /*ARGSUSED*/
4549 static void
4550 nxge_remove_soft_intrs(p_nxge_t nxgep)
4551 {
4552 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
4553 	if (nxgep->resched_id) {
4554 		ddi_remove_softintr(nxgep->resched_id);
4555 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4556 			"==> nxge_remove_soft_intrs: removed"));
4557 		nxgep->resched_id = NULL;
4558 	}
4559 
4560 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
4561 }
4562 
4563 /*ARGSUSED*/
4564 static void
4565 nxge_intrs_enable(p_nxge_t nxgep)
4566 {
4567 	p_nxge_intr_t	intrp;
4568 	int		i;
4569 	int		status;
4570 
4571 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
4572 
4573 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4574 
4575 	if (!intrp->intr_registered) {
4576 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
4577 			"interrupts are not registered"));
4578 		return;
4579 	}
4580 
4581 	if (intrp->intr_enabled) {
4582 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4583 			"<== nxge_intrs_enable: already enabled"));
4584 		return;
4585 	}
4586 
4587 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4588 		status = ddi_intr_block_enable(intrp->htable,
4589 			intrp->intr_added);
4590 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
4591 			"block enable - status 0x%x total inums #%d\n",
4592 			status, intrp->intr_added));
4593 	} else {
4594 		for (i = 0; i < intrp->intr_added; i++) {
4595 			status = ddi_intr_enable(intrp->htable[i]);
4596 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
4597 				"ddi_intr_enable:enable - status 0x%x "
4598 				"total inums %d enable inum #%d\n",
4599 				status, intrp->intr_added, i));
4600 			if (status == DDI_SUCCESS) {
4601 				intrp->intr_enabled = B_TRUE;
4602 			}
4603 		}
4604 	}
4605 
4606 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
4607 }
4608 
4609 /*ARGSUSED*/
4610 static void
4611 nxge_intrs_disable(p_nxge_t nxgep)
4612 {
4613 	p_nxge_intr_t	intrp;
4614 	int		i;
4615 
4616 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
4617 
4618 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4619 
4620 	if (!intrp->intr_registered) {
4621 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
4622 			"interrupts are not registered"));
4623 		return;
4624 	}
4625 
4626 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4627 		(void) ddi_intr_block_disable(intrp->htable,
4628 			intrp->intr_added);
4629 	} else {
4630 		for (i = 0; i < intrp->intr_added; i++) {
4631 			(void) ddi_intr_disable(intrp->htable[i]);
4632 		}
4633 	}
4634 
4635 	intrp->intr_enabled = B_FALSE;
4636 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
4637 }
4638 
4639 static nxge_status_t
4640 nxge_mac_register(p_nxge_t nxgep)
4641 {
4642 	mac_register_t *macp;
4643 	int		status;
4644 
4645 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
4646 
4647 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4648 		return (NXGE_ERROR);
4649 
4650 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4651 	macp->m_driver = nxgep;
4652 	macp->m_dip = nxgep->dip;
4653 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
4654 	macp->m_callbacks = &nxge_m_callbacks;
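	/*
	 * The extra 4 bytes subtracted from the maximum SDU below
	 * presumably leave room for a VLAN tag on top of the Ethernet
	 * header and FCS.
	 */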
4655 	macp->m_min_sdu = 0;
4656 	macp->m_max_sdu = nxgep->mac.maxframesize -
4657 		sizeof (struct ether_header) - ETHERFCSL - 4;
4658 
4659 	status = mac_register(macp, &nxgep->mach);
4660 	mac_free(macp);
4661 
4662 	if (status != 0) {
4663 		cmn_err(CE_WARN,
4664 			"!nxge_mac_register failed (status %d instance %d)",
4665 			status, nxgep->instance);
4666 		return (NXGE_ERROR);
4667 	}
4668 
4669 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
4670 		"(instance %d)", nxgep->instance));
4671 
4672 	return (NXGE_OK);
4673 }
4674 
4675 void
4676 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
4677 {
4678 	ssize_t		size;
4679 	mblk_t		*nmp;
4680 	uint8_t		blk_id;
4681 	uint8_t		chan;
4682 	uint32_t	err_id;
4683 	err_inject_t	*eip;
4684 
4685 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
4686 
4687 	size = 1024;
4688 	nmp = mp->b_cont;
4689 	eip = (err_inject_t *)nmp->b_rptr;
4690 	blk_id = eip->blk_id;
4691 	err_id = eip->err_id;
4692 	chan = eip->chan;
4693 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
4694 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
4695 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
4696 	switch (blk_id) {
4697 	case MAC_BLK_ID:
4698 		break;
4699 	case TXMAC_BLK_ID:
4700 		break;
4701 	case RXMAC_BLK_ID:
4702 		break;
4703 	case MIF_BLK_ID:
4704 		break;
4705 	case IPP_BLK_ID:
4706 		nxge_ipp_inject_err(nxgep, err_id);
4707 		break;
4708 	case TXC_BLK_ID:
4709 		nxge_txc_inject_err(nxgep, err_id);
4710 		break;
4711 	case TXDMA_BLK_ID:
4712 		nxge_txdma_inject_err(nxgep, err_id, chan);
4713 		break;
4714 	case RXDMA_BLK_ID:
4715 		nxge_rxdma_inject_err(nxgep, err_id, chan);
4716 		break;
4717 	case ZCP_BLK_ID:
4718 		nxge_zcp_inject_err(nxgep, err_id);
4719 		break;
4720 	case ESPC_BLK_ID:
4721 		break;
4722 	case FFLP_BLK_ID:
4723 		break;
4724 	case PHY_BLK_ID:
4725 		break;
4726 	case ETHER_SERDES_BLK_ID:
4727 		break;
4728 	case PCIE_SERDES_BLK_ID:
4729 		break;
4730 	case VIR_BLK_ID:
4731 		break;
4732 	}
4733 
4734 	nmp->b_wptr = nmp->b_rptr + size;
4735 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
4736 
4737 	miocack(wq, mp, (int)size, 0);
4738 }
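
/*
 * For illustration only: a hypothetical user of this ioctl path
 * would marshal an err_inject_t into the message's continuation
 * block, e.g.:
 *
 *	err_inject_t ei;
 *
 *	ei.blk_id = RXDMA_BLK_ID;	(block to inject into)
 *	ei.err_id = ...;		(block-specific error id)
 *	ei.chan = 0;			(DMA channel, where relevant)
 */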
4739 
4740 static int
4741 nxge_init_common_dev(p_nxge_t nxgep)
4742 {
4743 	p_nxge_hw_list_t	hw_p;
4744 	dev_info_t 		*p_dip;
4745 
4746 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
4747 
4748 	p_dip = nxgep->p_dip;
4749 	MUTEX_ENTER(&nxge_common_lock);
4750 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4751 		"==> nxge_init_common_dev:func # %d",
4752 			nxgep->function_num));
4753 	/*
4754 	 * Loop through the existing per-Neptune hardware list.
4755 	 */
4756 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
4757 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4758 			"==> nxge_init_common_device:func # %d "
4759 			"hw_p $%p parent dip $%p",
4760 			nxgep->function_num,
4761 			hw_p,
4762 			p_dip));
4763 		if (hw_p->parent_devp == p_dip) {
4764 			nxgep->nxge_hw_p = hw_p;
4765 			hw_p->ndevs++;
4766 			hw_p->nxge_p[nxgep->function_num] = nxgep;
4767 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4768 				"==> nxge_init_common_device:func # %d "
4769 				"hw_p $%p parent dip $%p "
4770 				"ndevs %d (found)",
4771 				nxgep->function_num,
4772 				hw_p,
4773 				p_dip,
4774 				hw_p->ndevs));
4775 			break;
4776 		}
4777 	}
4778 
4779 	if (hw_p == NULL) {
4780 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4781 			"==> nxge_init_common_device:func # %d "
4782 			"parent dip $%p (new)",
4783 			nxgep->function_num,
4784 			p_dip));
4785 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
4786 		hw_p->parent_devp = p_dip;
4787 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
4788 		nxgep->nxge_hw_p = hw_p;
4789 		hw_p->ndevs++;
4790 		hw_p->nxge_p[nxgep->function_num] = nxgep;
4791 		hw_p->next = nxge_hw_list;
4792 		if (nxgep->niu_type == N2_NIU) {
4793 			hw_p->niu_type = N2_NIU;
4794 			hw_p->platform_type = P_NEPTUNE_NIU;
4795 		} else {
4796 			hw_p->niu_type = NIU_TYPE_NONE;
4797 			hw_p->platform_type = P_NEPTUNE_NONE;
4798 		}
4799 
4800 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4801 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4802 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4803 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
4804 		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
4805 
4806 		nxge_hw_list = hw_p;
4807 
4808 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
4809 	}
4810 
4811 	MUTEX_EXIT(&nxge_common_lock);
4812 
4813 	nxgep->platform_type = hw_p->platform_type;
4814 	if (nxgep->niu_type != N2_NIU) {
4815 		nxgep->niu_type = hw_p->niu_type;
4816 	}
4817 
4818 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4819 		"==> nxge_init_common_device (nxge_hw_list) $%p",
4820 		nxge_hw_list));
4821 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
4822 
4823 	return (NXGE_OK);
4824 }
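
/*
 * The resulting bookkeeping, sketched informally: each Neptune chip
 * (keyed by its parent dip) owns one nxge_hw_list_t on the global
 * nxge_hw_list, and every nxge instance (function) on that chip
 * points back at it:
 *
 *	nxge_hw_list --> hw_p (chip A) --> hw_p (chip B) --> NULL
 *	                 nxge_p[0..N]      nxge_p[0..N]
 */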
4825 
4826 static void
4827 nxge_uninit_common_dev(p_nxge_t nxgep)
4828 {
4829 	p_nxge_hw_list_t	hw_p, h_hw_p;
4830 	dev_info_t 		*p_dip;
4831 
4832 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
4833 	if (nxgep->nxge_hw_p == NULL) {
4834 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4835 			"<== nxge_uninit_common_device (no common)"));
4836 		return;
4837 	}
4838 
4839 	MUTEX_ENTER(&nxge_common_lock);
4840 	h_hw_p = nxge_hw_list;
4841 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
4842 		p_dip = hw_p->parent_devp;
4843 		if (nxgep->nxge_hw_p == hw_p &&
4844 			p_dip == nxgep->p_dip &&
4845 			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
4846 			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
4847 
4848 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4849 				"==> nxge_uninit_common_device:func # %d "
4850 				"hw_p $%p parent dip $%p "
4851 				"ndevs %d (found)",
4852 				nxgep->function_num,
4853 				hw_p,
4854 				p_dip,
4855 				hw_p->ndevs));
4856 
4857 			nxgep->nxge_hw_p = NULL;
4858 			if (hw_p->ndevs) {
4859 				hw_p->ndevs--;
4860 			}
4861 			hw_p->nxge_p[nxgep->function_num] = NULL;
4862 			if (!hw_p->ndevs) {
4863 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
4864 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
4865 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
4866 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
4867 				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
4868 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4869 					"==> nxge_uninit_common_device: "
4870 					"func # %d "
4871 					"hw_p $%p parent dip $%p "
4872 					"ndevs %d (last)",
4873 					nxgep->function_num,
4874 					hw_p,
4875 					p_dip,
4876 					hw_p->ndevs));
4877 
4878 				if (hw_p == nxge_hw_list) {
4879 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4880 						"==> nxge_uninit_common_device:"
4881 						"remove head func # %d "
4882 						"hw_p $%p parent dip $%p "
4883 						"ndevs %d (head)",
4884 						nxgep->function_num,
4885 						hw_p,
4886 						p_dip,
4887 						hw_p->ndevs));
4888 					nxge_hw_list = hw_p->next;
4889 				} else {
4890 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4891 						"==> nxge_uninit_common_device:"
4892 						"remove middle func # %d "
4893 						"hw_p $%p parent dip $%p "
4894 						"ndevs %d (middle)",
4895 						nxgep->function_num,
4896 						hw_p,
4897 						p_dip,
4898 						hw_p->ndevs));
4899 					h_hw_p->next = hw_p->next;
4900 				}
4901 
4902 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
4903 			}
4904 			break;
4905 		} else {
4906 			h_hw_p = hw_p;
4907 		}
4908 	}
4909 
4910 	MUTEX_EXIT(&nxge_common_lock);
4911 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4912 		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
4913 		nxge_hw_list));
4914 
4915 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
4916 }
4917 
4918 /*
4919  * Determines the number of ports from the niu_type or the platform type.
4920  * Returns the number of ports, or zero on failure.
4921  */
4922 
4923 int
4924 nxge_get_nports(p_nxge_t nxgep)
4925 {
4926 	int	nports = 0;
4927 
4928 	switch (nxgep->niu_type) {
4929 	case N2_NIU:
4930 	case NEPTUNE_2_10GF:
4931 		nports = 2;
4932 		break;
4933 	case NEPTUNE_4_1GC:
4934 	case NEPTUNE_2_10GF_2_1GC:
4935 	case NEPTUNE_1_10GF_3_1GC:
4936 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
4937 		nports = 4;
4938 		break;
4939 	default:
4940 		switch (nxgep->platform_type) {
4941 		case P_NEPTUNE_NIU:
4942 		case P_NEPTUNE_ATLAS_2PORT:
4943 			nports = 2;
4944 			break;
4945 		case P_NEPTUNE_ATLAS_4PORT:
4946 		case P_NEPTUNE_MARAMBA_P0:
4947 		case P_NEPTUNE_MARAMBA_P1:
4948 		case P_NEPTUNE_ALONSO:
4949 			nports = 4;
4950 			break;
4951 		default:
4952 			break;
4953 		}
4954 		break;
4955 	}
4956 
4957 	return (nports);
4958 }
4959 
4960 /*
4961  * The following two functions are to support
4962  * PSARC/2007/453 MSI-X interrupt limit override.
4963  */
4964 static int
4965 nxge_create_msi_property(p_nxge_t nxgep)
4966 {
4967 	int	nmsi;
4968 	extern	int ncpus;
4969 
4970 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
4971 
4972 	switch (nxgep->mac.portmode) {
4973 	case PORT_10G_COPPER:
4974 	case PORT_10G_FIBER:
4975 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
4976 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4977 		/*
4978 		 * The maximum number of MSI-X vectors requested is 8.
4979 		 * If there are fewer than 8 CPUs, request one MSI-X
4980 		 * vector per CPU instead.
4981 		 */
4982 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
4983 			nmsi = NXGE_MSIX_REQUEST_10G;
4984 		} else {
4985 			nmsi = ncpus;
4986 		}
4987 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4988 		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4989 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4990 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4991 		break;
4992 
4993 	default:
4994 		nmsi = NXGE_MSIX_REQUEST_1G;
4995 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4996 		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
4997 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4998 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4999 		break;
5000 	}
5001 
5002 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
5003 	return (nmsi);
5004 }
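
/*
 * The "#msix-request" property created above is visible from
 * userland; for example, "prtconf -v" on the nxge node should list
 * it once created (exact output depends on the prtconf version).
 */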
5005