xref: /titanic_51/usr/src/uts/common/io/nxge/nxge_main.c (revision f500b19684bd0346ac05bec02a50af07f369da1a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
28  */
29 #include	<sys/nxge/nxge_impl.h>
30 #include	<sys/nxge/nxge_hio.h>
31 #include	<sys/nxge/nxge_rxdma.h>
32 #include	<sys/pcie.h>
33 
34 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
35 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
36 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
37 /*
38  * PSARC/2007/453 MSI-X interrupt limit override
39  * (This PSARC case is limited to MSI-X vectors
40  *  and SPARC platforms only).
41  */
42 #if defined(_BIG_ENDIAN)
43 uint32_t	nxge_msi_enable = 2;
44 #else
45 uint32_t	nxge_msi_enable = 1;
46 #endif
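/*
 * The override can be changed from /etc/system with the usual
 * module:variable syntax, e.g. (illustrative):
 *	set nxge:nxge_msi_enable = 0
 * A value of 0 is assumed here to fall back to fixed (INTx)
 * interrupts; see nxge_add_intrs() for the exact semantics.
 */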
47 
48 /*
49  * Software workaround for a Neptune (PCI-E)
50  * hardware interrupt bug: the hardware
51  * may generate spurious interrupts after the
52  * device interrupt handler has been removed. If this flag
53  * is enabled, the driver will reset the
54  * hardware when devices are being detached.
55  */
56 uint32_t	nxge_peu_reset_enable = 0;
57 
58 /*
59  * Software workaround for the hardware
60  * checksum bugs that affect packet transmission
61  * and receive:
62  *
63  * Usage of nxge_cksum_offload:
64  *
65  *  (1) nxge_cksum_offload = 0 (default):
66  *	- transmit packets:
67  *	  TCP: uses the hardware checksum feature.
68  *	  UDP: the driver computes a software checksum
69  *	       based on the partial checksum computed
70  *	       by the IP layer.
71  *	- receive packets:
72  *	  TCP: marks packet checksum flags based on the hardware result.
73  *	  UDP: does not mark checksum flags.
74  *
75  *  (2) nxge_cksum_offload = 1:
76  *	- transmit packets:
77  *	  TCP/UDP: uses the hardware checksum feature.
78  *	- receive packets:
79  *	  TCP/UDP: marks packet checksum flags based on the hardware result.
80  *
81  *  (3) nxge_cksum_offload = 2:
82  *	- The driver will not register its checksum capability.
83  *	  Checksums for both TCP and UDP will be computed
84  *	  by the stack.
85  *	- Software LSO is not allowed in this case.
86  *
87  *  (4) nxge_cksum_offload > 2:
88  *	- Treated as if it were set to 2
89  *	  (the stack will compute the checksums).
90  *
91  *  (5) If the hardware bug is fixed, this workaround
92  *	needs to be updated accordingly to reflect
93  *	the new hardware revision.
94  */
95 uint32_t	nxge_cksum_offload = 0;
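/*
 * For example (illustrative), to have the stack compute all
 * checksums, add the following to /etc/system:
 *	set nxge:nxge_cksum_offload = 2
 */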
96 
97 /*
98  * Globals: tunable parameters (/etc/system or adb)
99  *
100  */
101 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
102 uint32_t 	nxge_rbr_spare_size = 0;
103 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
104 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
105 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
106 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
107 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
108 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
109 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
110 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
111 boolean_t	nxge_jumbo_enable = B_FALSE;
112 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
113 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
114 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
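/*
 * Example /etc/system settings (illustrative; symbolic values such
 * as NXGE_RBR_RBB_DEFAULT must be written as numbers there):
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_enable = 1
 * A live kernel can be patched similarly with mdb -kw, e.g.:
 *	> nxge_max_rx_pkts/W 0t512
 */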
115 
116 /* MAX LSO size */
117 #define		NXGE_LSO_MAXLEN	65535
118 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
119 
120 /*
121  * Debugging flags:
122  *		nxge_no_tx_lb: transmit load balancing (nonzero disables it)
123  *		nxge_tx_lb_policy: 0 - TCP port (default)
124  *				   3 - DEST MAC
125  */
126 uint32_t 	nxge_no_tx_lb = 0;
127 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
128 
129 /*
130  * Add tunable to reduce the amount of time spent in the
131  * ISR doing Rx Processing.
132  */
133 uint32_t nxge_max_rx_pkts = 1024;
134 
135 /*
136  * Tunables to manage the receive buffer blocks.
137  *
138  * nxge_rx_threshold_hi: copy all buffers.
139  * nxge_rx_buf_size_type: receive buffer block size type.
140  * nxge_rx_threshold_lo: copy only up to tunable block size type.
141  */
142 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
143 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
144 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
145 
146 /* Use kmem_alloc() to allocate data buffers. */
147 #if defined(_BIG_ENDIAN)
148 uint32_t	nxge_use_kmem_alloc = 1;
149 #else
150 uint32_t	nxge_use_kmem_alloc = 0;
151 #endif
152 
153 rtrace_t npi_rtracebuf;
154 
155 /*
156  * The hardware sometimes fails to allow enough time for the link partner
157  * to send an acknowledgement for packets that the hardware sent to it. The
158  * hardware resends the packets earlier than it should in those instances.
159  * This behavior caused some switches to acknowledge the wrong packets,
160  * which triggered fatal errors.
161  * This software workaround is to set the replay timer to a value
162  * suggested by the hardware team.
163  *
164  * PCI config space replay timer register:
165  *     The following replay timeout value (0xc) is
166  *     written to bits 18:14 of the register.
167  */
168 #define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
169 #define	PCI_REPLAY_TIMEOUT_SHIFT	14
170 
171 uint32_t	nxge_set_replay_timer = 1;
172 uint32_t	nxge_replay_timeout = 0xc;
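/*
 * A minimal sketch of how these are consumed (see
 * nxge_set_pci_replay_timeout(); "cfg_handle" is a placeholder for
 * the instance's PCI config access handle, and masking the old field
 * value first is an assumption of this sketch):
 *
 *	uint32_t	val;
 *
 *	val = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */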
173 
174 /*
175  * The transmit serialization logic sometimes sleeps
176  * longer than it should before calling the driver
177  * transmit function.
178  * The performance group suggests that a time-wait tunable
179  * can be used to set the maximum wait time when needed;
180  * the default is set to 1 tick.
181  */
182 uint32_t	nxge_tx_serial_maxsleep = 1;
183 
184 #if	defined(sun4v)
185 /*
186  * Hypervisor N2/NIU services information.
187  */
188 static hsvc_info_t niu_hsvc = {
189 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
190 	NIU_MINOR_VER, "nxge"
191 };
192 
193 static int nxge_hsvc_register(p_nxge_t);
194 #endif
195 
196 /*
197  * Function Prototypes
198  */
199 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
200 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
201 static void nxge_unattach(p_nxge_t);
202 static int nxge_quiesce(dev_info_t *);
203 
204 #if NXGE_PROPERTY
205 static void nxge_remove_hard_properties(p_nxge_t);
206 #endif
207 
208 /*
209  * These functions are required by nxge_hio.c.
210  */
211 extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
212 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
213 extern void nxge_grp_cleanup(p_nxge_t nxge);
214 
215 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
216 
217 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
218 static void nxge_destroy_mutexes(p_nxge_t);
219 
220 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
221 static void nxge_unmap_regs(p_nxge_t nxgep);
222 #ifdef	NXGE_DEBUG
223 static void nxge_test_map_regs(p_nxge_t nxgep);
224 #endif
225 
226 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
227 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
228 static void nxge_remove_intrs(p_nxge_t nxgep);
229 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
230 
231 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
232 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
233 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
234 static void nxge_intrs_enable(p_nxge_t nxgep);
235 static void nxge_intrs_disable(p_nxge_t nxgep);
236 
237 static void nxge_suspend(p_nxge_t);
238 static nxge_status_t nxge_resume(p_nxge_t);
239 
240 static nxge_status_t nxge_setup_dev(p_nxge_t);
241 static void nxge_destroy_dev(p_nxge_t);
242 
243 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
244 static void nxge_free_mem_pool(p_nxge_t);
245 
246 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
247 static void nxge_free_rx_mem_pool(p_nxge_t);
248 
249 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
250 static void nxge_free_tx_mem_pool(p_nxge_t);
251 
252 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
253 	struct ddi_dma_attr *,
254 	size_t, ddi_device_acc_attr_t *, uint_t,
255 	p_nxge_dma_common_t);
256 
257 static void nxge_dma_mem_free(p_nxge_dma_common_t);
258 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
259 
260 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
261 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
262 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
263 
264 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
265 	p_nxge_dma_common_t *, size_t);
266 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
267 
268 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
269 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
270 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
271 
272 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
273 	p_nxge_dma_common_t *,
274 	size_t);
275 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
276 
277 static int nxge_init_common_dev(p_nxge_t);
278 static void nxge_uninit_common_dev(p_nxge_t);
279 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
280     char *, caddr_t);
281 
282 /*
283  * The next declarations are for the GLDv3 interface.
284  */
285 static int nxge_m_start(void *);
286 static void nxge_m_stop(void *);
287 static int nxge_m_unicst(void *, const uint8_t *);
288 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
289 static int nxge_m_promisc(void *, boolean_t);
290 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
291 static void nxge_m_resources(void *);
292 mblk_t *nxge_m_tx(void *arg, mblk_t *);
293 static nxge_status_t nxge_mac_register(p_nxge_t);
294 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
295 	mac_addr_slot_t slot);
296 void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
297 	boolean_t factory);
298 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
299 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
300 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
301 static	boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
302 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
303     uint_t, const void *);
304 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
305     uint_t, uint_t, void *);
306 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
307     const void *);
308 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
309     void *);
310 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
311 
312 static void nxge_niu_peu_reset(p_nxge_t nxgep);
313 static void nxge_set_pci_replay_timeout(nxge_t *);
314 
315 mac_priv_prop_t nxge_priv_props[] = {
316 	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
317 	{"_adv_pause_cap", MAC_PROP_PERM_RW},
318 	{"_function_number", MAC_PROP_PERM_READ},
319 	{"_fw_version", MAC_PROP_PERM_READ},
320 	{"_port_mode", MAC_PROP_PERM_READ},
321 	{"_hot_swap_phy", MAC_PROP_PERM_READ},
322 	{"_accept_jumbo", MAC_PROP_PERM_RW},
323 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
324 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
325 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
326 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
327 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
328 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
329 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
330 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
331 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
332 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
333 	{"_soft_lso_enable", MAC_PROP_PERM_RW}
334 };
335 
336 #define	NXGE_MAX_PRIV_PROPS	\
337 	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))
338 
339 #define	NXGE_M_CALLBACK_FLAGS\
340 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
341 
342 
343 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
344 #define	MAX_DUMP_SZ 256
345 
349 mac_callbacks_t nxge_m_callbacks = {
350 	NXGE_M_CALLBACK_FLAGS,
351 	nxge_m_stat,
352 	nxge_m_start,
353 	nxge_m_stop,
354 	nxge_m_promisc,
355 	nxge_m_multicst,
356 	nxge_m_unicst,
357 	nxge_m_tx,
358 	nxge_m_resources,
359 	nxge_m_ioctl,
360 	nxge_m_getcapab,
361 	NULL,
362 	NULL,
363 	nxge_m_setprop,
364 	nxge_m_getprop
365 };
366 
367 void
368 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
369 
370 /* PSARC/2007/453 MSI-X interrupt limit override. */
371 #define	NXGE_MSIX_REQUEST_10G	8
372 #define	NXGE_MSIX_REQUEST_1G	2
373 static int nxge_create_msi_property(p_nxge_t);
374 
375 /*
376  * These global variables control the message
377  * output.
378  */
379 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
380 uint64_t nxge_debug_level;
381 
382 /*
383  * This list contains the instance structures for the Neptune
384  * devices present in the system. The lock exists to guarantee
385  * mutually exclusive access to the list.
386  */
387 void 			*nxge_list = NULL;
388 
389 void			*nxge_hw_list = NULL;
390 nxge_os_mutex_t 	nxge_common_lock;
391 
392 extern uint64_t 	npi_debug_level;
393 
394 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
395 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
396 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
397 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
398 extern void		nxge_fm_init(p_nxge_t,
399 					ddi_device_acc_attr_t *,
400 					ddi_device_acc_attr_t *,
401 					ddi_dma_attr_t *);
402 extern void		nxge_fm_fini(p_nxge_t);
403 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
404 
405 /*
406  * Count used to maintain the number of buffers being used
407  * by Neptune instances and loaned up to the upper layers.
408  */
409 uint32_t nxge_mblks_pending = 0;
410 
411 /*
412  * Device register access attributes for PIO.
413  */
414 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
415 	DDI_DEVICE_ATTR_V0,
416 	DDI_STRUCTURE_LE_ACC,
417 	DDI_STRICTORDER_ACC,
418 };
419 
420 /*
421  * Device descriptor access attributes for DMA.
422  */
423 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
424 	DDI_DEVICE_ATTR_V0,
425 	DDI_STRUCTURE_LE_ACC,
426 	DDI_STRICTORDER_ACC
427 };
428 
429 /*
430  * Device buffer access attributes for DMA.
431  */
432 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
433 	DDI_DEVICE_ATTR_V0,
434 	DDI_STRUCTURE_BE_ACC,
435 	DDI_STRICTORDER_ACC
436 };
437 
438 ddi_dma_attr_t nxge_desc_dma_attr = {
439 	DMA_ATTR_V0,		/* version number. */
440 	0,			/* low address */
441 	0xffffffffffffffff,	/* high address */
442 	0xffffffffffffffff,	/* address counter max */
443 #ifndef NIU_PA_WORKAROUND
444 	0x100000,		/* alignment */
445 #else
446 	0x2000,
447 #endif
448 	0xfc00fc,		/* dlim_burstsizes */
449 	0x1,			/* minimum transfer size */
450 	0xffffffffffffffff,	/* maximum transfer size */
451 	0xffffffffffffffff,	/* maximum segment size */
452 	1,			/* scatter/gather list length */
453 	(unsigned int) 1,	/* granularity */
454 	0			/* attribute flags */
455 };
456 
457 ddi_dma_attr_t nxge_tx_dma_attr = {
458 	DMA_ATTR_V0,		/* version number. */
459 	0,			/* low address */
460 	0xffffffffffffffff,	/* high address */
461 	0xffffffffffffffff,	/* address counter max */
462 #if defined(_BIG_ENDIAN)
463 	0x2000,			/* alignment */
464 #else
465 	0x1000,			/* alignment */
466 #endif
467 	0xfc00fc,		/* dlim_burstsizes */
468 	0x1,			/* minimum transfer size */
469 	0xffffffffffffffff,	/* maximum transfer size */
470 	0xffffffffffffffff,	/* maximum segment size */
471 	5,			/* scatter/gather list length */
472 	(unsigned int) 1,	/* granularity */
473 	0			/* attribute flags */
474 };
475 
476 ddi_dma_attr_t nxge_rx_dma_attr = {
477 	DMA_ATTR_V0,		/* version number. */
478 	0,			/* low address */
479 	0xffffffffffffffff,	/* high address */
480 	0xffffffffffffffff,	/* address counter max */
481 	0x2000,			/* alignment */
482 	0xfc00fc,		/* dlim_burstsizes */
483 	0x1,			/* minimum transfer size */
484 	0xffffffffffffffff,	/* maximum transfer size */
485 	0xffffffffffffffff,	/* maximum segment size */
486 	1,			/* scatter/gather list length */
487 	(unsigned int) 1,	/* granularity */
488 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
489 };
490 
491 ddi_dma_lim_t nxge_dma_limits = {
492 	(uint_t)0,		/* dlim_addr_lo */
493 	(uint_t)0xffffffff,	/* dlim_addr_hi */
494 	(uint_t)0xffffffff,	/* dlim_cntr_max */
495 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
496 	0x1,			/* dlim_minxfer */
497 	1024			/* dlim_speed */
498 };
499 
500 dma_method_t nxge_force_dma = DVMA;
501 
502 /*
503  * DMA chunk sizes.
504  *
505  * Try to allocate the largest possible size
506  * so that fewer DMA chunks need to be managed.
507  */
508 #ifdef NIU_PA_WORKAROUND
509 size_t alloc_sizes [] = {0x2000};
510 #else
511 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
512 		0x10000, 0x20000, 0x40000, 0x80000,
513 		0x100000, 0x200000, 0x400000, 0x800000,
514 		0x1000000, 0x2000000, 0x4000000};
515 #endif
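/*
 * The buffer allocation code (e.g. nxge_alloc_rx_buf_dma()) is
 * expected to try these sizes from the largest downward until
 * ddi_dma_mem_alloc() succeeds; with NIU_PA_WORKAROUND every
 * chunk is a fixed 8K.
 */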
516 
517 /*
518  * Translate "dev_t" to a pointer to the associated "dev_info_t".
519  */
520 
521 extern void nxge_get_environs(nxge_t *);
522 
523 static int
524 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
525 {
526 	p_nxge_t	nxgep = NULL;
527 	int		instance;
528 	int		status = DDI_SUCCESS;
529 	uint8_t		portn;
530 	nxge_mmac_t	*mmac_info;
531 	p_nxge_param_t	param_arr;
532 
533 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
534 
535 	/*
536 	 * Get the device instance since we'll need to setup
537 	 * or retrieve a soft state for this instance.
538 	 */
539 	instance = ddi_get_instance(dip);
540 
541 	switch (cmd) {
542 	case DDI_ATTACH:
543 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
544 		break;
545 
546 	case DDI_RESUME:
547 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
548 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
549 		if (nxgep == NULL) {
550 			status = DDI_FAILURE;
551 			break;
552 		}
553 		if (nxgep->dip != dip) {
554 			status = DDI_FAILURE;
555 			break;
556 		}
557 		if (nxgep->suspended == DDI_PM_SUSPEND) {
558 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
559 		} else {
560 			status = nxge_resume(nxgep);
561 		}
562 		goto nxge_attach_exit;
563 
564 	case DDI_PM_RESUME:
565 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
566 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
567 		if (nxgep == NULL) {
568 			status = DDI_FAILURE;
569 			break;
570 		}
571 		if (nxgep->dip != dip) {
572 			status = DDI_FAILURE;
573 			break;
574 		}
575 		status = nxge_resume(nxgep);
576 		goto nxge_attach_exit;
577 
578 	default:
579 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
580 		status = DDI_FAILURE;
581 		goto nxge_attach_exit;
582 	}
583 
584 
585 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
586 		status = DDI_FAILURE;
587 		goto nxge_attach_exit;
588 	}
589 
590 	nxgep = ddi_get_soft_state(nxge_list, instance);
591 	if (nxgep == NULL) {
592 		status = NXGE_ERROR;
593 		goto nxge_attach_fail2;
594 	}
595 
596 	nxgep->nxge_magic = NXGE_MAGIC;
597 
598 	nxgep->drv_state = 0;
599 	nxgep->dip = dip;
600 	nxgep->instance = instance;
601 	nxgep->p_dip = ddi_get_parent(dip);
602 	nxgep->nxge_debug_level = nxge_debug_level;
603 	npi_debug_level = nxge_debug_level;
604 
605 	/* Are we a guest running in a Hybrid I/O environment? */
606 	nxge_get_environs(nxgep);
607 
608 	status = nxge_map_regs(nxgep);
609 
610 	if (status != NXGE_OK) {
611 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
612 		goto nxge_attach_fail3;
613 	}
614 
615 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
616 	    &nxge_dev_desc_dma_acc_attr,
617 	    &nxge_rx_dma_attr);
618 
619 	/* Create & initialize the per-Neptune data structure */
620 	/* (even if we're a guest). */
621 	status = nxge_init_common_dev(nxgep);
622 	if (status != NXGE_OK) {
623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
624 		    "nxge_init_common_dev failed"));
625 		goto nxge_attach_fail4;
626 	}
627 
628 	/*
629 	 * Software workaround: set the replay timer.
630 	 */
631 	if (nxgep->niu_type != N2_NIU) {
632 		nxge_set_pci_replay_timeout(nxgep);
633 	}
634 
635 #if defined(sun4v)
636 	/* This is required by nxge_hio_init(), which follows. */
637 	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
638 		goto nxge_attach_fail4;
639 #endif
640 
641 	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
642 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
643 		    "nxge_hio_init failed"));
644 		goto nxge_attach_fail4;
645 	}
646 
647 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
648 		if (nxgep->function_num > 1) {
649 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
650 			    " function %d. Only functions 0 and 1 are "
651 			    "supported for this card.", nxgep->function_num));
652 			status = NXGE_ERROR;
653 			goto nxge_attach_fail4;
654 		}
655 	}
656 
657 	if (isLDOMguest(nxgep)) {
658 		/*
659 		 * Use the function number here.
660 		 */
661 		nxgep->mac.portnum = nxgep->function_num;
662 		nxgep->mac.porttype = PORT_TYPE_LOGICAL;
663 
664 		/* XXX We'll set the MAC address counts to 1 for now. */
665 		mmac_info = &nxgep->nxge_mmac_info;
666 		mmac_info->num_mmac = 1;
667 		mmac_info->naddrfree = 1;
668 	} else {
669 		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
670 		nxgep->mac.portnum = portn;
671 		if ((portn == 0) || (portn == 1))
672 			nxgep->mac.porttype = PORT_TYPE_XMAC;
673 		else
674 			nxgep->mac.porttype = PORT_TYPE_BMAC;
675 		/*
676 		 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
677 		 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
678 		 * The two types of MACs have different characteristics.
679 		 */
680 		mmac_info = &nxgep->nxge_mmac_info;
681 		if (nxgep->function_num < 2) {
682 			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
683 			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
684 		} else {
685 			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
686 			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
687 		}
688 	}
689 	/*
690 	 * Setup the Ndd parameters for this instance.
691 	 */
692 	nxge_init_param(nxgep);
693 
694 	/*
695 	 * Setup Register Tracing Buffer.
696 	 */
697 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
698 
699 	/* init stats ptr */
700 	nxge_init_statsp(nxgep);
701 
702 	/*
703 	 * Copy the vpd info from eeprom to a local data
704 	 * structure, and then check its validity.
705 	 */
706 	if (!isLDOMguest(nxgep)) {
707 		int *regp;
708 		uint_t reglen;
709 		int rv;
710 
711 		nxge_vpd_info_get(nxgep);
712 
713 		/* Find the NIU config handle. */
714 		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
715 		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
716 		    "reg", &regp, &reglen);
717 
718 		if (rv != DDI_PROP_SUCCESS) {
719 			goto nxge_attach_fail5;
720 		}
721 		/*
722 		 * The address_hi, that is, the first int in the reg
723 		 * property, contains the config handle; bits 28-31,
724 		 * which are OBP-specific info, must be masked off.
725 		 */
726 		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
727 		ddi_prop_free(regp);
728 	}
729 
730 	if (isLDOMguest(nxgep)) {
731 		uchar_t *prop_val;
732 		uint_t prop_len;
733 		uint32_t max_frame_size;
734 
735 		extern void nxge_get_logical_props(p_nxge_t);
736 
737 		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
738 		nxgep->mac.portmode = PORT_LOGICAL;
739 		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
740 		    "phy-type", "virtual transceiver");
741 
742 		nxgep->nports = 1;
743 		nxgep->board_ver = 0;	/* XXX What? */
744 
745 		/*
746 		 * local-mac-address property gives us info on which
747 		 * specific MAC address the Hybrid resource is associated
748 		 * with.
749 		 */
750 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
751 		    "local-mac-address", &prop_val,
752 		    &prop_len) != DDI_PROP_SUCCESS) {
753 			goto nxge_attach_fail5;
754 		}
755 		if (prop_len !=  ETHERADDRL) {
756 			ddi_prop_free(prop_val);
757 			goto nxge_attach_fail5;
758 		}
759 		ether_copy(prop_val, nxgep->hio_mac_addr);
760 		ddi_prop_free(prop_val);
761 		nxge_get_logical_props(nxgep);
762 
763 		/*
764 		 * Enable Jumbo property based on the "max-frame-size"
765 		 * property value.
766 		 */
767 		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
768 		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
769 		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
770 		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
771 		    (max_frame_size <= TX_JUMBO_MTU)) {
772 			param_arr = nxgep->param_arr;
773 
774 			param_arr[param_accept_jumbo].value = 1;
775 			nxgep->mac.is_jumbo = B_TRUE;
776 			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
777 			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
778 			    NXGE_EHEADER_VLAN_CRC;
779 		}
780 	} else {
781 		status = nxge_xcvr_find(nxgep);
782 
783 		if (status != NXGE_OK) {
784 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
785 			    " Couldn't determine card type"
786 			    " .... exit "));
787 			goto nxge_attach_fail5;
788 		}
789 
790 		status = nxge_get_config_properties(nxgep);
791 
792 		if (status != NXGE_OK) {
793 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
794 			    "get_hw create failed"));
795 			goto nxge_attach_fail;
796 		}
797 	}
798 
799 	/*
800 	 * Setup the Kstats for the driver.
801 	 */
802 	nxge_setup_kstats(nxgep);
803 
804 	if (!isLDOMguest(nxgep))
805 		nxge_setup_param(nxgep);
806 
807 	status = nxge_setup_system_dma_pages(nxgep);
808 	if (status != NXGE_OK) {
809 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
810 		goto nxge_attach_fail;
811 	}
812 
813 	nxge_hw_id_init(nxgep);
814 
815 	if (!isLDOMguest(nxgep))
816 		nxge_hw_init_niu_common(nxgep);
817 
818 	status = nxge_setup_mutexes(nxgep);
819 	if (status != NXGE_OK) {
820 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
821 		goto nxge_attach_fail;
822 	}
823 
824 #if defined(sun4v)
825 	if (isLDOMguest(nxgep)) {
826 		/* Find our VR & channel sets. */
827 		status = nxge_hio_vr_add(nxgep);
828 		goto nxge_attach_exit;
829 	}
830 #endif
831 
832 	status = nxge_setup_dev(nxgep);
833 	if (status != DDI_SUCCESS) {
834 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
835 		goto nxge_attach_fail;
836 	}
837 
838 	status = nxge_add_intrs(nxgep);
839 	if (status != DDI_SUCCESS) {
840 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
841 		goto nxge_attach_fail;
842 	}
843 	status = nxge_add_soft_intrs(nxgep);
844 	if (status != DDI_SUCCESS) {
845 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
846 		    "add_soft_intr failed"));
847 		goto nxge_attach_fail;
848 	}
849 
850 	/* If a guest, register with vio_net instead. */
851 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
852 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
853 		    "unable to register to mac layer (%d)", status));
854 		goto nxge_attach_fail;
855 	}
856 
857 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
858 
859 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
860 	    "registered to mac (instance %d)", instance));
861 
862 	/* nxge_link_monitor calls xcvr.check_link recursively */
863 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
864 
865 	goto nxge_attach_exit;
866 
867 nxge_attach_fail:
868 	nxge_unattach(nxgep);
869 	goto nxge_attach_fail1;
870 
871 nxge_attach_fail5:
872 	/*
873 	 * Tear down the ndd parameters setup.
874 	 */
875 	nxge_destroy_param(nxgep);
876 
877 	/*
878 	 * Tear down the kstat setup.
879 	 */
880 	nxge_destroy_kstats(nxgep);
881 
882 nxge_attach_fail4:
883 	if (nxgep->nxge_hw_p) {
884 		nxge_uninit_common_dev(nxgep);
885 		nxgep->nxge_hw_p = NULL;
886 	}
887 
888 nxge_attach_fail3:
889 	/*
890 	 * Unmap the register setup.
891 	 */
892 	nxge_unmap_regs(nxgep);
893 
894 	nxge_fm_fini(nxgep);
895 
896 nxge_attach_fail2:
897 	ddi_soft_state_free(nxge_list, nxgep->instance);
898 
899 nxge_attach_fail1:
900 	if (status != NXGE_OK)
901 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
902 	nxgep = NULL;
903 
904 nxge_attach_exit:
905 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
906 	    status));
907 
908 	return (status);
909 }
910 
911 static int
912 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
913 {
914 	int 		status = DDI_SUCCESS;
915 	int 		instance;
916 	p_nxge_t 	nxgep = NULL;
917 
918 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
919 	instance = ddi_get_instance(dip);
920 	nxgep = ddi_get_soft_state(nxge_list, instance);
921 	if (nxgep == NULL) {
922 		status = DDI_FAILURE;
923 		goto nxge_detach_exit;
924 	}
925 
926 	switch (cmd) {
927 	case DDI_DETACH:
928 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
929 		break;
930 
931 	case DDI_PM_SUSPEND:
932 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
933 		nxgep->suspended = DDI_PM_SUSPEND;
934 		nxge_suspend(nxgep);
935 		break;
936 
937 	case DDI_SUSPEND:
938 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
939 		if (nxgep->suspended != DDI_PM_SUSPEND) {
940 			nxgep->suspended = DDI_SUSPEND;
941 			nxge_suspend(nxgep);
942 		}
943 		break;
944 
945 	default:
946 		status = DDI_FAILURE;
947 	}
948 
949 	if (cmd != DDI_DETACH)
950 		goto nxge_detach_exit;
951 
952 	/*
953 	 * Stop the xcvr polling.
954 	 */
955 	nxgep->suspended = cmd;
956 
957 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
958 
959 	if (isLDOMguest(nxgep)) {
960 		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
961 			nxge_m_stop((void *)nxgep);
962 		nxge_hio_unregister(nxgep);
963 	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
964 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
965 		    "<== nxge_detach status = 0x%08X", status));
966 		return (DDI_FAILURE);
967 	}
968 
969 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
970 	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));
971 
972 	nxge_unattach(nxgep);
973 	nxgep = NULL;
974 
975 nxge_detach_exit:
976 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
977 	    status));
978 
979 	return (status);
980 }
981 
982 static void
983 nxge_unattach(p_nxge_t nxgep)
984 {
985 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
986 
987 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
988 		return;
989 	}
990 
991 	nxgep->nxge_magic = 0;
992 
993 	if (nxgep->nxge_timerid) {
994 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
995 		nxgep->nxge_timerid = 0;
996 	}
997 
998 	/*
999 	 * If this flag is set, the reset below affects
1000 	 * the Neptune (PCI-E) hardware only.
1001 	 */
1002 	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1003 		nxge_niu_peu_reset(nxgep);
1004 	}
1005 
1006 #if	defined(sun4v)
1007 	if (isLDOMguest(nxgep)) {
1008 		(void) nxge_hio_vr_release(nxgep);
1009 	}
1010 #endif
1011 
1012 	if (nxgep->nxge_hw_p) {
1013 		nxge_uninit_common_dev(nxgep);
1014 		nxgep->nxge_hw_p = NULL;
1015 	}
1016 
1017 #if	defined(sun4v)
1018 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1019 		(void) hsvc_unregister(&nxgep->niu_hsvc);
1020 		nxgep->niu_hsvc_available = B_FALSE;
1021 	}
1022 #endif
1023 	/*
1024 	 * Stop any further interrupts.
1025 	 */
1026 	nxge_remove_intrs(nxgep);
1027 
1028 	/* remove soft interrupts */
1029 	nxge_remove_soft_intrs(nxgep);
1030 
1031 	/*
1032 	 * Stop the device and free resources.
1033 	 */
1034 	if (!isLDOMguest(nxgep)) {
1035 		nxge_destroy_dev(nxgep);
1036 	}
1037 
1038 	/*
1039 	 * Tear down the ndd parameters setup.
1040 	 */
1041 	nxge_destroy_param(nxgep);
1042 
1043 	/*
1044 	 * Tear down the kstat setup.
1045 	 */
1046 	nxge_destroy_kstats(nxgep);
1047 
1048 	/*
1049 	 * Destroy all mutexes.
1050 	 */
1051 	nxge_destroy_mutexes(nxgep);
1052 
1053 	/*
1054 	 * Remove the list of ndd parameters which
1055 	 * were setup during attach.
1056 	 */
1057 	if (nxgep->dip) {
1058 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1059 		    " nxge_unattach: remove all properties"));
1060 
1061 		(void) ddi_prop_remove_all(nxgep->dip);
1062 	}
1063 
1064 #if NXGE_PROPERTY
1065 	nxge_remove_hard_properties(nxgep);
1066 #endif
1067 
1068 	/*
1069 	 * Unmap the register setup.
1070 	 */
1071 	nxge_unmap_regs(nxgep);
1072 
1073 	nxge_fm_fini(nxgep);
1074 
1075 	ddi_soft_state_free(nxge_list, nxgep->instance);
1076 
1077 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1078 }
1079 
1080 #if defined(sun4v)
1081 int
1082 nxge_hsvc_register(nxge_t *nxgep)
1083 {
1084 	nxge_status_t status;
1085 
1086 	if (nxgep->niu_type == N2_NIU) {
1087 		nxgep->niu_hsvc_available = B_FALSE;
1088 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
1089 		if ((status = hsvc_register(&nxgep->niu_hsvc,
1090 		    &nxgep->niu_min_ver)) != 0) {
1091 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1092 			    "nxge_attach: %s: cannot negotiate "
1093 			    "hypervisor services revision %d group: 0x%lx "
1094 			    "major: 0x%lx minor: 0x%lx errno: %d",
1095 			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1096 			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1097 			    niu_hsvc.hsvc_minor, status));
1098 			return (DDI_FAILURE);
1099 		}
1100 		nxgep->niu_hsvc_available = B_TRUE;
1101 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1102 		    "NIU Hypervisor service enabled"));
1103 	}
1104 
1105 	return (DDI_SUCCESS);
1106 }
1107 #endif
1108 
1109 static char n2_siu_name[] = "niu";
1110 
1111 static nxge_status_t
1112 nxge_map_regs(p_nxge_t nxgep)
1113 {
1114 	int		ddi_status = DDI_SUCCESS;
1115 	p_dev_regs_t 	dev_regs;
1116 	char		buf[MAXPATHLEN + 1];
1117 	char 		*devname;
1118 #ifdef	NXGE_DEBUG
1119 	char 		*sysname;
1120 #endif
1121 	off_t		regsize;
1122 	nxge_status_t	status = NXGE_OK;
1123 #if !defined(_BIG_ENDIAN)
1124 	off_t pci_offset;
1125 	uint16_t pcie_devctl;
1126 #endif
1127 
1128 	if (isLDOMguest(nxgep)) {
1129 		return (nxge_guest_regs_map(nxgep));
1130 	}
1131 
1132 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1133 	nxgep->dev_regs = NULL;
1134 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1135 	dev_regs->nxge_regh = NULL;
1136 	dev_regs->nxge_pciregh = NULL;
1137 	dev_regs->nxge_msix_regh = NULL;
1138 	dev_regs->nxge_vir_regh = NULL;
1139 	dev_regs->nxge_vir2_regh = NULL;
1140 	nxgep->niu_type = NIU_TYPE_NONE;
1141 
1142 	devname = ddi_pathname(nxgep->dip, buf);
1143 	ASSERT(strlen(devname) > 0);
1144 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1145 	    "nxge_map_regs: pathname devname %s", devname));
1146 
1147 	/*
1148 	 * The driver is running on an N2/NIU system if devname is something
1149 	 * like "/niu@80/network@0"
1150 	 */
1151 	if (strstr(devname, n2_siu_name)) {
1152 		/* N2/NIU */
1153 		nxgep->niu_type = N2_NIU;
1154 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1155 		    "nxge_map_regs: N2/NIU devname %s", devname));
1156 		/* get function number */
1157 		nxgep->function_num =
1158 		    (devname[strlen(devname) -1] == '1' ? 1 : 0);
1159 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1160 		    "nxge_map_regs: N2/NIU function number %d",
1161 		    nxgep->function_num));
1162 	} else {
1163 		int		*prop_val;
1164 		uint_t 		prop_len;
1165 		uint8_t 	func_num;
1166 
1167 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1168 		    0, "reg",
1169 		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1170 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1171 			    "Reg property not found"));
1172 			ddi_status = DDI_FAILURE;
1173 			goto nxge_map_regs_fail0;
1174 
1175 		} else {
1176 			func_num = (prop_val[0] >> 8) & 0x7;
1177 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1178 			    "Reg property found: fun # %d",
1179 			    func_num));
1180 			nxgep->function_num = func_num;
1181 			if (isLDOMguest(nxgep)) {
1182 				nxgep->function_num /= 2;
1183 				return (NXGE_OK);
1184 			}
1185 			ddi_prop_free(prop_val);
1186 		}
1187 	}
1188 
1189 	switch (nxgep->niu_type) {
1190 	default:
1191 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1192 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1193 		    "nxge_map_regs: pci config size 0x%x", regsize));
1194 
1195 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1196 		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1197 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1198 		if (ddi_status != DDI_SUCCESS) {
1199 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1200 			    "ddi_map_regs, nxge bus config regs failed"));
1201 			goto nxge_map_regs_fail0;
1202 		}
1203 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1204 		    "nxge_map_reg: PCI config addr 0x%0llx "
1205 		    " handle 0x%0llx", dev_regs->nxge_pciregp,
1206 		    dev_regs->nxge_pciregh));
1207 			/*
1208 			 * IMPORTANT:
1209 			 * workaround for a bit-swapping bug in HW
1210 			 * which ends up with no-snoop = yes,
1211 			 * resulting in DMA not being synched properly.
1212 			 */
1213 #if !defined(_BIG_ENDIAN)
1214 		/* workarounds for x86 systems */
1215 		pci_offset = 0x80 + PCIE_DEVCTL;
1216 		pcie_devctl = 0x0;
1217 		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
1218 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
1219 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1220 		    pcie_devctl);
1221 #endif
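		/*
		 * Note: pcie_devctl starts from zero above, so the "&=" is
		 * a no-op; the net effect of the write is a device control
		 * value with ENABLE_NO_SNOOP clear and RO_EN set.
		 */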
1222 
1223 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1224 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1225 		    "nxge_map_regs: pio size 0x%x", regsize));
1226 		/* set up the device mapped register */
1227 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1228 		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1229 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1230 		if (ddi_status != DDI_SUCCESS) {
1231 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1232 			    "ddi_map_regs for Neptune global reg failed"));
1233 			goto nxge_map_regs_fail1;
1234 		}
1235 
1236 		/* set up the msi/msi-x mapped register */
1237 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1238 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1239 		    "nxge_map_regs: msix size 0x%x", regsize));
1240 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1241 		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1242 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1243 		if (ddi_status != DDI_SUCCESS) {
1244 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1245 			    "ddi_map_regs for msi reg failed"));
1246 			goto nxge_map_regs_fail2;
1247 		}
1248 
1249 		/* set up the vio region mapped register */
1250 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1251 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1252 		    "nxge_map_regs: vio size 0x%x", regsize));
1253 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1254 		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1255 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1256 
1257 		if (ddi_status != DDI_SUCCESS) {
1258 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1259 			    "ddi_map_regs for nxge vio reg failed"));
1260 			goto nxge_map_regs_fail3;
1261 		}
1262 		nxgep->dev_regs = dev_regs;
1263 
1264 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1265 		NPI_PCI_ADD_HANDLE_SET(nxgep,
1266 		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1267 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1268 		NPI_MSI_ADD_HANDLE_SET(nxgep,
1269 		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1270 
1271 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1272 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1273 
1274 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1275 		NPI_REG_ADD_HANDLE_SET(nxgep,
1276 		    (npi_reg_ptr_t)dev_regs->nxge_regp);
1277 
1278 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1279 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1280 		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1281 
1282 		break;
1283 
1284 	case N2_NIU:
1285 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1286 		/*
1287 		 * Set up the device mapped register (FWARC 2006/556)
1288 		 * (changed back to 1: reg starts at 1!)
1289 		 */
1290 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1291 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1292 		    "nxge_map_regs: dev size 0x%x", regsize));
1293 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1294 		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1295 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1296 
1297 		if (ddi_status != DDI_SUCCESS) {
1298 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1299 			    "ddi_map_regs for N2/NIU, global reg failed "));
1300 			goto nxge_map_regs_fail1;
1301 		}
1302 
1303 		/* set up the first vio region mapped register */
1304 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1305 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1306 		    "nxge_map_regs: vio (1) size 0x%x", regsize));
1307 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1308 		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1309 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1310 
1311 		if (ddi_status != DDI_SUCCESS) {
1312 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1313 			    "ddi_map_regs for nxge vio reg failed"));
1314 			goto nxge_map_regs_fail2;
1315 		}
1316 		/* set up the second vio region mapped register */
1317 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1318 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1319 		    "nxge_map_regs: vio (3) size 0x%x", regsize));
1320 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1321 		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1322 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1323 
1324 		if (ddi_status != DDI_SUCCESS) {
1325 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1326 			    "ddi_map_regs for nxge vio2 reg failed"));
1327 			goto nxge_map_regs_fail3;
1328 		}
1329 		nxgep->dev_regs = dev_regs;
1330 
1331 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1332 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1333 
1334 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1335 		NPI_REG_ADD_HANDLE_SET(nxgep,
1336 		    (npi_reg_ptr_t)dev_regs->nxge_regp);
1337 
1338 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1339 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1340 		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1341 
1342 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1343 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
1344 		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1345 
1346 		break;
1347 	}
1348 
1349 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1350 	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1351 
1352 	goto nxge_map_regs_exit;
1353 nxge_map_regs_fail3:
1354 	if (dev_regs->nxge_msix_regh) {
1355 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1356 	}
1357 	if (dev_regs->nxge_vir_regh) {
1358 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1359 	}
1360 nxge_map_regs_fail2:
1361 	if (dev_regs->nxge_regh) {
1362 		ddi_regs_map_free(&dev_regs->nxge_regh);
1363 	}
1364 nxge_map_regs_fail1:
1365 	if (dev_regs->nxge_pciregh) {
1366 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1367 	}
1368 nxge_map_regs_fail0:
1369 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1370 	kmem_free(dev_regs, sizeof (dev_regs_t));
1371 
1372 nxge_map_regs_exit:
1373 	if (ddi_status != DDI_SUCCESS)
1374 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1375 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1376 	return (status);
1377 }
1378 
1379 static void
1380 nxge_unmap_regs(p_nxge_t nxgep)
1381 {
1382 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1383 
1384 	if (isLDOMguest(nxgep)) {
1385 		nxge_guest_regs_map_free(nxgep);
1386 		return;
1387 	}
1388 
1389 	if (nxgep->dev_regs) {
1390 		if (nxgep->dev_regs->nxge_pciregh) {
1391 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1392 			    "==> nxge_unmap_regs: bus"));
1393 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1394 			nxgep->dev_regs->nxge_pciregh = NULL;
1395 		}
1396 		if (nxgep->dev_regs->nxge_regh) {
1397 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1398 			    "==> nxge_unmap_regs: device registers"));
1399 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1400 			nxgep->dev_regs->nxge_regh = NULL;
1401 		}
1402 		if (nxgep->dev_regs->nxge_msix_regh) {
1403 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1404 			    "==> nxge_unmap_regs: device interrupts"));
1405 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1406 			nxgep->dev_regs->nxge_msix_regh = NULL;
1407 		}
1408 		if (nxgep->dev_regs->nxge_vir_regh) {
1409 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1410 			    "==> nxge_unmap_regs: vio region"));
1411 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1412 			nxgep->dev_regs->nxge_vir_regh = NULL;
1413 		}
1414 		if (nxgep->dev_regs->nxge_vir2_regh) {
1415 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1416 			    "==> nxge_unmap_regs: vio2 region"));
1417 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1418 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1419 		}
1420 
1421 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1422 		nxgep->dev_regs = NULL;
1423 	}
1424 
1425 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1426 }
1427 
1428 static nxge_status_t
1429 nxge_setup_mutexes(p_nxge_t nxgep)
1430 {
1431 	int ddi_status = DDI_SUCCESS;
1432 	nxge_status_t status = NXGE_OK;
1433 	nxge_classify_t *classify_ptr;
1434 	int partition;
1435 
1436 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1437 
1438 	/*
1439 	 * Get the interrupt cookie so the mutexes can be
1440 	 * initialized.
1441 	 */
1442 	if (isLDOMguest(nxgep)) {
1443 		nxgep->interrupt_cookie = 0;
1444 	} else {
1445 		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1446 		    &nxgep->interrupt_cookie);
1447 
1448 		if (ddi_status != DDI_SUCCESS) {
1449 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1450 			    "<== nxge_setup_mutexes: failed 0x%x",
1451 			    ddi_status));
1452 			goto nxge_setup_mutexes_exit;
1453 		}
1454 	}
1455 
1456 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1457 	MUTEX_INIT(&nxgep->poll_lock, NULL,
1458 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1459 
1460 	/*
1461 	 * Initialize mutexes for this device.
1462 	 */
1463 	MUTEX_INIT(nxgep->genlock, NULL,
1464 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1465 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1466 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1467 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1468 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1469 	MUTEX_INIT(&nxgep->group_lock, NULL,
1470 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1471 	RW_INIT(&nxgep->filter_lock, NULL,
1472 	    RW_DRIVER, (void *)nxgep->interrupt_cookie);
1473 
1474 	classify_ptr = &nxgep->classifier;
1475 	/*
1476 	 * FFLP mutexes are never used in interrupt context,
1477 	 * as FFLP operations can take a very long time to
1478 	 * complete and hence are not suitable to invoke from
1479 	 * interrupt handlers.
1480 	 */
1481 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1482 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1483 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1484 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1485 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1486 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1487 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1488 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1489 		}
1490 	}
1491 
1492 nxge_setup_mutexes_exit:
1493 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1494 	    "<== nxge_setup_mutexes status = %x", status));
1495 
1496 	if (ddi_status != DDI_SUCCESS)
1497 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1498 
1499 	return (status);
1500 }
1501 
1502 static void
1503 nxge_destroy_mutexes(p_nxge_t nxgep)
1504 {
1505 	int partition;
1506 	nxge_classify_t *classify_ptr;
1507 
1508 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1509 	RW_DESTROY(&nxgep->filter_lock);
1510 	MUTEX_DESTROY(&nxgep->group_lock);
1511 	MUTEX_DESTROY(&nxgep->mif_lock);
1512 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1513 	MUTEX_DESTROY(nxgep->genlock);
1514 
1515 	classify_ptr = &nxgep->classifier;
1516 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1517 
1518 	/* Destroy all polling resources. */
1519 	MUTEX_DESTROY(&nxgep->poll_lock);
1520 	cv_destroy(&nxgep->poll_cv);
1521 
1522 	/* free data structures, based on HW type */
1523 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1524 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1525 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1526 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1527 		}
1528 	}
1529 
1530 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1531 }
1532 
1533 nxge_status_t
1534 nxge_init(p_nxge_t nxgep)
1535 {
1536 	nxge_status_t status = NXGE_OK;
1537 
1538 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1539 
1540 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1541 		return (status);
1542 	}
1543 
1544 	/*
1545 	 * Allocate system memory for the receive/transmit buffer blocks
1546 	 * and receive/transmit descriptor rings.
1547 	 */
1548 	status = nxge_alloc_mem_pool(nxgep);
1549 	if (status != NXGE_OK) {
1550 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1551 		goto nxge_init_fail1;
1552 	}
1553 
1554 	if (!isLDOMguest(nxgep)) {
1555 		/*
1556 		 * Initialize and enable the TXC registers.
1557 		 * (Globally enable the Tx controller,
1558 		 *  enable the port, configure the dma channel bitmap,
1559 		 *  configure the max burst size).
1560 		 */
1561 		status = nxge_txc_init(nxgep);
1562 		if (status != NXGE_OK) {
1563 			NXGE_ERROR_MSG((nxgep,
1564 			    NXGE_ERR_CTL, "init txc failed\n"));
1565 			goto nxge_init_fail2;
1566 		}
1567 	}
1568 
1569 	/*
1570 	 * Initialize and enable TXDMA channels.
1571 	 */
1572 	status = nxge_init_txdma_channels(nxgep);
1573 	if (status != NXGE_OK) {
1574 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1575 		goto nxge_init_fail3;
1576 	}
1577 
1578 	/*
1579 	 * Initialize and enable RXDMA channels.
1580 	 */
1581 	status = nxge_init_rxdma_channels(nxgep);
1582 	if (status != NXGE_OK) {
1583 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1584 		goto nxge_init_fail4;
1585 	}
1586 
1587 	/*
1588 	 * The guest domain is now done.
1589 	 */
1590 	if (isLDOMguest(nxgep)) {
1591 		nxgep->drv_state |= STATE_HW_INITIALIZED;
1592 		goto nxge_init_exit;
1593 	}
1594 
1595 	/*
1596 	 * Initialize TCAM and FCRAM (Neptune).
1597 	 */
1598 	status = nxge_classify_init(nxgep);
1599 	if (status != NXGE_OK) {
1600 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1601 		goto nxge_init_fail5;
1602 	}
1603 
1604 	/*
1605 	 * Initialize ZCP
1606 	 */
1607 	status = nxge_zcp_init(nxgep);
1608 	if (status != NXGE_OK) {
1609 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1610 		goto nxge_init_fail5;
1611 	}
1612 
1613 	/*
1614 	 * Initialize IPP.
1615 	 */
1616 	status = nxge_ipp_init(nxgep);
1617 	if (status != NXGE_OK) {
1618 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1619 		goto nxge_init_fail5;
1620 	}
1621 
1622 	/*
1623 	 * Initialize the MAC block.
1624 	 */
1625 	status = nxge_mac_init(nxgep);
1626 	if (status != NXGE_OK) {
1627 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1628 		goto nxge_init_fail5;
1629 	}
1630 
1631 	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */
1632 
1633 	/*
1634 	 * Enable hardware interrupts.
1635 	 */
1636 	nxge_intr_hw_enable(nxgep);
1637 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1638 
1639 	goto nxge_init_exit;
1640 
1641 nxge_init_fail5:
1642 	nxge_uninit_rxdma_channels(nxgep);
1643 nxge_init_fail4:
1644 	nxge_uninit_txdma_channels(nxgep);
1645 nxge_init_fail3:
1646 	if (!isLDOMguest(nxgep)) {
1647 		(void) nxge_txc_uninit(nxgep);
1648 	}
1649 nxge_init_fail2:
1650 	nxge_free_mem_pool(nxgep);
1651 nxge_init_fail1:
1652 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1653 	    "<== nxge_init status (failed) = 0x%08x", status));
1654 	return (status);
1655 
1656 nxge_init_exit:
1657 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1658 	    status));
1659 	return (status);
1660 }
1661 
1662 
1663 timeout_id_t
1664 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1665 {
1666 	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1667 		return (timeout(func, (caddr_t)nxgep,
1668 		    drv_usectohz(1000 * msec)));
1669 	}
1670 	return (NULL);
1671 }
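/*
 * Typical usage (illustrative; "callback" is any fptrv_t routine):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, callback, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *
 * untimeout(9F) makes nxge_stop_timer() safe to call even if the
 * timeout has already fired.
 */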
1672 
1673 /*ARGSUSED*/
1674 void
1675 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1676 {
1677 	if (timerid) {
1678 		(void) untimeout(timerid);
1679 	}
1680 }
1681 
1682 void
1683 nxge_uninit(p_nxge_t nxgep)
1684 {
1685 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1686 
1687 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1688 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1689 		    "==> nxge_uninit: not initialized"));
1690 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1691 		    "<== nxge_uninit"));
1692 		return;
1693 	}
1694 
1695 	/* stop timer */
1696 	if (nxgep->nxge_timerid) {
1697 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1698 		nxgep->nxge_timerid = 0;
1699 	}
1700 
1701 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1702 	(void) nxge_intr_hw_disable(nxgep);
1703 
1704 	/*
1705 	 * Reset the receive MAC side.
1706 	 */
1707 	(void) nxge_rx_mac_disable(nxgep);
1708 
1709 	/* Disable and soft reset the IPP */
1710 	if (!isLDOMguest(nxgep))
1711 		(void) nxge_ipp_disable(nxgep);
1712 
1713 	/* Free classification resources */
1714 	(void) nxge_classify_uninit(nxgep);
1715 
1716 	/*
1717 	 * Reset the transmit/receive DMA side.
1718 	 */
1719 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1720 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1721 
1722 	nxge_uninit_txdma_channels(nxgep);
1723 	nxge_uninit_rxdma_channels(nxgep);
1724 
1725 	/*
1726 	 * Reset the transmit MAC side.
1727 	 */
1728 	(void) nxge_tx_mac_disable(nxgep);
1729 
1730 	nxge_free_mem_pool(nxgep);
1731 
1732 	/*
1733 	 * Start the timer if the reset flag is not set.
1734 	 * If this reset flag is set, the link monitor
1735 	 * will not be started, in order to stop further bus
1736 	 * activity coming from this interface.
1737 	 * The driver will start the monitor function
1738 	 * when the interface is initialized again later.
1739 	 */
1740 	if (!nxge_peu_reset_enable) {
1741 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1742 	}
1743 
1744 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1745 
1746 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1747 	    "nxge_mblks_pending %d", nxge_mblks_pending));
1748 }
1749 
1750 void
1751 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1752 {
1753 #if defined(__i386)
1754 	size_t		reg;
1755 #else
1756 	uint64_t	reg;
1757 #endif
1758 	uint64_t	regdata;
1759 	int		i, retry;
1760 
1761 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1762 	regdata = 0;
1763 	retry = 1;
1764 
1765 	for (i = 0; i < retry; i++) {
1766 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1767 	}
1768 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1769 }
1770 
1771 void
1772 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1773 {
1774 #if defined(__i386)
1775 	size_t		reg;
1776 #else
1777 	uint64_t	reg;
1778 #endif
1779 	uint64_t	buf[2];
1780 
1781 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1782 #if defined(__i386)
1783 	reg = (size_t)buf[0];
1784 #else
1785 	reg = buf[0];
1786 #endif
1787 
1788 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1789 }
1790 
1791 
1792 nxge_os_mutex_t nxgedebuglock;
1793 int nxge_debug_init = 0;
1794 
1795 /*ARGSUSED*/
1796 /*VARARGS*/
1797 void
1798 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1799 {
1800 	char msg_buffer[1048];
1801 	char prefix_buffer[32];
1802 	int instance;
1803 	uint64_t debug_level;
1804 	int cmn_level = CE_CONT;
1805 	va_list ap;
1806 
1807 	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1808 		/* In case a developer has changed nxge_debug_level. */
1809 		nxgep->nxge_debug_level = nxge_debug_level;
1811 	}
1812 
1813 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1814 	    nxgep->nxge_debug_level;
1815 
1816 	if ((level & debug_level) ||
1817 	    (level == NXGE_NOTE) ||
1818 	    (level == NXGE_ERR_CTL)) {
1819 		/* do the msg processing */
1820 		if (nxge_debug_init == 0) {
1821 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1822 			nxge_debug_init = 1;
1823 		}
1824 
1825 		MUTEX_ENTER(&nxgedebuglock);
1826 
1827 		if ((level & NXGE_NOTE)) {
1828 			cmn_level = CE_NOTE;
1829 		}
1830 
1831 		if (level & NXGE_ERR_CTL) {
1832 			cmn_level = CE_WARN;
1833 		}
1834 
1835 		va_start(ap, fmt);
		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
1837 		va_end(ap);
1838 		if (nxgep == NULL) {
1839 			instance = -1;
1840 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1841 		} else {
1842 			instance = nxgep->instance;
1843 			(void) sprintf(prefix_buffer,
1844 			    "%s%d :", "nxge", instance);
1845 		}
1846 
1847 		MUTEX_EXIT(&nxgedebuglock);
1848 		cmn_err(cmn_level, "!%s %s\n",
1849 		    prefix_buffer, msg_buffer);
1850 
1851 	}
1852 }
1853 
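/*
 * nxge_dump_packet
 *
 *	Format up to MAX_DUMP_SZ bytes at 'addr' as colon-separated
 *	hex in a static buffer.  Larger buffers are elided in the
 *	middle: the first and last MAX_DUMP_SZ/2 bytes are shown
 *	around a run of dots.  Not reentrant; debug use only.
 */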
1854 char *
1855 nxge_dump_packet(char *addr, int size)
1856 {
1857 	uchar_t *ap = (uchar_t *)addr;
1858 	int i;
1859 	static char etherbuf[1024];
1860 	char *cp = etherbuf;
1861 	char digits[] = "0123456789abcdef";
1862 
1863 	if (!size)
1864 		size = 60;
1865 
1866 	if (size > MAX_DUMP_SZ) {
1867 		/* Dump the leading bytes */
1868 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1869 			if (*ap > 0x0f)
1870 				*cp++ = digits[*ap >> 4];
1871 			*cp++ = digits[*ap++ & 0xf];
1872 			*cp++ = ':';
1873 		}
1874 		for (i = 0; i < 20; i++)
1875 			*cp++ = '.';
1876 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1877 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1878 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1879 			if (*ap > 0x0f)
1880 				*cp++ = digits[*ap >> 4];
1881 			*cp++ = digits[*ap++ & 0xf];
1882 			*cp++ = ':';
1883 		}
1884 	} else {
1885 		for (i = 0; i < size; i++) {
1886 			if (*ap > 0x0f)
1887 				*cp++ = digits[*ap >> 4];
1888 			*cp++ = digits[*ap++ & 0xf];
1889 			*cp++ = ':';
1890 		}
1891 	}
1892 	*--cp = 0;
1893 	return (etherbuf);
1894 }
1895 
1896 #ifdef	NXGE_DEBUG
1897 static void
1898 nxge_test_map_regs(p_nxge_t nxgep)
1899 {
1900 	ddi_acc_handle_t cfg_handle;
1901 	p_pci_cfg_t	cfg_ptr;
1902 	ddi_acc_handle_t dev_handle;
1903 	char		*dev_ptr;
1904 	ddi_acc_handle_t pci_config_handle;
1905 	uint32_t	regval;
1906 	int		i;
1907 
1908 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1909 
1910 	dev_handle = nxgep->dev_regs->nxge_regh;
1911 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1912 
1913 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1914 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1915 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1916 
1917 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1918 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1919 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1920 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1921 		    &cfg_ptr->vendorid));
1922 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1923 		    "\tvendorid 0x%x devid 0x%x",
1924 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1925 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1926 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base18 0x%x "
		    "base1c 0x%x",
1929 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1930 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1931 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1932 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1933 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1934 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1935 		    "base 28 0x%x bar2c 0x%x\n",
1936 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1937 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1938 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1939 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1940 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1941 		    "\nNeptune PCI BAR: base30 0x%x\n",
1942 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1943 
1944 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1945 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1946 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1947 		    "first  0x%llx second 0x%llx third 0x%llx "
1948 		    "last 0x%llx ",
1949 		    NXGE_PIO_READ64(dev_handle,
1950 		    (uint64_t *)(dev_ptr + 0),  0),
1951 		    NXGE_PIO_READ64(dev_handle,
1952 		    (uint64_t *)(dev_ptr + 8),  0),
1953 		    NXGE_PIO_READ64(dev_handle,
1954 		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
1956 		    (uint64_t *)(dev_ptr + 24), 0)));
1957 	}
1958 }
1959 
1960 #endif
1961 
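/*
 * nxge_suspend
 *
 *	DDI_SUSPEND support: disable interrupts and quiesce the device.
 */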
1962 static void
1963 nxge_suspend(p_nxge_t nxgep)
1964 {
1965 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1966 
1967 	nxge_intrs_disable(nxgep);
1968 	nxge_destroy_dev(nxgep);
1969 
1970 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1971 }
1972 
1973 static nxge_status_t
1974 nxge_resume(p_nxge_t nxgep)
1975 {
1976 	nxge_status_t status = NXGE_OK;
1977 
1978 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1979 
1980 	nxgep->suspended = DDI_RESUME;
1981 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1982 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1983 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1984 	(void) nxge_rx_mac_enable(nxgep);
1985 	(void) nxge_tx_mac_enable(nxgep);
1986 	nxge_intrs_enable(nxgep);
1987 	nxgep->suspended = 0;
1988 
1989 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1990 	    "<== nxge_resume status = 0x%x", status));
1991 	return (status);
1992 }
1993 
1994 static nxge_status_t
1995 nxge_setup_dev(p_nxge_t nxgep)
1996 {
1997 	nxge_status_t	status = NXGE_OK;
1998 
1999 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
2000 	    nxgep->mac.portnum));
2001 
2002 	status = nxge_link_init(nxgep);
2003 
2004 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
2005 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2006 		    "port%d Bad register acc handle", nxgep->mac.portnum));
2007 		status = NXGE_ERROR;
2008 	}
2009 
2010 	if (status != NXGE_OK) {
2011 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2012 		    " nxge_setup_dev status "
2013 		    "(xcvr init 0x%08x)", status));
2014 		goto nxge_setup_dev_exit;
2015 	}
2016 
2017 nxge_setup_dev_exit:
2018 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2019 	    "<== nxge_setup_dev port %d status = 0x%08x",
2020 	    nxgep->mac.portnum, status));
2021 
2022 	return (status);
2023 }
2024 
2025 static void
2026 nxge_destroy_dev(p_nxge_t nxgep)
2027 {
2028 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2029 
2030 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2031 
2032 	(void) nxge_hw_stop(nxgep);
2033 
2034 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2035 }
2036 
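/*
 * nxge_setup_system_dma_pages
 *
 *	Determine the system page size (clamped against the IOMMU
 *	page size on non-NIU platforms), derive the receive block
 *	size from it, and probe the DMA burst sizes the system
 *	supports.
 */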
2037 static nxge_status_t
2038 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2039 {
2040 	int 			ddi_status = DDI_SUCCESS;
2041 	uint_t 			count;
2042 	ddi_dma_cookie_t 	cookie;
2043 	uint_t 			iommu_pagesize;
2044 	nxge_status_t		status = NXGE_OK;
2045 
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2047 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2048 	if (nxgep->niu_type != N2_NIU) {
2049 		iommu_pagesize = dvma_pagesize(nxgep->dip);
2050 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2051 		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2052 		    " default_block_size %d iommu_pagesize %d",
2053 		    nxgep->sys_page_sz,
2054 		    ddi_ptob(nxgep->dip, (ulong_t)1),
2055 		    nxgep->rx_default_block_size,
2056 		    iommu_pagesize));
2057 
2058 		if (iommu_pagesize != 0) {
2059 			if (nxgep->sys_page_sz == iommu_pagesize) {
2060 				if (iommu_pagesize > 0x4000)
2061 					nxgep->sys_page_sz = 0x4000;
2062 			} else {
2063 				if (nxgep->sys_page_sz > iommu_pagesize)
2064 					nxgep->sys_page_sz = iommu_pagesize;
2065 			}
2066 		}
2067 	}
2068 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2069 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2070 	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2071 	    "default_block_size %d page mask %d",
2072 	    nxgep->sys_page_sz,
2073 	    ddi_ptob(nxgep->dip, (ulong_t)1),
2074 	    nxgep->rx_default_block_size,
2075 	    nxgep->sys_page_mask));
2076 
2077 
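	/*
	 * Map the (possibly clamped) system page size to the receive
	 * default block size and the RBR block-size code; any
	 * unrecognized page size falls back to 4K.
	 */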
2078 	switch (nxgep->sys_page_sz) {
2079 	default:
2080 		nxgep->sys_page_sz = 0x1000;
2081 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2082 		nxgep->rx_default_block_size = 0x1000;
2083 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2084 		break;
2085 	case 0x1000:
2086 		nxgep->rx_default_block_size = 0x1000;
2087 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2088 		break;
2089 	case 0x2000:
2090 		nxgep->rx_default_block_size = 0x2000;
2091 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2092 		break;
2093 	case 0x4000:
2094 		nxgep->rx_default_block_size = 0x4000;
2095 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2096 		break;
2097 	case 0x8000:
2098 		nxgep->rx_default_block_size = 0x8000;
2099 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2100 		break;
2101 	}
2102 
#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
2109 	/*
2110 	 * Get the system DMA burst size.
2111 	 */
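	/*
	 * A throwaway "spare" handle is allocated and bound to an
	 * arbitrary kernel address purely so that ddi_dma_burstsizes()
	 * can report the burst sizes available to this device; the
	 * binding is undone immediately afterwards.
	 */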
2112 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2113 	    DDI_DMA_DONTWAIT, 0,
2114 	    &nxgep->dmasparehandle);
2115 	if (ddi_status != DDI_SUCCESS) {
2116 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2117 		    "ddi_dma_alloc_handle: failed "
2118 		    " status 0x%x", ddi_status));
		goto nxge_setup_system_dma_pages_exit;
2120 	}
2121 
2122 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2123 	    (caddr_t)nxgep->dmasparehandle,
2124 	    sizeof (nxgep->dmasparehandle),
2125 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2126 	    DDI_DMA_DONTWAIT, 0,
2127 	    &cookie, &count);
2128 	if (ddi_status != DDI_DMA_MAPPED) {
2129 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2130 		    "Binding spare handle to find system"
2131 		    " burstsize failed."));
2132 		ddi_status = DDI_FAILURE;
		goto nxge_setup_system_dma_pages_fail1;
2134 	}
2135 
2136 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2137 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2138 
nxge_setup_system_dma_pages_fail1:
2140 	ddi_dma_free_handle(&nxgep->dmasparehandle);
2141 
nxge_setup_system_dma_pages_exit:
2143 
2144 	if (ddi_status != DDI_SUCCESS)
2145 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2146 
2147 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2148 	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2149 	return (status);
2150 }
2151 
2152 static nxge_status_t
2153 nxge_alloc_mem_pool(p_nxge_t nxgep)
2154 {
2155 	nxge_status_t	status = NXGE_OK;
2156 
2157 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2158 
2159 	status = nxge_alloc_rx_mem_pool(nxgep);
2160 	if (status != NXGE_OK) {
2161 		return (NXGE_ERROR);
2162 	}
2163 
2164 	status = nxge_alloc_tx_mem_pool(nxgep);
2165 	if (status != NXGE_OK) {
2166 		nxge_free_rx_mem_pool(nxgep);
2167 		return (NXGE_ERROR);
2168 	}
2169 
2170 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2171 	return (NXGE_OK);
2172 }
2173 
2174 static void
2175 nxge_free_mem_pool(p_nxge_t nxgep)
2176 {
2177 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2178 
2179 	nxge_free_rx_mem_pool(nxgep);
2180 	nxge_free_tx_mem_pool(nxgep);
2181 
2182 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2183 }
2184 
2185 nxge_status_t
2186 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2187 {
2188 	uint32_t		rdc_max;
2189 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2190 	p_nxge_hw_pt_cfg_t	p_cfgp;
2191 	p_nxge_dma_pool_t	dma_poolp;
2192 	p_nxge_dma_common_t	*dma_buf_p;
2193 	p_nxge_dma_pool_t	dma_cntl_poolp;
2194 	p_nxge_dma_common_t	*dma_cntl_p;
2195 	uint32_t 		*num_chunks; /* per dma */
2196 	nxge_status_t		status = NXGE_OK;
2197 
2198 	uint32_t		nxge_port_rbr_size;
2199 	uint32_t		nxge_port_rbr_spare_size;
2200 	uint32_t		nxge_port_rcr_size;
2201 	uint32_t		rx_cntl_alloc_size;
2202 
2203 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2204 
2205 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2206 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2207 	rdc_max = NXGE_MAX_RDCS;
2208 
2209 	/*
2210 	 * Allocate memory for the common DMA data structures.
2211 	 */
2212 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2213 	    KM_SLEEP);
2214 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2215 	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2216 
2217 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2218 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2219 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2220 	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2221 
2222 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2223 	    sizeof (uint32_t) * rdc_max, KM_SLEEP);
2224 
	/*
	 * Assume that each DMA channel will be configured with
	 * the default block size.
	 * RBR block counts are rounded up to a multiple of the
	 * post batch count (16).
	 */
2230 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
2231 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
2232 
2233 	if (!nxge_port_rbr_size) {
2234 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2235 	}
2236 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2237 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2238 		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2239 	}
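	/*
	 * For example, assuming the post batch count of 16 noted
	 * above, a configured rbr_size of 1000 would be rounded up
	 * here to 1008 (16 * 63).
	 */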
2240 
2241 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
2242 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2243 
2244 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2245 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2246 		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2247 	}
2248 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2249 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2250 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2251 		    "set to default %d",
2252 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2253 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2254 	}
2255 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2256 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2257 		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
2258 		    "set to default %d",
2259 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
2260 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
2261 	}
2262 
	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocations for data buffers are limited to 4M
	 * (contig_mem_alloc), and control buffers must be little
	 * endian (and must use the ddi/dki mem alloc functions).
	 */
2269 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2270 	if (nxgep->niu_type == N2_NIU) {
2271 		nxge_port_rbr_spare_size = 0;
2272 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2273 		    (!ISP2(nxge_port_rbr_size))) {
2274 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2275 		}
2276 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2277 		    (!ISP2(nxge_port_rcr_size))) {
2278 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2279 		}
2280 	}
2281 #endif
2282 
2283 	/*
2284 	 * Addresses of receive block ring, receive completion ring and the
2285 	 * mailbox must be all cache-aligned (64 bytes).
2286 	 */
2287 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2288 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2289 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2290 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
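	/*
	 * The control area is therefore one RBR descriptor per block
	 * (including spares), one RCR entry per completion slot, and
	 * a single mailbox, sized as one contiguous allocation.
	 */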
2291 
2292 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2293 	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2294 	    "nxge_port_rcr_size = %d "
2295 	    "rx_cntl_alloc_size = %d",
2296 	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
2297 	    nxge_port_rcr_size,
2298 	    rx_cntl_alloc_size));
2299 
2300 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2301 	if (nxgep->niu_type == N2_NIU) {
2302 		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2303 		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2304 
2305 		if (!ISP2(rx_buf_alloc_size)) {
2306 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2307 			    "==> nxge_alloc_rx_mem_pool: "
			    "RX buffer size must be a power of 2"));
2309 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2310 			goto nxge_alloc_rx_mem_pool_exit;
2311 		}
2312 
2313 		if (rx_buf_alloc_size > (1 << 22)) {
2314 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2315 			    "==> nxge_alloc_rx_mem_pool: "
			    "RX buffer size is limited to 4M"));
2317 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2318 			goto nxge_alloc_rx_mem_pool_exit;
2319 		}
2320 
2321 		if (rx_cntl_alloc_size < 0x2000) {
2322 			rx_cntl_alloc_size = 0x2000;
2323 		}
2324 	}
2325 #endif
2326 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2327 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2328 	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2329 	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2330 
2331 	dma_poolp->ndmas = p_cfgp->max_rdcs;
2332 	dma_poolp->num_chunks = num_chunks;
2333 	dma_poolp->buf_allocated = B_TRUE;
2334 	nxgep->rx_buf_pool_p = dma_poolp;
2335 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2336 
2337 	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2338 	dma_cntl_poolp->buf_allocated = B_TRUE;
2339 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2340 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2341 
2342 	/* Allocate the receive rings, too. */
2343 	nxgep->rx_rbr_rings =
2344 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2345 	nxgep->rx_rbr_rings->rbr_rings =
2346 	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2347 	nxgep->rx_rcr_rings =
2348 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2349 	nxgep->rx_rcr_rings->rcr_rings =
2350 	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2351 	nxgep->rx_mbox_areas_p =
2352 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2353 	nxgep->rx_mbox_areas_p->rxmbox_areas =
2354 	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2355 
2356 	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2357 	    p_cfgp->max_rdcs;
2358 
2359 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2360 	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2361 
2362 nxge_alloc_rx_mem_pool_exit:
2363 	return (status);
2364 }
2365 
2366 /*
2367  * nxge_alloc_rxb
2368  *
2369  *	Allocate buffers for an RDC.
2370  *
2371  * Arguments:
2372  * 	nxgep
2373  * 	channel	The channel to map into our kernel space.
2374  *
2375  * Notes:
2376  *
2377  * NPI function calls:
2378  *
2379  * NXGE function calls:
2380  *
2381  * Registers accessed:
2382  *
2383  * Context:
2384  *
2385  * Taking apart:
2386  *
2387  * Open questions:
2388  *
2389  */
2390 nxge_status_t
2391 nxge_alloc_rxb(
2392 	p_nxge_t nxgep,
2393 	int channel)
2394 {
2395 	size_t			rx_buf_alloc_size;
2396 	nxge_status_t		status = NXGE_OK;
2397 
2398 	nxge_dma_common_t	**data;
2399 	nxge_dma_common_t	**control;
2400 	uint32_t 		*num_chunks;
2401 
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2403 
2404 	/*
2405 	 * Allocate memory for the receive buffers and descriptor rings.
2406 	 * Replace these allocation functions with the interface functions
2407 	 * provided by the partition manager if/when they are available.
2408 	 */
2409 
2410 	/*
2411 	 * Allocate memory for the receive buffer blocks.
2412 	 */
2413 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
2414 	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2415 
2416 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2417 	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2418 
2419 	if ((status = nxge_alloc_rx_buf_dma(
2420 	    nxgep, channel, data, rx_buf_alloc_size,
2421 	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2422 		return (status);
2423 	}
2424 
2425 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2426 	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2427 
2428 	/*
2429 	 * Allocate memory for descriptor rings and mailbox.
2430 	 */
2431 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2432 
2433 	if ((status = nxge_alloc_rx_cntl_dma(
2434 	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2435 	    != NXGE_OK) {
2436 		nxge_free_rx_cntl_dma(nxgep, *control);
2437 		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2438 		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2439 		return (status);
2440 	}
2441 
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rxb: status 0x%08x", status));
2444 
2445 	return (status);
2446 }
2447 
2448 void
2449 nxge_free_rxb(
2450 	p_nxge_t nxgep,
2451 	int channel)
2452 {
2453 	nxge_dma_common_t	*data;
2454 	nxge_dma_common_t	*control;
2455 	uint32_t 		num_chunks;
2456 
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2458 
2459 	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2460 	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2461 	nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2462 
2463 	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2464 	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2465 
2466 	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2467 	nxge_free_rx_cntl_dma(nxgep, control);
2468 
2469 	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2470 
2471 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2472 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2473 
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2475 }
2476 
2477 static void
2478 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2479 {
2480 	int rdc_max = NXGE_MAX_RDCS;
2481 
2482 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2483 
2484 	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2485 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2486 		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
2488 		return;
2489 	}
2490 	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2491 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2492 		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated)"));
2494 		return;
2495 	}
2496 
2497 	KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2498 	    sizeof (p_nxge_dma_common_t) * rdc_max);
2499 	KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2500 
2501 	KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2502 	    sizeof (uint32_t) * rdc_max);
2503 	KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2504 	    sizeof (p_nxge_dma_common_t) * rdc_max);
2505 	KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2506 
2507 	nxgep->rx_buf_pool_p = 0;
2508 	nxgep->rx_cntl_pool_p = 0;
2509 
2510 	KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2511 	    sizeof (p_rx_rbr_ring_t) * rdc_max);
2512 	KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2513 	KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2514 	    sizeof (p_rx_rcr_ring_t) * rdc_max);
2515 	KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2516 	KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2517 	    sizeof (p_rx_mbox_t) * rdc_max);
2518 	KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2519 
2520 	nxgep->rx_rbr_rings = 0;
2521 	nxgep->rx_rcr_rings = 0;
2522 	nxgep->rx_mbox_areas_p = 0;
2523 
2524 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2525 }
2526 
2527 
2528 static nxge_status_t
2529 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2530 	p_nxge_dma_common_t *dmap,
2531 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2532 {
2533 	p_nxge_dma_common_t 	rx_dmap;
2534 	nxge_status_t		status = NXGE_OK;
2535 	size_t			total_alloc_size;
2536 	size_t			allocated = 0;
2537 	int			i, size_index, array_size;
2538 	boolean_t		use_kmem_alloc = B_FALSE;
2539 
2540 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2541 
2542 	rx_dmap = (p_nxge_dma_common_t)
2543 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2544 	    KM_SLEEP);
2545 
2546 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2547 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2548 	    dma_channel, alloc_size, block_size, dmap));
2549 
2550 	total_alloc_size = alloc_size;
2551 
2552 #if defined(RX_USE_RECLAIM_POST)
2553 	total_alloc_size = alloc_size + alloc_size/4;
2554 #endif
2555 
2556 	i = 0;
2557 	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
2559 	while ((alloc_sizes[size_index] < alloc_size) &&
2560 	    (size_index < array_size))
2561 		size_index++;
2562 	if (size_index >= array_size) {
2563 		size_index = array_size - 1;
2564 	}
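	/*
	 * alloc_sizes[] is an ascending table of chunk sizes.  Start
	 * with the smallest entry that covers the request; each time
	 * an allocation fails, fall back to the next smaller chunk
	 * size (size_index--) until enough memory has been allocated
	 * or NXGE_DMA_BLOCK chunks have been used.
	 */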
2565 
2566 	/* For Neptune, use kmem_alloc if the kmem flag is set. */
2567 	if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2568 		use_kmem_alloc = B_TRUE;
2569 #if defined(__i386) || defined(__amd64)
2570 		size_index = 0;
2571 #endif
2572 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2573 		    "==> nxge_alloc_rx_buf_dma: "
2574 		    "Neptune use kmem_alloc() - size_index %d",
2575 		    size_index));
2576 	}
2577 
2578 	while ((allocated < total_alloc_size) &&
2579 	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2580 		rx_dmap[i].dma_chunk_index = i;
2581 		rx_dmap[i].block_size = block_size;
2582 		rx_dmap[i].alength = alloc_sizes[size_index];
2583 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2584 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2585 		rx_dmap[i].dma_channel = dma_channel;
2586 		rx_dmap[i].contig_alloc_type = B_FALSE;
2587 		rx_dmap[i].kmem_alloc_type = B_FALSE;
2588 		rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2589 
2590 		/*
2591 		 * N2/NIU: data buffers must be contiguous as the driver
2592 		 *	   needs to call Hypervisor api to set up
2593 		 *	   logical pages.
2594 		 */
2595 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2596 			rx_dmap[i].contig_alloc_type = B_TRUE;
2597 			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2598 		} else if (use_kmem_alloc) {
2599 			/* For Neptune, use kmem_alloc */
2600 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2601 			    "==> nxge_alloc_rx_buf_dma: "
2602 			    "Neptune use kmem_alloc()"));
2603 			rx_dmap[i].kmem_alloc_type = B_TRUE;
2604 			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2605 		}
2606 
2607 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2608 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2609 		    "i %d nblocks %d alength %d",
2610 		    dma_channel, i, &rx_dmap[i], block_size,
2611 		    i, rx_dmap[i].nblocks,
2612 		    rx_dmap[i].alength));
2613 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2614 		    &nxge_rx_dma_attr,
2615 		    rx_dmap[i].alength,
2616 		    &nxge_dev_buf_dma_acc_attr,
2617 		    DDI_DMA_READ | DDI_DMA_STREAMING,
2618 		    (p_nxge_dma_common_t)(&rx_dmap[i]));
2619 		if (status != NXGE_OK) {
2620 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2621 			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
2622 			    "dma %d size_index %d size requested %d",
2623 			    dma_channel,
2624 			    size_index,
2625 			    rx_dmap[i].alength));
2626 			size_index--;
2627 		} else {
2628 			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2629 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2630 			    " nxge_alloc_rx_buf_dma DONE  alloc mem: "
2631 			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2632 			    "buf_alloc_state %d alloc_type %d",
2633 			    dma_channel,
2634 			    &rx_dmap[i],
2635 			    rx_dmap[i].kaddrp,
2636 			    rx_dmap[i].alength,
2637 			    rx_dmap[i].buf_alloc_state,
2638 			    rx_dmap[i].buf_alloc_type));
2639 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2640 			    " alloc_rx_buf_dma allocated rdc %d "
2641 			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2642 			    dma_channel, i, rx_dmap[i].alength,
2643 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2644 			    rx_dmap[i].kaddrp));
2645 			i++;
2646 			allocated += alloc_sizes[size_index];
2647 		}
2648 	}
2649 
2650 	if (allocated < total_alloc_size) {
2651 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2652 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2653 		    "allocated 0x%x requested 0x%x",
2654 		    dma_channel,
2655 		    allocated, total_alloc_size));
2656 		status = NXGE_ERROR;
2657 		goto nxge_alloc_rx_mem_fail1;
2658 	}
2659 
2660 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2661 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2662 	    "allocated 0x%x requested 0x%x",
2663 	    dma_channel,
2664 	    allocated, total_alloc_size));
2665 
2666 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2667 	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
2668 	    dma_channel, i));
2669 	*num_chunks = i;
2670 	*dmap = rx_dmap;
2671 
2672 	goto nxge_alloc_rx_mem_exit;
2673 
2674 nxge_alloc_rx_mem_fail1:
2675 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2676 
2677 nxge_alloc_rx_mem_exit:
2678 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2679 	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2680 
2681 	return (status);
2682 }
2683 
2684 /*ARGSUSED*/
2685 static void
2686 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2687     uint32_t num_chunks)
2688 {
2689 	int		i;
2690 
2691 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2692 	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2693 
2694 	if (dmap == 0)
2695 		return;
2696 
2697 	for (i = 0; i < num_chunks; i++) {
2698 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2699 		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2700 		    i, dmap));
2701 		nxge_dma_free_rx_data_buf(dmap++);
2702 	}
2703 
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2705 }
2706 
2707 /*ARGSUSED*/
2708 static nxge_status_t
2709 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2710     p_nxge_dma_common_t *dmap, size_t size)
2711 {
2712 	p_nxge_dma_common_t 	rx_dmap;
2713 	nxge_status_t		status = NXGE_OK;
2714 
2715 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2716 
2717 	rx_dmap = (p_nxge_dma_common_t)
2718 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2719 
2720 	rx_dmap->contig_alloc_type = B_FALSE;
2721 	rx_dmap->kmem_alloc_type = B_FALSE;
2722 
2723 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2724 	    &nxge_desc_dma_attr,
2725 	    size,
2726 	    &nxge_dev_desc_dma_acc_attr,
2727 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2728 	    rx_dmap);
2729 	if (status != NXGE_OK) {
2730 		goto nxge_alloc_rx_cntl_dma_fail1;
2731 	}
2732 
2733 	*dmap = rx_dmap;
2734 	goto nxge_alloc_rx_cntl_dma_exit;
2735 
2736 nxge_alloc_rx_cntl_dma_fail1:
2737 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2738 
2739 nxge_alloc_rx_cntl_dma_exit:
2740 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2741 	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2742 
2743 	return (status);
2744 }
2745 
2746 /*ARGSUSED*/
2747 static void
2748 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2749 {
2750 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2751 
2752 	if (dmap == 0)
2753 		return;
2754 
2755 	nxge_dma_mem_free(dmap);
2756 
2757 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2758 }
2759 
2760 typedef struct {
2761 	size_t	tx_size;
2762 	size_t	cr_size;
2763 	size_t	threshhold;
2764 } nxge_tdc_sizes_t;
2765 
2766 static
2767 nxge_status_t
2768 nxge_tdc_sizes(
2769 	nxge_t *nxgep,
2770 	nxge_tdc_sizes_t *sizes)
2771 {
	uint32_t threshhold;	/* The bcopy() threshold */
2773 	size_t tx_size;		/* Transmit buffer size */
2774 	size_t cr_size;		/* Completion ring size */
2775 
2776 	/*
2777 	 * Assume that each DMA channel will be configured with the
2778 	 * default transmit buffer size for copying transmit data.
2779 	 * (If a packet is bigger than this, it will not be copied.)
2780 	 */
2781 	if (nxgep->niu_type == N2_NIU) {
2782 		threshhold = TX_BCOPY_SIZE;
2783 	} else {
2784 		threshhold = nxge_bcopy_thresh;
2785 	}
2786 	tx_size = nxge_tx_ring_size * threshhold;
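	/*
	 * Illustrative numbers only: with a 1024-entry transmit ring
	 * and a 2048-byte bcopy threshold, 2MB of bcopy staging
	 * buffers would be set aside for the channel.  The actual
	 * values come from the tunables above.
	 */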
2787 
2788 	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2789 	cr_size += sizeof (txdma_mailbox_t);
2790 
2791 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2792 	if (nxgep->niu_type == N2_NIU) {
2793 		if (!ISP2(tx_size)) {
2794 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2795 			    "==> nxge_tdc_sizes: Tx size"
			    " must be a power of 2"));
2797 			return (NXGE_ERROR);
2798 		}
2799 
2800 		if (tx_size > (1 << 22)) {
2801 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2802 			    "==> nxge_tdc_sizes: Tx size"
2803 			    " limited to 4M"));
2804 			return (NXGE_ERROR);
2805 		}
2806 
2807 		if (cr_size < 0x2000)
2808 			cr_size = 0x2000;
2809 	}
2810 #endif
2811 
2812 	sizes->threshhold = threshhold;
2813 	sizes->tx_size = tx_size;
2814 	sizes->cr_size = cr_size;
2815 
2816 	return (NXGE_OK);
2817 }
2818 /*
2819  * nxge_alloc_txb
2820  *
 *	Allocate buffers for a TDC.
2822  *
2823  * Arguments:
2824  * 	nxgep
2825  * 	channel	The channel to map into our kernel space.
2826  *
2827  * Notes:
2828  *
2829  * NPI function calls:
2830  *
2831  * NXGE function calls:
2832  *
2833  * Registers accessed:
2834  *
2835  * Context:
2836  *
2837  * Taking apart:
2838  *
2839  * Open questions:
2840  *
2841  */
2842 nxge_status_t
2843 nxge_alloc_txb(
2844 	p_nxge_t nxgep,
2845 	int channel)
2846 {
2847 	nxge_dma_common_t	**dma_buf_p;
2848 	nxge_dma_common_t	**dma_cntl_p;
2849 	uint32_t 		*num_chunks;
2850 	nxge_status_t		status = NXGE_OK;
2851 
2852 	nxge_tdc_sizes_t	sizes;
2853 
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2855 
2856 	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2857 		return (NXGE_ERROR);
2858 
	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace these allocation functions with the interface
	 * functions provided by the partition manager when they
	 * become available.
	 */
	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];

	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/*
	 * Allocate memory for the transmit buffer pool.
	 */
2876 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2877 	    "sizes: tx: %ld, cr:%ld, th:%ld",
2878 	    sizes.tx_size, sizes.cr_size, sizes.threshhold));
2879 
2880 	*num_chunks = 0;
2881 	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2882 	    sizes.tx_size, sizes.threshhold, num_chunks);
2883 	if (status != NXGE_OK) {
2884 		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2885 		return (status);
2886 	}
2887 
2888 	/*
2889 	 * Allocate memory for descriptor rings and mailbox.
2890 	 */
2891 	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2892 	    sizes.cr_size);
2893 	if (status != NXGE_OK) {
2894 		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2895 		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2896 		return (status);
2897 	}
2898 
2899 	return (NXGE_OK);
2900 }
2901 
2902 void
2903 nxge_free_txb(
2904 	p_nxge_t nxgep,
2905 	int channel)
2906 {
2907 	nxge_dma_common_t	*data;
2908 	nxge_dma_common_t	*control;
2909 	uint32_t 		num_chunks;
2910 
2911 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2912 
2913 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2914 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2915 	nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2916 
2917 	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2918 	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2919 
2920 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2921 	nxge_free_tx_cntl_dma(nxgep, control);
2922 
2923 	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2924 
2925 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2926 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2927 
2928 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2929 }
2930 
2931 /*
2932  * nxge_alloc_tx_mem_pool
2933  *
2934  *	This function allocates all of the per-port TDC control data structures.
2935  *	The per-channel (TDC) data structures are allocated when needed.
2936  *
2937  * Arguments:
2938  * 	nxgep
2939  *
2940  * Notes:
2941  *
2942  * Context:
2943  *	Any domain
2944  */
2945 nxge_status_t
2946 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2947 {
2948 	nxge_hw_pt_cfg_t	*p_cfgp;
2949 	nxge_dma_pool_t		*dma_poolp;
2950 	nxge_dma_common_t	**dma_buf_p;
2951 	nxge_dma_pool_t		*dma_cntl_poolp;
2952 	nxge_dma_common_t	**dma_cntl_p;
2953 	uint32_t		*num_chunks; /* per dma */
2954 	int			tdc_max;
2955 
2956 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2957 
2958 	p_cfgp = &nxgep->pt_config.hw_config;
2959 	tdc_max = NXGE_MAX_TDCS;
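	/*
	 * The per-channel arrays below are sized for the hardware
	 * maximum (NXGE_MAX_TDCS); only the p_cfgp->tdc.owned
	 * channels owned by this port are ever populated.
	 */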
2960 
2961 	/*
2962 	 * Allocate memory for each transmit DMA channel.
2963 	 */
2964 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2965 	    KM_SLEEP);
2966 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2967 	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2968 
2969 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2970 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2971 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2972 	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2973 
2974 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2975 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2976 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2977 		    "set to default %d",
2978 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2979 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2980 	}
2981 
2982 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocations for data buffers are limited to 4M
	 * (contig_mem_alloc), and control buffers must be little
	 * endian (and must use the ddi/dki mem alloc functions).
	 * The transmit ring is limited to 8K entries (including
	 * the mailbox).
	 */
2990 	if (nxgep->niu_type == N2_NIU) {
2991 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2992 		    (!ISP2(nxge_tx_ring_size))) {
2993 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2994 		}
2995 	}
2996 #endif
2997 
2998 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2999 
3000 	num_chunks = (uint32_t *)KMEM_ZALLOC(
3001 	    sizeof (uint32_t) * tdc_max, KM_SLEEP);
3002 
3003 	dma_poolp->ndmas = p_cfgp->tdc.owned;
3004 	dma_poolp->num_chunks = num_chunks;
3005 	dma_poolp->dma_buf_pool_p = dma_buf_p;
3006 	nxgep->tx_buf_pool_p = dma_poolp;
3007 
3008 	dma_poolp->buf_allocated = B_TRUE;
3009 
3010 	dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3011 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3012 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3013 
3014 	dma_cntl_poolp->buf_allocated = B_TRUE;
3015 
3016 	nxgep->tx_rings =
3017 	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3018 	nxgep->tx_rings->rings =
3019 	    KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3020 	nxgep->tx_mbox_areas_p =
3021 	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3022 	nxgep->tx_mbox_areas_p->txmbox_areas_p =
3023 	    KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3024 
3025 	nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3026 
3027 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
	    "==> nxge_alloc_tx_mem_pool: tdc_max %d poolp->ndmas %d",
3029 	    tdc_max, dma_poolp->ndmas));
3030 
3031 	return (NXGE_OK);
3032 }
3033 
3034 nxge_status_t
3035 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3036     p_nxge_dma_common_t *dmap, size_t alloc_size,
3037     size_t block_size, uint32_t *num_chunks)
3038 {
3039 	p_nxge_dma_common_t 	tx_dmap;
3040 	nxge_status_t		status = NXGE_OK;
3041 	size_t			total_alloc_size;
3042 	size_t			allocated = 0;
3043 	int			i, size_index, array_size;
3044 
3045 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3046 
3047 	tx_dmap = (p_nxge_dma_common_t)
3048 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3049 	    KM_SLEEP);
3050 
3051 	total_alloc_size = alloc_size;
3052 	i = 0;
3053 	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
3055 	while ((alloc_sizes[size_index] < alloc_size) &&
3056 	    (size_index < array_size))
3057 		size_index++;
3058 	if (size_index >= array_size) {
3059 		size_index = array_size - 1;
3060 	}
3061 
3062 	while ((allocated < total_alloc_size) &&
3063 	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3064 
3065 		tx_dmap[i].dma_chunk_index = i;
3066 		tx_dmap[i].block_size = block_size;
3067 		tx_dmap[i].alength = alloc_sizes[size_index];
3068 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
3069 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3070 		tx_dmap[i].dma_channel = dma_channel;
3071 		tx_dmap[i].contig_alloc_type = B_FALSE;
3072 		tx_dmap[i].kmem_alloc_type = B_FALSE;
3073 
3074 		/*
3075 		 * N2/NIU: data buffers must be contiguous as the driver
3076 		 *	   needs to call Hypervisor api to set up
3077 		 *	   logical pages.
3078 		 */
3079 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3080 			tx_dmap[i].contig_alloc_type = B_TRUE;
3081 		}
3082 
3083 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3084 		    &nxge_tx_dma_attr,
3085 		    tx_dmap[i].alength,
3086 		    &nxge_dev_buf_dma_acc_attr,
3087 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
3088 		    (p_nxge_dma_common_t)(&tx_dmap[i]));
3089 		if (status != NXGE_OK) {
3090 			size_index--;
3091 		} else {
3092 			i++;
3093 			allocated += alloc_sizes[size_index];
3094 		}
3095 	}
3096 
3097 	if (allocated < total_alloc_size) {
3098 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
3100 		    "allocated 0x%x requested 0x%x",
3101 		    dma_channel,
3102 		    allocated, total_alloc_size));
3103 		status = NXGE_ERROR;
3104 		goto nxge_alloc_tx_mem_fail1;
3105 	}
3106 
3107 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3108 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3109 	    "allocated 0x%x requested 0x%x",
3110 	    dma_channel,
3111 	    allocated, total_alloc_size));
3112 
3113 	*num_chunks = i;
3114 	*dmap = tx_dmap;
3115 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3116 	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3117 	    *dmap, i));
3118 	goto nxge_alloc_tx_mem_exit;
3119 
3120 nxge_alloc_tx_mem_fail1:
3121 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3122 
3123 nxge_alloc_tx_mem_exit:
3124 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3125 	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3126 
3127 	return (status);
3128 }
3129 
3130 /*ARGSUSED*/
3131 static void
3132 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3133     uint32_t num_chunks)
3134 {
3135 	int		i;
3136 
3137 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3138 
3139 	if (dmap == 0)
3140 		return;
3141 
3142 	for (i = 0; i < num_chunks; i++) {
3143 		nxge_dma_mem_free(dmap++);
3144 	}
3145 
3146 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3147 }
3148 
3149 /*ARGSUSED*/
3150 nxge_status_t
3151 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3152     p_nxge_dma_common_t *dmap, size_t size)
3153 {
3154 	p_nxge_dma_common_t 	tx_dmap;
3155 	nxge_status_t		status = NXGE_OK;
3156 
3157 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3158 	tx_dmap = (p_nxge_dma_common_t)
3159 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3160 
3161 	tx_dmap->contig_alloc_type = B_FALSE;
3162 	tx_dmap->kmem_alloc_type = B_FALSE;
3163 
3164 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3165 	    &nxge_desc_dma_attr,
3166 	    size,
3167 	    &nxge_dev_desc_dma_acc_attr,
3168 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3169 	    tx_dmap);
3170 	if (status != NXGE_OK) {
3171 		goto nxge_alloc_tx_cntl_dma_fail1;
3172 	}
3173 
3174 	*dmap = tx_dmap;
3175 	goto nxge_alloc_tx_cntl_dma_exit;
3176 
3177 nxge_alloc_tx_cntl_dma_fail1:
3178 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3179 
3180 nxge_alloc_tx_cntl_dma_exit:
3181 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3182 	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3183 
3184 	return (status);
3185 }
3186 
3187 /*ARGSUSED*/
3188 static void
3189 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3190 {
3191 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3192 
3193 	if (dmap == 0)
3194 		return;
3195 
3196 	nxge_dma_mem_free(dmap);
3197 
3198 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3199 }
3200 
3201 /*
3202  * nxge_free_tx_mem_pool
3203  *
3204  *	This function frees all of the per-port TDC control data structures.
3205  *	The per-channel (TDC) data structures are freed when the channel
3206  *	is stopped.
3207  *
3208  * Arguments:
3209  * 	nxgep
3210  *
3211  * Notes:
3212  *
3213  * Context:
3214  *	Any domain
3215  */
3216 static void
3217 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3218 {
3219 	int tdc_max = NXGE_MAX_TDCS;
3220 
3221 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3222 
3223 	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3224 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3225 		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
3227 		return;
3228 	}
3229 	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3230 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3231 		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
3233 		return;
3234 	}
3235 
3236 	/* 1. Free the mailboxes. */
3237 	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3238 	    sizeof (p_tx_mbox_t) * tdc_max);
3239 	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3240 
3241 	nxgep->tx_mbox_areas_p = 0;
3242 
3243 	/* 2. Free the transmit ring arrays. */
3244 	KMEM_FREE(nxgep->tx_rings->rings,
3245 	    sizeof (p_tx_ring_t) * tdc_max);
3246 	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3247 
3248 	nxgep->tx_rings = 0;
3249 
3250 	/* 3. Free the completion ring data structures. */
3251 	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3252 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3253 	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3254 
3255 	nxgep->tx_cntl_pool_p = 0;
3256 
3257 	/* 4. Free the data ring data structures. */
3258 	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3259 	    sizeof (uint32_t) * tdc_max);
3260 	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3261 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3262 	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3263 
3264 	nxgep->tx_buf_pool_p = 0;
3265 
3266 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3267 }
3268 
3269 /*ARGSUSED*/
3270 static nxge_status_t
3271 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3272 	struct ddi_dma_attr *dma_attrp,
3273 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3274 	p_nxge_dma_common_t dma_p)
3275 {
3276 	caddr_t 		kaddrp;
3277 	int			ddi_status = DDI_SUCCESS;
3278 	boolean_t		contig_alloc_type;
3279 	boolean_t		kmem_alloc_type;
3280 
3281 	contig_alloc_type = dma_p->contig_alloc_type;
3282 
3283 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3284 		/*
3285 		 * contig_alloc_type for contiguous memory only allowed
3286 		 * for N2/NIU.
3287 		 */
3288 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3289 		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3290 		    dma_p->contig_alloc_type));
3291 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3292 	}
3293 
3294 	dma_p->dma_handle = NULL;
3295 	dma_p->acc_handle = NULL;
3296 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3297 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3298 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3299 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3300 	if (ddi_status != DDI_SUCCESS) {
3301 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3302 		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3303 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3304 	}
3305 
3306 	kmem_alloc_type = dma_p->kmem_alloc_type;
3307 
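	/*
	 * Three allocation paths, selected by the flags in dma_p:
	 *
	 *	contig_alloc_type:	contig_mem_alloc() (N2/NIU only),
	 *				so the buffer can be mapped as
	 *				hypervisor logical pages;
	 *	kmem_alloc_type:	plain KMEM_ALLOC() bound to the
	 *				DMA handle (Neptune option);
	 *	neither:		ddi_dma_mem_alloc(), the default.
	 *
	 * In every case the bind must resolve to a single DMA cookie,
	 * since the driver programs one base address per buffer.
	 */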
3308 	switch (contig_alloc_type) {
3309 	case B_FALSE:
3310 		switch (kmem_alloc_type) {
3311 		case B_FALSE:
3312 			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3313 			    length,
3314 			    acc_attr_p,
3315 			    xfer_flags,
3316 			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3317 			    &dma_p->acc_handle);
3318 			if (ddi_status != DDI_SUCCESS) {
3319 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3320 				    "nxge_dma_mem_alloc: "
3321 				    "ddi_dma_mem_alloc failed"));
3322 				ddi_dma_free_handle(&dma_p->dma_handle);
3323 				dma_p->dma_handle = NULL;
3324 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3325 			}
3326 			if (dma_p->alength < length) {
3327 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: ddi_dma_mem_alloc "
3329 				    "< length."));
3330 				ddi_dma_mem_free(&dma_p->acc_handle);
3331 				ddi_dma_free_handle(&dma_p->dma_handle);
3332 				dma_p->acc_handle = NULL;
3333 				dma_p->dma_handle = NULL;
3334 				return (NXGE_ERROR);
3335 			}
3336 
3337 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3338 			    NULL,
3339 			    kaddrp, dma_p->alength, xfer_flags,
3340 			    DDI_DMA_DONTWAIT,
3341 			    0, &dma_p->dma_cookie, &dma_p->ncookies);
3342 			if (ddi_status != DDI_DMA_MAPPED) {
3343 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3344 				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3345 				    "failed "
				    "(status 0x%x ncookies %d.)", ddi_status,
3347 				    dma_p->ncookies));
3348 				if (dma_p->acc_handle) {
3349 					ddi_dma_mem_free(&dma_p->acc_handle);
3350 					dma_p->acc_handle = NULL;
3351 				}
3352 				ddi_dma_free_handle(&dma_p->dma_handle);
3353 				dma_p->dma_handle = NULL;
3354 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3355 			}
3356 
3357 			if (dma_p->ncookies != 1) {
3358 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3359 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "> 1 cookie "
				    "(status 0x%x ncookies %d.)", ddi_status,
3362 				    dma_p->ncookies));
3363 				if (dma_p->acc_handle) {
3364 					ddi_dma_mem_free(&dma_p->acc_handle);
3365 					dma_p->acc_handle = NULL;
3366 				}
3367 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3368 				ddi_dma_free_handle(&dma_p->dma_handle);
3369 				dma_p->dma_handle = NULL;
3370 				return (NXGE_ERROR);
3371 			}
3372 			break;
3373 
3374 		case B_TRUE:
3375 			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3376 			if (kaddrp == NULL) {
3377 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3378 				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3379 				    "kmem alloc failed"));
3380 				return (NXGE_ERROR);
3381 			}
3382 
3383 			dma_p->alength = length;
3384 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3385 			    NULL, kaddrp, dma_p->alength, xfer_flags,
3386 			    DDI_DMA_DONTWAIT, 0,
3387 			    &dma_p->dma_cookie, &dma_p->ncookies);
3388 			if (ddi_status != DDI_DMA_MAPPED) {
3389 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3390 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3391 				    "(kmem_alloc) failed kaddrp $%p length %d "
				    "(status 0x%x (%d) ncookies %d.)",
3393 				    kaddrp, length,
3394 				    ddi_status, ddi_status, dma_p->ncookies));
3395 				KMEM_FREE(kaddrp, length);
3396 				dma_p->acc_handle = NULL;
3397 				ddi_dma_free_handle(&dma_p->dma_handle);
3398 				dma_p->dma_handle = NULL;
3399 				dma_p->kaddrp = NULL;
3400 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3401 			}
3402 
3403 			if (dma_p->ncookies != 1) {
3404 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3405 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "(kmem_alloc) > 1 cookie "
				    "(status 0x%x ncookies %d.)", ddi_status,
3408 				    dma_p->ncookies));
3409 				KMEM_FREE(kaddrp, length);
3410 				dma_p->acc_handle = NULL;
3411 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3412 				ddi_dma_free_handle(&dma_p->dma_handle);
3413 				dma_p->dma_handle = NULL;
3414 				dma_p->kaddrp = NULL;
3415 				return (NXGE_ERROR);
3416 			}
3417 
3418 			dma_p->kaddrp = kaddrp;
3419 
3420 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3421 			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3422 			    "kaddr $%p alength %d",
3423 			    dma_p,
3424 			    kaddrp,
3425 			    dma_p->alength));
3426 			break;
3427 		}
3428 		break;
3429 
3430 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3431 	case B_TRUE:
3432 		kaddrp = (caddr_t)contig_mem_alloc(length);
3433 		if (kaddrp == NULL) {
3434 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3435 			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3436 			ddi_dma_free_handle(&dma_p->dma_handle);
3437 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3438 		}
3439 
3440 		dma_p->alength = length;
3441 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3442 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3443 		    &dma_p->dma_cookie, &dma_p->ncookies);
3444 		if (ddi_status != DDI_DMA_MAPPED) {
3445 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc: ddi_dma_addr_bind failed "
3447 			    "(status 0x%x ncookies %d.)", ddi_status,
3448 			    dma_p->ncookies));
3449 
3450 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3451 			    "==> nxge_dma_mem_alloc: (not mapped)"
3452 			    "length %lu (0x%x) "
3453 			    "free contig kaddrp $%p "
3454 			    "va_to_pa $%p",
3455 			    length, length,
3456 			    kaddrp,
3457 			    va_to_pa(kaddrp)));
3458 
3459 
3460 			contig_mem_free((void *)kaddrp, length);
3461 			ddi_dma_free_handle(&dma_p->dma_handle);
3462 
3463 			dma_p->dma_handle = NULL;
3464 			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
3466 			dma_p->kaddrp = NULL;
3467 
3468 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3469 		}
3470 
		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == 0)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc: ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is NULL $%p size %d "
			    "(status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));
3482 
3483 			contig_mem_free((void *)kaddrp, length);
3484 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3485 			ddi_dma_free_handle(&dma_p->dma_handle);
3486 
3487 			dma_p->alength = 0;
3488 			dma_p->dma_handle = NULL;
3489 			dma_p->acc_handle = NULL;
3490 			dma_p->kaddrp = NULL;
3491 
3492 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3493 		}
3494 		break;
3495 
3496 #else
3497 	case B_TRUE:
3498 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3499 		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3500 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3501 #endif
3502 	}
3503 
3504 	dma_p->kaddrp = kaddrp;
3505 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
3506 	    dma_p->alength - RXBUF_64B_ALIGNED;
3507 #if defined(__i386)
3508 	dma_p->ioaddr_pp =
3509 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3510 #else
3511 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3512 #endif
3513 	dma_p->last_ioaddr_pp =
3514 #if defined(__i386)
3515 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3516 #else
3517 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3518 #endif
3519 	    dma_p->alength - RXBUF_64B_ALIGNED;
3520 
3521 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3522 
3523 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3524 	dma_p->orig_ioaddr_pp =
3525 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3526 	dma_p->orig_alength = length;
3527 	dma_p->orig_kaddrp = kaddrp;
3528 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3529 #endif
3530 
3531 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3532 	    "dma buffer allocated: dma_p $%p "
3533 	    "return dmac_ladress from cookie $%p cookie dmac_size %d "
3534 	    "dma_p->ioaddr_p $%p "
3535 	    "dma_p->orig_ioaddr_p $%p "
3536 	    "orig_vatopa $%p "
3537 	    "alength %d (0x%x) "
3538 	    "kaddrp $%p "
3539 	    "length %d (0x%x)",
3540 	    dma_p,
3541 	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3542 	    dma_p->ioaddr_pp,
3543 	    dma_p->orig_ioaddr_pp,
3544 	    dma_p->orig_vatopa,
3545 	    dma_p->alength, dma_p->alength,
3546 	    kaddrp,
3547 	    length, length));
3548 
3549 	return (NXGE_OK);
3550 }
3551 
3552 static void
3553 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3554 {
3555 	if (dma_p->dma_handle != NULL) {
3556 		if (dma_p->ncookies) {
3557 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3558 			dma_p->ncookies = 0;
3559 		}
3560 		ddi_dma_free_handle(&dma_p->dma_handle);
3561 		dma_p->dma_handle = NULL;
3562 	}
3563 
3564 	if (dma_p->acc_handle != NULL) {
3565 		ddi_dma_mem_free(&dma_p->acc_handle);
3566 		dma_p->acc_handle = NULL;
3567 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3568 	}
3569 
3570 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3571 	if (dma_p->contig_alloc_type &&
3572 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3573 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3574 		    "kaddrp $%p (orig_kaddrp $%p) "
3575 		    "mem type %d "
3576 		    "orig_alength %d "
3577 		    "alength 0x%x (%d)",
3578 		    dma_p->kaddrp,
3579 		    dma_p->orig_kaddrp,
3580 		    dma_p->contig_alloc_type,
3581 		    dma_p->orig_alength,
3582 		    dma_p->alength, dma_p->alength));
3583 
3584 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3585 		dma_p->orig_alength = 0;
3586 		dma_p->orig_kaddrp = NULL;
3587 		dma_p->contig_alloc_type = B_FALSE;
3588 	}
3589 #endif
3590 	dma_p->kaddrp = NULL;
3591 	dma_p->alength = 0;
3592 }
3593 
3594 static void
3595 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3596 {
3597 	uint64_t kaddr;
3598 	uint32_t buf_size;
3599 
3600 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3601 
3602 	if (dma_p->dma_handle != NULL) {
3603 		if (dma_p->ncookies) {
3604 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3605 			dma_p->ncookies = 0;
3606 		}
3607 		ddi_dma_free_handle(&dma_p->dma_handle);
3608 		dma_p->dma_handle = NULL;
3609 	}
3610 
3611 	if (dma_p->acc_handle != NULL) {
3612 		ddi_dma_mem_free(&dma_p->acc_handle);
3613 		dma_p->acc_handle = NULL;
3614 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3615 	}
3616 
3617 	NXGE_DEBUG_MSG((NULL, DMA_CTL,
3618 	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3619 	    dma_p,
3620 	    dma_p->buf_alloc_state));
3621 
3622 	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3623 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3624 		    "<== nxge_dma_free_rx_data_buf: "
3625 		    "outstanding data buffers"));
3626 		return;
3627 	}
3628 
3629 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3630 	if (dma_p->contig_alloc_type &&
3631 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3632 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3633 		    "kaddrp $%p (orig_kaddrp $%p) "
3634 		    "mem type %d "
3635 		    "orig_alength %d "
3636 		    "alength 0x%x (%d)",
3637 		    dma_p->kaddrp,
3638 		    dma_p->orig_kaddrp,
3639 		    dma_p->contig_alloc_type,
3640 		    dma_p->orig_alength,
3641 		    dma_p->alength, dma_p->alength));
3642 
3643 		kaddr = (uint64_t)dma_p->orig_kaddrp;
3644 		buf_size = dma_p->orig_alength;
3645 		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3646 		dma_p->orig_alength = 0;
3647 		dma_p->orig_kaddrp = NULL;
3648 		dma_p->contig_alloc_type = B_FALSE;
3649 		dma_p->kaddrp = NULL;
3650 		dma_p->alength = 0;
3651 		return;
3652 	}
3653 #endif
3654 
3655 	if (dma_p->kmem_alloc_type) {
3656 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3657 		    "nxge_dma_free_rx_data_buf: free kmem "
3658 		    "kaddrp $%p (orig_kaddrp $%p)"
3659 		    "alloc type %d "
3660 		    "orig_alength %d "
3661 		    "alength 0x%x (%d)",
3662 		    dma_p->kaddrp,
3663 		    dma_p->orig_kaddrp,
3664 		    dma_p->kmem_alloc_type,
3665 		    dma_p->orig_alength,
3666 		    dma_p->alength, dma_p->alength));
3667 #if defined(__i386)
3668 		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3669 #else
3670 		kaddr = (uint64_t)dma_p->kaddrp;
3671 #endif
3672 		buf_size = dma_p->orig_alength;
3673 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3674 		    "nxge_dma_free_rx_data_buf: free dmap $%p "
3675 		    "kaddr $%p buf_size %d",
3676 		    dma_p,
3677 		    kaddr, buf_size));
3678 		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3679 		dma_p->alength = 0;
3680 		dma_p->orig_alength = 0;
3681 		dma_p->kaddrp = NULL;
3682 		dma_p->kmem_alloc_type = B_FALSE;
3683 	}
3684 
3685 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3686 }
3687 
3688 /*
3689  *	nxge_m_start() -- start transmitting and receiving.
3690  *
3691  *	This function is called by the MAC layer when the first
3692  *	stream is opened, to prepare the hardware for sending
3693  *	and receiving packets.
3694  */
3695 static int
3696 nxge_m_start(void *arg)
3697 {
3698 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3699 
3700 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3701 
3702 	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3703 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3704 	}
3705 
3706 	MUTEX_ENTER(nxgep->genlock);
3707 	if (nxge_init(nxgep) != NXGE_OK) {
3708 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3709 		    "<== nxge_m_start: initialization failed"));
3710 		MUTEX_EXIT(nxgep->genlock);
3711 		return (EIO);
3712 	}
3713 
3714 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3715 		goto nxge_m_start_exit;
3716 	/*
3717 	 * Start timer to check the system error and tx hangs
3718 	 */
3719 	if (!isLDOMguest(nxgep))
3720 		nxgep->nxge_timerid = nxge_start_timer(nxgep,
3721 		    nxge_check_hw_state, NXGE_CHECK_TIMER);
3722 #if	defined(sun4v)
3723 	else
3724 		nxge_hio_start_timer(nxgep);
3725 #endif
3726 
3727 	nxgep->link_notify = B_TRUE;
3728 
3729 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3730 
3731 nxge_m_start_exit:
3732 	MUTEX_EXIT(nxgep->genlock);
3733 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3734 	return (0);
3735 }
3736 
3737 /*
3738  *	nxge_m_stop(): stop transmitting and receiving.
3739  */
3740 static void
3741 nxge_m_stop(void *arg)
3742 {
3743 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3744 
3745 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3746 
3747 	MUTEX_ENTER(nxgep->genlock);
3748 	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3749 
3750 	if (nxgep->nxge_timerid) {
3751 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3752 		nxgep->nxge_timerid = 0;
3753 	}
3754 
3755 	nxge_uninit(nxgep);
3756 
3757 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3758 
3759 	MUTEX_EXIT(nxgep->genlock);
3760 
3761 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3762 }
3763 
3764 static int
3765 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3766 {
3767 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3768 	struct 		ether_addr addrp;
3769 
3770 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3771 
3772 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3773 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3774 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3775 		    "<== nxge_m_unicst: set unicast failed"));
3776 		return (EINVAL);
3777 	}
3778 
3779 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3780 
3781 	return (0);
3782 }
3783 
3784 static int
3785 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3786 {
3787 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3788 	struct 		ether_addr addrp;
3789 
3790 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3791 	    "==> nxge_m_multicst: add %d", add));
3792 
3793 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3794 	if (add) {
3795 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3796 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3797 			    "<== nxge_m_multicst: add multicast failed"));
3798 			return (EINVAL);
3799 		}
3800 	} else {
3801 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3802 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3803 			    "<== nxge_m_multicst: del multicast failed"));
3804 			return (EINVAL);
3805 		}
3806 	}
3807 
3808 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3809 
3810 	return (0);
3811 }
3812 
3813 static int
3814 nxge_m_promisc(void *arg, boolean_t on)
3815 {
3816 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3817 
3818 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3819 	    "==> nxge_m_promisc: on %d", on));
3820 
3821 	if (nxge_set_promisc(nxgep, on)) {
3822 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3823 		    "<== nxge_m_promisc: set promisc failed"));
3824 		return (EINVAL);
3825 	}
3826 
3827 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3828 	    "<== nxge_m_promisc: on %d", on));
3829 
3830 	return (0);
3831 }
3832 
3833 static void
3834 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3835 {
3836 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3837 	struct 		iocblk *iocp;
3838 	boolean_t 	need_privilege;
3839 	int 		err;
3840 	int 		cmd;
3841 
3842 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3843 
3844 	iocp = (struct iocblk *)mp->b_rptr;
3845 	iocp->ioc_error = 0;
3846 	need_privilege = B_TRUE;
3847 	cmd = iocp->ioc_cmd;
3848 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3849 	switch (cmd) {
3850 	default:
3851 		miocnak(wq, mp, 0, EINVAL);
3852 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3853 		return;
3854 
3855 	case LB_GET_INFO_SIZE:
3856 	case LB_GET_INFO:
3857 	case LB_GET_MODE:
3858 		need_privilege = B_FALSE;
3859 		break;
3860 	case LB_SET_MODE:
3861 		break;
3862 
3863 
3864 	case NXGE_GET_MII:
3865 	case NXGE_PUT_MII:
3866 	case NXGE_GET64:
3867 	case NXGE_PUT64:
3868 	case NXGE_GET_TX_RING_SZ:
3869 	case NXGE_GET_TX_DESC:
3870 	case NXGE_TX_SIDE_RESET:
3871 	case NXGE_RX_SIDE_RESET:
3872 	case NXGE_GLOBAL_RESET:
3873 	case NXGE_RESET_MAC:
3874 	case NXGE_TX_REGS_DUMP:
3875 	case NXGE_RX_REGS_DUMP:
3876 	case NXGE_INT_REGS_DUMP:
3877 	case NXGE_VIR_INT_REGS_DUMP:
3878 	case NXGE_PUT_TCAM:
3879 	case NXGE_GET_TCAM:
3880 	case NXGE_RTRACE:
3881 	case NXGE_RDUMP:
3882 
3883 		need_privilege = B_FALSE;
3884 		break;
3885 	case NXGE_INJECT_ERR:
3886 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3887 		nxge_err_inject(nxgep, wq, mp);
3888 		break;
3889 	}
3890 
3891 	if (need_privilege) {
3892 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3893 		if (err != 0) {
3894 			miocnak(wq, mp, 0, err);
3895 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3896 			    "<== nxge_m_ioctl: no priv"));
3897 			return;
3898 		}
3899 	}
3900 
3901 	switch (cmd) {
3902 
3903 	case LB_GET_MODE:
3904 	case LB_SET_MODE:
3905 	case LB_GET_INFO_SIZE:
3906 	case LB_GET_INFO:
3907 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3908 		break;
3909 
3910 	case NXGE_GET_MII:
3911 	case NXGE_PUT_MII:
3912 	case NXGE_PUT_TCAM:
3913 	case NXGE_GET_TCAM:
3914 	case NXGE_GET64:
3915 	case NXGE_PUT64:
3916 	case NXGE_GET_TX_RING_SZ:
3917 	case NXGE_GET_TX_DESC:
3918 	case NXGE_TX_SIDE_RESET:
3919 	case NXGE_RX_SIDE_RESET:
3920 	case NXGE_GLOBAL_RESET:
3921 	case NXGE_RESET_MAC:
3922 	case NXGE_TX_REGS_DUMP:
3923 	case NXGE_RX_REGS_DUMP:
3924 	case NXGE_INT_REGS_DUMP:
3925 	case NXGE_VIR_INT_REGS_DUMP:
3926 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3927 		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
3928 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3929 		break;
3930 	}
3931 
3932 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3933 }
3934 
3935 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3936 
3937 static void
3938 nxge_m_resources(void *arg)
3939 {
3940 	p_nxge_t		nxgep = arg;
3941 	mac_rx_fifo_t 		mrf;
3942 
3943 	nxge_grp_set_t		*set = &nxgep->rx_set;
3944 	uint8_t			rdc;
3945 
3946 	rx_rcr_ring_t		*ring;
3947 
3948 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3949 
3950 	MUTEX_ENTER(nxgep->genlock);
3951 
3952 	if (set->owned.map == 0) {
3953 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
3954 		    "nxge_m_resources: no receive resources"));
3955 		goto nxge_m_resources_exit;
3956 	}
3957 
3958 	/*
3959 	 * CR 6492541 Check to see if the drv_state has been initialized;
3960 	 * if not, call nxge_init().
3961 	 */
3962 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3963 		if (nxge_init(nxgep) != NXGE_OK)
3964 			goto nxge_m_resources_exit;
3965 	}
3966 
3967 	mrf.mrf_type = MAC_RX_FIFO;
3968 	mrf.mrf_blank = nxge_rx_hw_blank;
3969 	mrf.mrf_arg = (void *)nxgep;
3970 
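	/*
	 * Default interrupt blanking values (an assumption about the
	 * mac_rx_fifo_t semantics): coalesce receive interrupts until
	 * 128 ticks have elapsed or 8 packets have arrived, whichever
	 * comes first.
	 */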
3971 	mrf.mrf_normal_blank_time = 128;
3972 	mrf.mrf_normal_pkt_count = 8;
3973 
3974 	/*
3975 	 * Export our receive resources to the MAC layer.
3976 	 */
3977 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
3978 		if ((1 << rdc) & set->owned.map) {
3979 			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
3980 			if (ring == NULL) {
3981 				/*
3982 				 * This is a big deal only if we are
3983 				 * *not* in an LDOMs environment.
3984 				 */
3985 				if (nxgep->environs == SOLARIS_DOMAIN) {
3986 					cmn_err(CE_NOTE,
3987 					    "==> nxge_m_resources: "
3988 					    "ring %d == 0", rdc);
3989 				}
3990 				continue;
3991 			}
3992 			ring->rcr_mac_handle = mac_resource_add
3993 			    (nxgep->mach, (mac_resource_t *)&mrf);
3994 
3995 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3996 			    "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
3997 			    rdc, ring, ring->rcr_mac_handle));
3998 		}
3999 	}
4000 
4001 nxge_m_resources_exit:
4002 	MUTEX_EXIT(nxgep->genlock);
4003 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
4004 }
4005 
4006 void
4007 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
4008 {
4009 	p_nxge_mmac_stats_t mmac_stats;
4010 	int i;
4011 	nxge_mmac_t *mmac_info;
4012 
4013 	mmac_info = &nxgep->nxge_mmac_info;
4014 
4015 	mmac_stats = &nxgep->statsp->mmac_stats;
4016 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4017 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4018 
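	/*
	 * Copy the address into the kstat entry one octet at a time.
	 * The (ETHERADDRL - 1) - i index suggests the pools store the
	 * octets in reverse order (an assumption), so the copy reverses
	 * them back into network order for the kstat.
	 */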
4019 	for (i = 0; i < ETHERADDRL; i++) {
4020 		if (factory) {
4021 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4022 			    = mmac_info->factory_mac_pool[slot][
4023 			    (ETHERADDRL-1) - i];
4024 		} else {
4025 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4026 			    = mmac_info->mac_pool[slot].addr[
4027 			    (ETHERADDRL - 1) - i];
4028 		}
4029 	}
4030 }
4031 
4032 /*
4033  * nxge_altmac_set() -- Set an alternate MAC address
4034  */
4035 static int
4036 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
4037 {
4038 	uint8_t addrn;
4039 	uint8_t portn;
4040 	npi_mac_addr_t altmac;
4041 	hostinfo_t mac_rdc;
4042 	p_nxge_class_pt_cfg_t clscfgp;
4043 
4044 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4045 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4046 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
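	/*
	 * Worked example (hypothetical address, for illustration only):
	 * for maddr = 00:14:4f:a8:12:34 the three 16-bit register words
	 * are packed most-significant octet first:
	 *	w2 = 0x0014, w1 = 0x4fa8, w0 = 0x1234
	 */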
4047 
4048 	portn = nxgep->mac.portnum;
4049 	addrn = (uint8_t)slot - 1;
4050 
4051 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
4052 	    addrn, &altmac) != NPI_SUCCESS)
4053 		return (EIO);
4054 
4055 	/*
4056 	 * Set the rdc table number for the host info entry
4057 	 * for this mac address slot.
4058 	 */
4059 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4060 	mac_rdc.value = 0;
4061 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
4062 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4063 
4064 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4065 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4066 		return (EIO);
4067 	}
4068 
4069 	/*
4070 	 * Enable comparison with the alternate MAC address.
4071 	 * While the first alternate addr is enabled by bit 1 of register
4072 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4073 	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4074 	 * accordingly before calling npi_mac_altaddr_entry.
4075  * accordingly before calling npi_mac_altaddr_enable.
4076 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4077 		addrn = (uint8_t)slot - 1;
4078 	else
4079 		addrn = (uint8_t)slot;
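	/*
	 * For example, slot 1 is enabled as addrn 0 on the XMAC ports
	 * (bit 0 of XMAC_ADDR_CMPEN) but as addrn 1 on the BMAC ports
	 * (bit 1 of BMAC_ALTAD_CMPEN).
	 */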
4080 
4081 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
4082 	    != NPI_SUCCESS)
4083 		return (EIO);
4084 
4085 	return (0);
4086 }
4087 
4088 /*
4089  * nxge_m_mmac_add() - find an unused address slot, set the address
4090  * value to the one specified, and enable the port to start filtering
4091  * on the new MAC address.  Returns 0 on success.
4092  */
4093 int
4094 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
4095 {
4096 	p_nxge_t nxgep = arg;
4097 	mac_addr_slot_t slot;
4098 	nxge_mmac_t *mmac_info;
4099 	int err;
4100 	nxge_status_t status;
4101 
4102 	mutex_enter(nxgep->genlock);
4103 
4104 	/*
4105 	 * Make sure that nxge is initialized if _start() has
4106 	 * not been called.
4107 	 */
4108 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4109 		status = nxge_init(nxgep);
4110 		if (status != NXGE_OK) {
4111 			mutex_exit(nxgep->genlock);
4112 			return (ENXIO);
4113 		}
4114 	}
4115 
4116 	mmac_info = &nxgep->nxge_mmac_info;
4117 	if (mmac_info->naddrfree == 0) {
4118 		mutex_exit(nxgep->genlock);
4119 		return (ENOSPC);
4120 	}
4121 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
4122 	    maddr->mma_addrlen)) {
4123 		mutex_exit(nxgep->genlock);
4124 		return (EINVAL);
4125 	}
4126 	/*
4127 	 * 	Search for the first available slot. Because naddrfree
4128 	 * is not zero, we are guaranteed to find one.
4129 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
4130 	 * MAC slot is slot 1.
4131 	 *	Each of the first two ports of Neptune has 16 alternate
4132 	 * MAC slots but only the first 7 (of 15) slots have assigned factory
4133 	 * MAC addresses. We first search among the slots without bundled
4134 	 * factory MACs. If we fail to find one in that range, then we
4135 	 * search the slots with bundled factory MACs.  A factory MAC
4136 	 * will be wasted while the slot is used with a user MAC address.
4137 	 * But the slot could be used by factory MAC again after calling
4138 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4139 	 */
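	/*
	 * Worked example (illustrative values): with num_mmac = 16 and
	 * num_factory_mmac = 7, slots 8 through 16 are tried first; only
	 * if all of those are in use does the search fall back to the
	 * factory-MAC-capable slots 1 through 7.
	 */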
4140 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
4141 		for (slot = mmac_info->num_factory_mmac + 1;
4142 		    slot <= mmac_info->num_mmac; slot++) {
4143 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4144 				break;
4145 		}
4146 		if (slot > mmac_info->num_mmac) {
4147 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
4148 			    slot++) {
4149 				if (!(mmac_info->mac_pool[slot].flags
4150 				    & MMAC_SLOT_USED))
4151 					break;
4152 			}
4153 		}
4154 	} else {
4155 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
4156 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4157 				break;
4158 		}
4159 	}
4160 	ASSERT(slot <= mmac_info->num_mmac);
4161 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
4162 		mutex_exit(nxgep->genlock);
4163 		return (err);
4164 	}
4165 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4166 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4167 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4168 	mmac_info->naddrfree--;
4169 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4170 
4171 	maddr->mma_slot = slot;
4172 
4173 	mutex_exit(nxgep->genlock);
4174 	return (0);
4175 }
4176 
4177 /*
4178  * This function reserves an unused slot and programs the slot and the HW
4179  * with a factory mac address.
4180  */
4181 static int
4182 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
4183 {
4184 	p_nxge_t nxgep = arg;
4185 	mac_addr_slot_t slot;
4186 	nxge_mmac_t *mmac_info;
4187 	int err;
4188 	nxge_status_t status;
4189 
4190 	mutex_enter(nxgep->genlock);
4191 
4192 	/*
4193 	 * Make sure that nxge is initialized if _start() has
4194 	 * not been called.
4195 	 */
4196 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4197 		status = nxge_init(nxgep);
4198 		if (status != NXGE_OK) {
4199 			mutex_exit(nxgep->genlock);
4200 			return (ENXIO);
4201 		}
4202 	}
4203 
4204 	mmac_info = &nxgep->nxge_mmac_info;
4205 	if (mmac_info->naddrfree == 0) {
4206 		mutex_exit(nxgep->genlock);
4207 		return (ENOSPC);
4208 	}
4209 
4210 	slot = maddr->mma_slot;
4211 	if (slot == -1) {  /* -1: Take the first available slot */
4212 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
4213 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4214 				break;
4215 		}
4216 		if (slot > mmac_info->num_factory_mmac) {
4217 			mutex_exit(nxgep->genlock);
4218 			return (ENOSPC);
4219 		}
4220 	}
4221 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
4222 		/*
4223 		 * Do not support factory MAC at a slot greater than
4224 		 * num_factory_mmac even when there are available factory
4225 		 * MAC addresses because the alternate MACs are bundled with
4226 		 * slot[1] through slot[num_factory_mmac]
4227 		 */
4228 		mutex_exit(nxgep->genlock);
4229 		return (EINVAL);
4230 	}
4231 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4232 		mutex_exit(nxgep->genlock);
4233 		return (EBUSY);
4234 	}
4235 	/* Verify the address to be reserved */
4236 	if (!mac_unicst_verify(nxgep->mach,
4237 	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
4238 		mutex_exit(nxgep->genlock);
4239 		return (EINVAL);
4240 	}
4241 	if ((err = nxge_altmac_set(nxgep,
4242 	    mmac_info->factory_mac_pool[slot], slot)) != 0) {
4243 		mutex_exit(nxgep->genlock);
4244 		return (err);
4245 	}
4246 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
4247 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
4248 	mmac_info->naddrfree--;
4249 
4250 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
4251 	mutex_exit(nxgep->genlock);
4252 
4253 	/* Pass info back to the caller */
4254 	maddr->mma_slot = slot;
4255 	maddr->mma_addrlen = ETHERADDRL;
4256 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
4257 
4258 	return (0);
4259 }
4260 
4261 /*
4262  * Remove the specified mac address and update the HW not to filter
4263  * the mac address anymore.
4264  */
4265 int
4266 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
4267 {
4268 	p_nxge_t nxgep = arg;
4269 	nxge_mmac_t *mmac_info;
4270 	uint8_t addrn;
4271 	uint8_t portn;
4272 	int err = 0;
4273 	nxge_status_t status;
4274 
4275 	mutex_enter(nxgep->genlock);
4276 
4277 	/*
4278 	 * Make sure that nxge is initialized if _start() has
4279 	 * not been called.
4280 	 */
4281 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4282 		status = nxge_init(nxgep);
4283 		if (status != NXGE_OK) {
4284 			mutex_exit(nxgep->genlock);
4285 			return (ENXIO);
4286 		}
4287 	}
4288 
4289 	mmac_info = &nxgep->nxge_mmac_info;
4290 	if (slot < 1 || slot > mmac_info->num_mmac) {
4291 		mutex_exit(nxgep->genlock);
4292 		return (EINVAL);
4293 	}
4294 
4295 	portn = nxgep->mac.portnum;
4296 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4297 		addrn = (uint8_t)slot - 1;
4298 	else
4299 		addrn = (uint8_t)slot;
4300 
4301 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4302 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4303 		    == NPI_SUCCESS) {
4304 			mmac_info->naddrfree++;
4305 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4306 			/*
4307 			 * Regardless of whether the MAC we just stopped filtering
4308 			 * is a user addr or a factory addr, we must set
4309 			 * the MMAC_VENDOR_ADDR flag if this slot has an
4310 			 * associated factory MAC to indicate that a factory
4311 			 * MAC is available.
4312 			 */
4313 			if (slot <= mmac_info->num_factory_mmac) {
4314 				mmac_info->mac_pool[slot].flags
4315 				    |= MMAC_VENDOR_ADDR;
4316 			}
4317 			/*
4318 			 * Clear mac_pool[slot].addr so that kstat shows 0
4319 			 * alternate MAC address if the slot is not used.
4320 			 * (But nxge_m_mmac_get returns the factory MAC even
4321 			 * when the slot is not used!)
4322 			 */
4323 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4324 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4325 		} else {
4326 			err = EIO;
4327 		}
4328 	} else {
4329 		err = EINVAL;
4330 	}
4331 
4332 	mutex_exit(nxgep->genlock);
4333 	return (err);
4334 }
4335 
4336 /*
4337  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
4338  */
4339 static int
4340 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
4341 {
4342 	p_nxge_t nxgep = arg;
4343 	mac_addr_slot_t slot;
4344 	nxge_mmac_t *mmac_info;
4345 	int err = 0;
4346 	nxge_status_t status;
4347 
4348 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
4349 	    maddr->mma_addrlen))
4350 		return (EINVAL);
4351 
4352 	slot = maddr->mma_slot;
4353 
4354 	mutex_enter(nxgep->genlock);
4355 
4356 	/*
4357 	 * Make sure that nxge is initialized if _start() has
4358 	 * not been called.
4359 	 */
4360 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4361 		status = nxge_init(nxgep);
4362 		if (status != NXGE_OK) {
4363 			mutex_exit(nxgep->genlock);
4364 			return (ENXIO);
4365 		}
4366 	}
4367 
4368 	mmac_info = &nxgep->nxge_mmac_info;
4369 	if (slot < 1 || slot > mmac_info->num_mmac) {
4370 		mutex_exit(nxgep->genlock);
4371 		return (EINVAL);
4372 	}
4373 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4374 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
4375 		    == 0) {
4376 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
4377 			    ETHERADDRL);
4378 			/*
4379 			 * Assume that the MAC passed down from the caller
4380 			 * is not a factory MAC address (The user should
4381 			 * call mmac_remove followed by mmac_reserve if
4382 			 * he wants to use the factory MAC for this slot).
4383 			 */
4384 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4385 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4386 		}
4387 	} else {
4388 		err = EINVAL;
4389 	}
4390 	mutex_exit(nxgep->genlock);
4391 	return (err);
4392 }
4393 
4394 /*
4395  * nxge_m_mmac_get() - Get the MAC address and other information
4396  * related to the slot.  mma_flags should be set to 0 in the call.
4397  * Note: although kstat shows MAC address as zero when a slot is
4398  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
4399  * to the caller as long as the slot is not using a user MAC address.
4400  * The following table shows the rules,
4401  *
4402  *				   USED    VENDOR    mma_addr
4403  * ------------------------------------------------------------
4404  * (1) Slot uses a user MAC:        yes      no     user MAC
4405  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
4406  * (3) Slot is not used but is
4407  *     factory MAC capable:         no       yes    factory MAC
4408  * (4) Slot is not used and is
4409  *     not factory MAC capable:     no       no        0
4410  * ------------------------------------------------------------
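 * For example, a slot whose factory MAC was released through
 * nxge_m_mmac_remove falls under rule (3): kstat reports zero for it,
 * but this function still returns the factory MAC with only
 * MMAC_VENDOR_ADDR set in mma_flags.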
4411  */
4412 static int
4413 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
4414 {
4415 	nxge_t *nxgep = arg;
4416 	mac_addr_slot_t slot;
4417 	nxge_mmac_t *mmac_info;
4418 	nxge_status_t status;
4419 
4420 	slot = maddr->mma_slot;
4421 
4422 	mutex_enter(nxgep->genlock);
4423 
4424 	/*
4425 	 * Make sure that nxge is initialized if _start() has
4426 	 * not been called.
4427 	 */
4428 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4429 		status = nxge_init(nxgep);
4430 		if (status != NXGE_OK) {
4431 			mutex_exit(nxgep->genlock);
4432 			return (ENXIO);
4433 		}
4434 	}
4435 
4436 	mmac_info = &nxgep->nxge_mmac_info;
4437 
4438 	if (slot < 1 || slot > mmac_info->num_mmac) {
4439 		mutex_exit(nxgep->genlock);
4440 		return (EINVAL);
4441 	}
4442 	maddr->mma_flags = 0;
4443 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
4444 		maddr->mma_flags |= MMAC_SLOT_USED;
4445 
4446 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
4447 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
4448 		bcopy(mmac_info->factory_mac_pool[slot],
4449 		    maddr->mma_addr, ETHERADDRL);
4450 		maddr->mma_addrlen = ETHERADDRL;
4451 	} else {
4452 		if (maddr->mma_flags & MMAC_SLOT_USED) {
4453 			bcopy(mmac_info->mac_pool[slot].addr,
4454 			    maddr->mma_addr, ETHERADDRL);
4455 			maddr->mma_addrlen = ETHERADDRL;
4456 		} else {
4457 			bzero(maddr->mma_addr, ETHERADDRL);
4458 			maddr->mma_addrlen = 0;
4459 		}
4460 	}
4461 	mutex_exit(nxgep->genlock);
4462 	return (0);
4463 }
4464 
4465 static boolean_t
4466 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4467 {
4468 	nxge_t *nxgep = arg;
4469 	uint32_t *txflags = cap_data;
4470 	multiaddress_capab_t *mmacp = cap_data;
4471 
4472 	switch (cap) {
4473 	case MAC_CAPAB_HCKSUM:
4474 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4475 		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4476 		if (nxge_cksum_offload <= 1) {
4477 			*txflags = HCKSUM_INET_PARTIAL;
4478 		}
4479 		break;
4480 
4481 	case MAC_CAPAB_POLL:
4482 		/*
4483 		 * There's nothing for us to fill in; simply returning
4484 		 * B_TRUE to state that we support polling is sufficient.
4485 		 */
4486 		break;
4487 
4488 	case MAC_CAPAB_MULTIADDRESS:
4489 		mmacp = (multiaddress_capab_t *)cap_data;
4490 		mutex_enter(nxgep->genlock);
4491 
4492 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
4493 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
4494 		mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */
4495 		/*
4496 		 * maddr_handle is driver's private data, passed back to
4497 		 * entry point functions as arg.
4498 		 */
4499 		mmacp->maddr_handle	= nxgep;
4500 		mmacp->maddr_add	= nxge_m_mmac_add;
4501 		mmacp->maddr_remove	= nxge_m_mmac_remove;
4502 		mmacp->maddr_modify	= nxge_m_mmac_modify;
4503 		mmacp->maddr_get	= nxge_m_mmac_get;
4504 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
4505 
4506 		mutex_exit(nxgep->genlock);
4507 		break;
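	/*
	 * Example (hypothetical caller, for illustration only): the MAC
	 * layer fetches this capability and then drives the entry points
	 * through the returned function pointers, e.g.
	 *
	 *	multiaddress_capab_t cap;
	 *	mac_multi_addr_t ma;
	 *
	 *	if (nxge_m_getcapab(nxgep, MAC_CAPAB_MULTIADDRESS, &cap)) {
	 *		ma.mma_slot = -1;	(take first available slot)
	 *		(void) cap.maddr_reserve(cap.maddr_handle, &ma);
	 *	}
	 */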
4508 
4509 	case MAC_CAPAB_LSO: {
4510 		mac_capab_lso_t *cap_lso = cap_data;
4511 
4512 		if (nxgep->soft_lso_enable) {
4513 			if (nxge_cksum_offload <= 1) {
4514 				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4515 				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4516 					nxge_lso_max = NXGE_LSO_MAXLEN;
4517 				}
4518 				cap_lso->lso_basic_tcp_ipv4.lso_max =
4519 				    nxge_lso_max;
4520 			}
4521 			break;
4522 		} else {
4523 			return (B_FALSE);
4524 		}
4525 	}
4526 
4527 #if defined(sun4v)
4528 	case MAC_CAPAB_RINGS: {
4529 		mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;
4530 
4531 		/*
4532 		 * Only the service domain driver responds to
4533 		 * this capability request.
4534 		 */
4535 		if (isLDOMservice(nxgep)) {
4536 			mrings->mr_handle = (void *)nxgep;
4537 
4538 			/*
4539 			 * No dynamic allocation of groups and
4540 			 * rings at this time.  Shares dictate the
4541 			 * configuration.
4542 			 */
4543 			mrings->mr_gadd_ring = NULL;
4544 			mrings->mr_grem_ring = NULL;
4545 			mrings->mr_rget = NULL;
4546 			mrings->mr_gget = nxge_hio_group_get;
4547 
4548 			if (mrings->mr_type == MAC_RING_TYPE_RX) {
4549 				mrings->mr_rnum = 8; /* XXX */
4550 				mrings->mr_gnum = 6; /* XXX */
4551 			} else {
4552 				mrings->mr_rnum = 8; /* XXX */
4553 				mrings->mr_gnum = 0; /* XXX */
4554 			}
4555 		} else
4556 			return (B_FALSE);
4557 		break;
4558 	}
4559 
4560 	case MAC_CAPAB_SHARES: {
4561 		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4562 
4563 		/*
4564 		 * Only the service domain driver responds to
4565 		 * this capability request.
4566 		 */
4567 		if (isLDOMservice(nxgep)) {
4568 			mshares->ms_snum = 3;
4569 			mshares->ms_handle = (void *)nxgep;
4570 			mshares->ms_salloc = nxge_hio_share_alloc;
4571 			mshares->ms_sfree = nxge_hio_share_free;
4572 			mshares->ms_sadd = NULL;
4573 			mshares->ms_sremove = NULL;
4574 			mshares->ms_squery = nxge_hio_share_query;
4575 		} else
4576 			return (B_FALSE);
4577 		break;
4578 	}
4579 #endif
4580 	default:
4581 		return (B_FALSE);
4582 	}
4583 	return (B_TRUE);
4584 }
4585 
4586 static boolean_t
4587 nxge_param_locked(mac_prop_id_t pr_num)
4588 {
4589 	/*
4590 	 * All adv_* parameters are locked (read-only) while
4591 	 * the device is in any sort of loopback mode ...
4592 	 */
4593 	switch (pr_num) {
4594 		case MAC_PROP_ADV_1000FDX_CAP:
4595 		case MAC_PROP_EN_1000FDX_CAP:
4596 		case MAC_PROP_ADV_1000HDX_CAP:
4597 		case MAC_PROP_EN_1000HDX_CAP:
4598 		case MAC_PROP_ADV_100FDX_CAP:
4599 		case MAC_PROP_EN_100FDX_CAP:
4600 		case MAC_PROP_ADV_100HDX_CAP:
4601 		case MAC_PROP_EN_100HDX_CAP:
4602 		case MAC_PROP_ADV_10FDX_CAP:
4603 		case MAC_PROP_EN_10FDX_CAP:
4604 		case MAC_PROP_ADV_10HDX_CAP:
4605 		case MAC_PROP_EN_10HDX_CAP:
4606 		case MAC_PROP_AUTONEG:
4607 		case MAC_PROP_FLOWCTRL:
4608 			return (B_TRUE);
4609 	}
4610 	return (B_FALSE);
4611 }
4612 
4613 /*
4614  * callback functions for set/get of properties
4615  */
4616 static int
4617 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4618     uint_t pr_valsize, const void *pr_val)
4619 {
4620 	nxge_t		*nxgep = barg;
4621 	p_nxge_param_t	param_arr;
4622 	p_nxge_stats_t	statsp;
4623 	int		err = 0;
4624 	uint8_t		val;
4625 	uint32_t	cur_mtu, new_mtu, old_framesize;
4626 	link_flowctrl_t	fl;
4627 
4628 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4629 	param_arr = nxgep->param_arr;
4630 	statsp = nxgep->statsp;
4631 	mutex_enter(nxgep->genlock);
4632 	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4633 	    nxge_param_locked(pr_num)) {
4634 		/*
4635 		 * All adv_* parameters are locked (read-only)
4636 		 * while the device is in any sort of loopback mode.
4637 		 */
4638 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4639 		    "==> nxge_m_setprop: loopback mode: read only"));
4640 		mutex_exit(nxgep->genlock);
4641 		return (EBUSY);
4642 	}
4643 
4644 	val = *(uint8_t *)pr_val;
4645 	switch (pr_num) {
4646 		case MAC_PROP_EN_1000FDX_CAP:
4647 			nxgep->param_en_1000fdx = val;
4648 			param_arr[param_anar_1000fdx].value = val;
4649 
4650 			goto reprogram;
4651 
4652 		case MAC_PROP_EN_100FDX_CAP:
4653 			nxgep->param_en_100fdx = val;
4654 			param_arr[param_anar_100fdx].value = val;
4655 
4656 			goto reprogram;
4657 
4658 		case MAC_PROP_EN_10FDX_CAP:
4659 			nxgep->param_en_10fdx = val;
4660 			param_arr[param_anar_10fdx].value = val;
4661 
4662 			goto reprogram;
4663 
4664 		case MAC_PROP_EN_1000HDX_CAP:
4665 		case MAC_PROP_EN_100HDX_CAP:
4666 		case MAC_PROP_EN_10HDX_CAP:
4667 		case MAC_PROP_ADV_1000FDX_CAP:
4668 		case MAC_PROP_ADV_1000HDX_CAP:
4669 		case MAC_PROP_ADV_100FDX_CAP:
4670 		case MAC_PROP_ADV_100HDX_CAP:
4671 		case MAC_PROP_ADV_10FDX_CAP:
4672 		case MAC_PROP_ADV_10HDX_CAP:
4673 		case MAC_PROP_STATUS:
4674 		case MAC_PROP_SPEED:
4675 		case MAC_PROP_DUPLEX:
4676 			err = EINVAL; /* cannot set read-only properties */
4677 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4678 			    "==> nxge_m_setprop:  read only property %d",
4679 			    pr_num));
4680 			break;
4681 
4682 		case MAC_PROP_AUTONEG:
4683 			param_arr[param_autoneg].value = val;
4684 
4685 			goto reprogram;
4686 
4687 		case MAC_PROP_MTU:
4688 			if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4689 				err = EBUSY;
4690 				break;
4691 			}
4692 
4693 			cur_mtu = nxgep->mac.default_mtu;
4694 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4695 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4696 			    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4697 			    new_mtu, nxgep->mac.is_jumbo));
4698 
4699 			if (new_mtu == cur_mtu) {
4700 				err = 0;
4701 				break;
4702 			}
4703 			if (new_mtu < NXGE_DEFAULT_MTU ||
4704 			    new_mtu > NXGE_MAXIMUM_MTU) {
4705 				err = EINVAL;
4706 				break;
4707 			}
4708 
4709 			if ((new_mtu > NXGE_DEFAULT_MTU) &&
4710 			    !nxgep->mac.is_jumbo) {
4711 				err = EINVAL;
4712 				break;
4713 			}
4714 
4715 			old_framesize = (uint32_t)nxgep->mac.maxframesize;
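			/*
			 * NXGE_EHEADER_VLAN_CRC covers the Ethernet header,
			 * VLAN tag and CRC (assumed 14 + 4 + 4 = 22 bytes),
			 * so e.g. an MTU of 9000 would yield a maxframesize
			 * of 9022.
			 */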
4716 			nxgep->mac.maxframesize = (uint16_t)
4717 			    (new_mtu + NXGE_EHEADER_VLAN_CRC);
4718 			if (nxge_mac_set_framesize(nxgep)) {
4719 				nxgep->mac.maxframesize =
4720 				    (uint16_t)old_framesize;
4721 				err = EINVAL;
4722 				break;
4723 			}
4724 
4725 			err = mac_maxsdu_update(nxgep->mach, new_mtu);
4726 			if (err) {
4727 				nxgep->mac.maxframesize =
4728 				    (uint16_t)old_framesize;
4729 				err = EINVAL;
4730 				break;
4731 			}
4732 
4733 			nxgep->mac.default_mtu = new_mtu;
4734 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4735 			    "==> nxge_m_setprop: set MTU: %d maxframe %d",
4736 			    new_mtu, nxgep->mac.maxframesize));
4737 			break;
4738 
4739 		case MAC_PROP_FLOWCTRL:
4740 			bcopy(pr_val, &fl, sizeof (fl));
4741 			switch (fl) {
4742 			default:
4743 				err = EINVAL;
4744 				break;
4745 
4746 			case LINK_FLOWCTRL_NONE:
4747 				param_arr[param_anar_pause].value = 0;
4748 				break;
4749 
4750 			case LINK_FLOWCTRL_RX:
4751 				param_arr[param_anar_pause].value = 1;
4752 				break;
4753 
4754 			case LINK_FLOWCTRL_TX:
4755 			case LINK_FLOWCTRL_BI:
4756 				err = EINVAL;
4757 				break;
4758 			}
4759 
4760 reprogram:
4761 			if (err == 0) {
4762 				if (!nxge_param_link_update(nxgep)) {
4763 					err = EINVAL;
4764 				}
4765 			}
4766 			break;
4767 		case MAC_PROP_PRIVATE:
4768 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4769 			    "==> nxge_m_setprop: private property"));
4770 			err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
4771 			    pr_val);
4772 			break;
4773 
4774 		default:
4775 			err = ENOTSUP;
4776 			break;
4777 	}
4778 
4779 	mutex_exit(nxgep->genlock);
4780 
4781 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4782 	    "<== nxge_m_setprop (return %d)", err));
4783 	return (err);
4784 }
4785 
4786 static int
4787 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4788     uint_t pr_flags, uint_t pr_valsize, void *pr_val)
4789 {
4790 	nxge_t 		*nxgep = barg;
4791 	p_nxge_param_t	param_arr = nxgep->param_arr;
4792 	p_nxge_stats_t	statsp = nxgep->statsp;
4793 	int		err = 0;
4794 	link_flowctrl_t	fl;
4795 	uint64_t	tmp = 0;
4796 	link_state_t	ls;
4797 	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);
4798 
4799 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4800 	    "==> nxge_m_getprop: pr_num %d", pr_num));
4801 
4802 	if (pr_valsize == 0)
4803 		return (EINVAL);
4804 
4805 	if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) {
4806 		err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
4807 		return (err);
4808 	}
4809 
4810 	bzero(pr_val, pr_valsize);
4811 	switch (pr_num) {
4812 		case MAC_PROP_DUPLEX:
4813 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4814 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4815 			    "==> nxge_m_getprop: duplex mode %d",
4816 			    *(uint8_t *)pr_val));
4817 			break;
4818 
4819 		case MAC_PROP_SPEED:
4820 			if (pr_valsize < sizeof (uint64_t))
4821 				return (EINVAL);
4822 			tmp = statsp->mac_stats.link_speed * 1000000ull;
4823 			bcopy(&tmp, pr_val, sizeof (tmp));
4824 			break;
4825 
4826 		case MAC_PROP_STATUS:
4827 			if (pr_valsize < sizeof (link_state_t))
4828 				return (EINVAL);
4829 			if (!statsp->mac_stats.link_up)
4830 				ls = LINK_STATE_DOWN;
4831 			else
4832 				ls = LINK_STATE_UP;
4833 			bcopy(&ls, pr_val, sizeof (ls));
4834 			break;
4835 
4836 		case MAC_PROP_AUTONEG:
4837 			*(uint8_t *)pr_val =
4838 			    param_arr[param_autoneg].value;
4839 			break;
4840 
4841 		case MAC_PROP_FLOWCTRL:
4842 			if (pr_valsize < sizeof (link_flowctrl_t))
4843 				return (EINVAL);
4844 
4845 			fl = LINK_FLOWCTRL_NONE;
4846 			if (param_arr[param_anar_pause].value) {
4847 				fl = LINK_FLOWCTRL_RX;
4848 			}
4849 			bcopy(&fl, pr_val, sizeof (fl));
4850 			break;
4851 
4852 		case MAC_PROP_ADV_1000FDX_CAP:
4853 			*(uint8_t *)pr_val =
4854 			    param_arr[param_anar_1000fdx].value;
4855 			break;
4856 
4857 		case MAC_PROP_EN_1000FDX_CAP:
4858 			*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4859 			break;
4860 
4861 		case MAC_PROP_ADV_100FDX_CAP:
4862 			*(uint8_t *)pr_val =
4863 			    param_arr[param_anar_100fdx].value;
4864 			break;
4865 
4866 		case MAC_PROP_EN_100FDX_CAP:
4867 			*(uint8_t *)pr_val = nxgep->param_en_100fdx;
4868 			break;
4869 
4870 		case MAC_PROP_ADV_10FDX_CAP:
4871 			*(uint8_t *)pr_val =
4872 			    param_arr[param_anar_10fdx].value;
4873 			break;
4874 
4875 		case MAC_PROP_EN_10FDX_CAP:
4876 			*(uint8_t *)pr_val = nxgep->param_en_10fdx;
4877 			break;
4878 
4879 		case MAC_PROP_EN_1000HDX_CAP:
4880 		case MAC_PROP_EN_100HDX_CAP:
4881 		case MAC_PROP_EN_10HDX_CAP:
4882 		case MAC_PROP_ADV_1000HDX_CAP:
4883 		case MAC_PROP_ADV_100HDX_CAP:
4884 		case MAC_PROP_ADV_10HDX_CAP:
4885 			err = ENOTSUP;
4886 			break;
4887 
4888 		case MAC_PROP_PRIVATE:
4889 			err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
4890 			    pr_valsize, pr_val);
4891 			break;
4892 		default:
4893 			err = EINVAL;
4894 			break;
4895 	}
4896 
4897 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));
4898 
4899 	return (err);
4900 }
4901 
4902 /* ARGSUSED */
4903 static int
4904 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4905     const void *pr_val)
4906 {
4907 	p_nxge_param_t	param_arr = nxgep->param_arr;
4908 	int		err = 0;
4909 	long		result;
4910 
4911 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4912 	    "==> nxge_set_priv_prop: name %s", pr_name));
4913 
4914 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4915 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4916 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4917 		    "<== nxge_set_priv_prop: name %s "
4918 		    "pr_val %s result %d "
4919 		    "param %d is_jumbo %d",
4920 		    pr_name, pr_val, result,
4921 		    param_arr[param_accept_jumbo].value,
4922 		    nxgep->mac.is_jumbo));
4923 
4924 		if (result > 1 || result < 0) {
4925 			err = EINVAL;
4926 		} else {
4927 			if (nxgep->mac.is_jumbo ==
4928 			    (uint32_t)result) {
4929 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4930 				    "no change (%d %d)",
4931 				    nxgep->mac.is_jumbo,
4932 				    result));
4933 				return (0);
4934 			}
4935 		}
4936 
4937 		param_arr[param_accept_jumbo].value = result;
4938 		nxgep->mac.is_jumbo = B_FALSE;
4939 		if (result) {
4940 			nxgep->mac.is_jumbo = B_TRUE;
4941 		}
4942 
4943 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4944 		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
4945 		    pr_name, result, nxgep->mac.is_jumbo));
4946 
4947 		return (err);
4948 	}
4949 
4950 	/* Blanking */
4951 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4952 		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4953 		    (char *)pr_val,
4954 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4955 		if (err) {
4956 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4957 			    "<== nxge_set_priv_prop: "
4958 			    "unable to set (%s)", pr_name));
4959 			err = EINVAL;
4960 		} else {
4961 			err = 0;
4962 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4963 			    "<== nxge_set_priv_prop: "
4964 			    "set (%s)", pr_name));
4965 		}
4966 
4967 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4968 		    "<== nxge_set_priv_prop: name %s (value %s)",
4969 		    pr_name, pr_val));
4970 
4971 		return (err);
4972 	}
4973 
4974 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4975 		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4976 		    (char *)pr_val,
4977 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4978 		if (err) {
4979 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4980 			    "<== nxge_set_priv_prop: "
4981 			    "unable to set (%s)", pr_name));
4982 			err = EINVAL;
4983 		} else {
4984 			err = 0;
4985 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4986 			    "<== nxge_set_priv_prop: "
4987 			    "set (%s)", pr_name));
4988 		}
4989 
4990 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4991 		    "<== nxge_set_priv_prop: name %s (value %s)",
4992 		    pr_name, pr_val));
4993 
4994 		return (err);
4995 	}
4996 
4997 	/* Classification */
4998 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4999 		if (pr_val == NULL) {
5000 			err = EINVAL;
5001 			return (err);
5002 		}
5003 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5004 
5005 		err = nxge_param_set_ip_opt(nxgep, NULL,
5006 		    NULL, (char *)pr_val,
5007 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5008 
5009 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5010 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5011 		    pr_name, result));
5012 
5013 		return (err);
5014 	}
5015 
5016 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5017 		if (pr_val == NULL) {
5018 			err = EINVAL;
5019 			return (err);
5020 		}
5021 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5022 
5023 		err = nxge_param_set_ip_opt(nxgep, NULL,
5024 		    NULL, (char *)pr_val,
5025 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5026 
5027 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5028 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5029 		    pr_name, result));
5030 
5031 		return (err);
5032 	}
5033 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5034 		if (pr_val == NULL) {
5035 			err = EINVAL;
5036 			return (err);
5037 		}
5038 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5039 
5040 		err = nxge_param_set_ip_opt(nxgep, NULL,
5041 		    NULL, (char *)pr_val,
5042 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5043 
5044 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5045 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5046 		    pr_name, result));
5047 
5048 		return (err);
5049 	}
5050 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5051 		if (pr_val == NULL) {
5052 			err = EINVAL;
5053 			return (err);
5054 		}
5055 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5056 
5057 		err = nxge_param_set_ip_opt(nxgep, NULL,
5058 		    NULL, (char *)pr_val,
5059 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5060 
5061 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5062 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5063 		    pr_name, result));
5064 
5065 		return (err);
5066 	}
5067 
5068 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5069 		if (pr_val == NULL) {
5070 			err = EINVAL;
5071 			return (err);
5072 		}
5073 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5074 
5075 		err = nxge_param_set_ip_opt(nxgep, NULL,
5076 		    NULL, (char *)pr_val,
5077 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5078 
5079 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5080 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5081 		    pr_name, result));
5082 
5083 		return (err);
5084 	}
5085 
5086 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5087 		if (pr_val == NULL) {
5088 			err = EINVAL;
5089 			return (err);
5090 		}
5091 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5092 
5093 		err = nxge_param_set_ip_opt(nxgep, NULL,
5094 		    NULL, (char *)pr_val,
5095 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5096 
5097 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5098 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5099 		    pr_name, result));
5100 
5101 		return (err);
5102 	}
5103 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5104 		if (pr_val == NULL) {
5105 			err = EINVAL;
5106 			return (err);
5107 		}
5108 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5109 
5110 		err = nxge_param_set_ip_opt(nxgep, NULL,
5111 		    NULL, (char *)pr_val,
5112 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5113 
5114 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5115 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5116 		    pr_name, result));
5117 
5118 		return (err);
5119 	}
5120 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5121 		if (pr_val == NULL) {
5122 			err = EINVAL;
5123 			return (err);
5124 		}
5125 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5126 
5127 		err = nxge_param_set_ip_opt(nxgep, NULL,
5128 		    NULL, (char *)pr_val,
5129 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5130 
5131 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5132 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5133 		    pr_name, result));
5134 
5135 		return (err);
5136 	}
5137 
5138 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5139 		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
5140 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5141 			    "==> nxge_set_priv_prop: name %s (busy)", pr_name));
5142 			err = EBUSY;
5143 			return (err);
5144 		}
5145 		if (pr_val == NULL) {
5146 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5147 			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
5148 			err = EINVAL;
5149 			return (err);
5150 		}
5151 
5152 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5153 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5154 		    "<== nxge_set_priv_prop: name %s "
5155 		    "(lso %d pr_val %s value %d)",
5156 		    pr_name, nxgep->soft_lso_enable, pr_val, result));
5157 
5158 		if (result > 1 || result < 0) {
5159 			err = EINVAL;
5160 		} else {
5161 			if (nxgep->soft_lso_enable == (uint32_t)result) {
5162 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5163 				    "no change (%d %d)",
5164 				    nxgep->soft_lso_enable, result));
5165 				return (0);
5166 			}
5167 		}
5168 
5169 		nxgep->soft_lso_enable = (int)result;
5170 
5171 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5172 		    "<== nxge_set_priv_prop: name %s (value %d)",
5173 		    pr_name, result));
5174 
5175 		return (err);
5176 	}
5177 	/*
5178 	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5179 	 * following code to be executed.
5180 	 */
5181 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5182 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5183 		    (caddr_t)&param_arr[param_anar_10gfdx]);
5184 		return (err);
5185 	}
5186 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5187 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5188 		    (caddr_t)&param_arr[param_anar_pause]);
5189 		return (err);
5190 	}
5191 
5192 	return (EINVAL);
5193 }
5194 
5195 static int
5196 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
5197     uint_t pr_valsize, void *pr_val)
5198 {
5199 	p_nxge_param_t	param_arr = nxgep->param_arr;
5200 	char		valstr[MAXNAMELEN];
5201 	int		err = EINVAL;
5202 	uint_t		strsize;
5203 	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);
5204 
5205 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5206 	    "==> nxge_get_priv_prop: property %s", pr_name));
5207 
5208 	/* function number */
5209 	if (strcmp(pr_name, "_function_number") == 0) {
5210 		if (is_default)
5211 			return (ENOTSUP);
5212 		(void) snprintf(valstr, sizeof (valstr), "%d",
5213 		    nxgep->function_num);
5214 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5215 		    "==> nxge_get_priv_prop: name %s "
5216 		    "(value %d valstr %s)",
5217 		    pr_name, nxgep->function_num, valstr));
5218 
5219 		err = 0;
5220 		goto done;
5221 	}
5222 
5223 	/* Neptune firmware version */
5224 	if (strcmp(pr_name, "_fw_version") == 0) {
5225 		if (is_default)
5226 			return (ENOTSUP);
5227 		(void) snprintf(valstr, sizeof (valstr), "%s",
5228 		    nxgep->vpd_info.ver);
5229 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5230 		    "==> nxge_get_priv_prop: name %s "
5231 		    "(value %s valstr %s)",
5232 		    pr_name, nxgep->vpd_info.ver, valstr));
5233 
5234 		err = 0;
5235 		goto done;
5236 	}
5237 
5238 	/* port PHY mode */
5239 	if (strcmp(pr_name, "_port_mode") == 0) {
5240 		if (is_default)
5241 			return (ENOTSUP);
5242 		switch (nxgep->mac.portmode) {
5243 		case PORT_1G_COPPER:
5244 			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5245 			    nxgep->hot_swappable_phy ?
5246 			    "[Hot Swappable]" : "");
5247 			break;
5248 		case PORT_1G_FIBER:
5249 			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5250 			    nxgep->hot_swappable_phy ?
5251 			    "[hot swappable]" : "");
5252 			break;
5253 		case PORT_10G_COPPER:
5254 			(void) snprintf(valstr, sizeof (valstr),
5255 			    "10G copper %s",
5256 			    nxgep->hot_swappable_phy ?
5257 			    "[hot swappable]" : "");
5258 			break;
5259 		case PORT_10G_FIBER:
5260 			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5261 			    nxgep->hot_swappable_phy ?
5262 			    "[hot swappable]" : "");
5263 			break;
5264 		case PORT_10G_SERDES:
5265 			(void) snprintf(valstr, sizeof (valstr),
5266 			    "10G serdes %s", nxgep->hot_swappable_phy ?
5267 			    "[hot swappable]" : "");
5268 			break;
5269 		case PORT_1G_SERDES:
5270 			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5271 			    nxgep->hot_swappable_phy ?
5272 			    "[hot swappable]" : "");
5273 			break;
5274 		case PORT_1G_TN1010:
5275 			(void) snprintf(valstr, sizeof (valstr),
5276 			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5277 			    "[hot swappable]" : "");
5278 			break;
5279 		case PORT_10G_TN1010:
5280 			(void) snprintf(valstr, sizeof (valstr),
5281 			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5282 			    "[hot swappable]" : "");
5283 			break;
5284 		case PORT_1G_RGMII_FIBER:
5285 			(void) snprintf(valstr, sizeof (valstr),
5286 			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5287 			    "[hot swappable]" : "");
5288 			break;
5289 		case PORT_HSP_MODE:
5290 			(void) snprintf(valstr, sizeof (valstr),
5291 			    "phy not present[hot swappable]");
5292 			break;
5293 		default:
5294 			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
5295 			    nxgep->hot_swappable_phy ?
5296 			    "[hot swappable]" : "");
5297 			break;
5298 		}
5299 
5300 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5301 		    "==> nxge_get_priv_prop: name %s (value %s)",
5302 		    pr_name, valstr));
5303 
5304 		err = 0;
5305 		goto done;
5306 	}
5307 
5308 	/* Hot swappable PHY */
5309 	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5310 		if (is_default)
5311 			return (ENOTSUP);
5312 		(void) snprintf(valstr, sizeof (valstr), "%s",
5313 		    nxgep->hot_swappable_phy ?
5314 		    "yes" : "no");
5315 
5316 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5317 		    "==> nxge_get_priv_prop: name %s "
5318 		    "(value %d valstr %s)",
5319 		    pr_name, nxgep->hot_swappable_phy, valstr));
5320 
5321 		err = 0;
5322 		goto done;
5323 	}
5324 
5325 
5326 	/* accept jumbo */
5327 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
5328 		if (is_default)
5329 			(void) snprintf(valstr, sizeof (valstr),  "%d", 0);
5330 		else
5331 			(void) snprintf(valstr, sizeof (valstr),
5332 			    "%d", nxgep->mac.is_jumbo);
5333 		err = 0;
5334 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5335 		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
5336 		    pr_name,
5337 		    (uint32_t)param_arr[param_accept_jumbo].value,
5338 		    nxgep->mac.is_jumbo,
5339 		    nxge_jumbo_enable));
5340 
5341 		goto done;
5342 	}
5343 
5344 	/* Receive Interrupt Blanking Parameters */
5345 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5346 		err = 0;
5347 		if (is_default) {
5348 			(void) snprintf(valstr, sizeof (valstr),
5349 			    "%d", RXDMA_RCR_TO_DEFAULT);
5350 			goto done;
5351 		}
5352 
5353 		(void) snprintf(valstr, sizeof (valstr), "%d",
5354 		    nxgep->intr_timeout);
5355 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5356 		    "==> nxge_get_priv_prop: name %s (value %d)",
5357 		    pr_name,
5358 		    (uint32_t)nxgep->intr_timeout));
5359 		goto done;
5360 	}
5361 
5362 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5363 		err = 0;
5364 		if (is_default) {
5365 			(void) snprintf(valstr, sizeof (valstr),
5366 			    "%d", RXDMA_RCR_PTHRES_DEFAULT);
5367 			goto done;
5368 		}
5369 		(void) snprintf(valstr, sizeof (valstr), "%d",
5370 		    nxgep->intr_threshold);
5371 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5372 		    "==> nxge_get_priv_prop: name %s (value %d)",
5373 		    pr_name, (uint32_t)nxgep->intr_threshold));
5374 
5375 		goto done;
5376 	}
5377 
5378 	/* Classification and Load Distribution Configuration */
5379 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5380 		if (is_default) {
5381 			(void) snprintf(valstr, sizeof (valstr), "%x",
5382 			    NXGE_CLASS_FLOW_GEN_SERVER);
5383 			err = 0;
5384 			goto done;
5385 		}
5386 		err = nxge_dld_get_ip_opt(nxgep,
5387 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5388 
5389 		(void) snprintf(valstr, sizeof (valstr), "%x",
5390 		    (int)param_arr[param_class_opt_ipv4_tcp].value);
5391 
5392 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5393 		    "==> nxge_get_priv_prop: %s", valstr));
5394 		goto done;
5395 	}
5396 
5397 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5398 		if (is_default) {
5399 			(void) snprintf(valstr, sizeof (valstr), "%x",
5400 			    NXGE_CLASS_FLOW_GEN_SERVER);
5401 			err = 0;
5402 			goto done;
5403 		}
5404 		err = nxge_dld_get_ip_opt(nxgep,
5405 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5406 
5407 		(void) snprintf(valstr, sizeof (valstr), "%x",
5408 		    (int)param_arr[param_class_opt_ipv4_udp].value);
5409 
5410 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5411 		    "==> nxge_get_priv_prop: %s", valstr));
5412 		goto done;
5413 	}
5414 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5415 		if (is_default) {
5416 			(void) snprintf(valstr, sizeof (valstr), "%x",
5417 			    NXGE_CLASS_FLOW_GEN_SERVER);
5418 			err = 0;
5419 			goto done;
5420 		}
5421 		err = nxge_dld_get_ip_opt(nxgep,
5422 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5423 
5424 		(void) snprintf(valstr, sizeof (valstr), "%x",
5425 		    (int)param_arr[param_class_opt_ipv4_ah].value);
5426 
5427 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5428 		    "==> nxge_get_priv_prop: %s", valstr));
5429 		goto done;
5430 	}
5431 
5432 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5433 		if (is_default) {
5434 			(void) snprintf(valstr, sizeof (valstr), "%x",
5435 			    NXGE_CLASS_FLOW_GEN_SERVER);
5436 			err = 0;
5437 			goto done;
5438 		}
5439 		err = nxge_dld_get_ip_opt(nxgep,
5440 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5441 
5442 		(void) snprintf(valstr, sizeof (valstr), "%x",
5443 		    (int)param_arr[param_class_opt_ipv4_sctp].value);
5444 
5445 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5446 		    "==> nxge_get_priv_prop: %s", valstr));
5447 		goto done;
5448 	}
5449 
5450 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5451 		if (is_default) {
5452 			(void) snprintf(valstr, sizeof (valstr), "%x",
5453 			    NXGE_CLASS_FLOW_GEN_SERVER);
5454 			err = 0;
5455 			goto done;
5456 		}
5457 		err = nxge_dld_get_ip_opt(nxgep,
5458 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5459 
5460 		(void) snprintf(valstr, sizeof (valstr), "%x",
5461 		    (int)param_arr[param_class_opt_ipv6_tcp].value);
5462 
5463 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5464 		    "==> nxge_get_priv_prop: %s", valstr));
5465 		goto done;
5466 	}
5467 
5468 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5469 		if (is_default) {
5470 			(void) snprintf(valstr, sizeof (valstr), "%x",
5471 			    NXGE_CLASS_FLOW_GEN_SERVER);
5472 			err = 0;
5473 			goto done;
5474 		}
5475 		err = nxge_dld_get_ip_opt(nxgep,
5476 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5477 
5478 		(void) snprintf(valstr, sizeof (valstr), "%x",
5479 		    (int)param_arr[param_class_opt_ipv6_udp].value);
5480 
5481 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5482 		    "==> nxge_get_priv_prop: %s", valstr));
5483 		goto done;
5484 	}
5485 
5486 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5487 		if (is_default) {
5488 			(void) snprintf(valstr, sizeof (valstr), "%x",
5489 			    NXGE_CLASS_FLOW_GEN_SERVER);
5490 			err = 0;
5491 			goto done;
5492 		}
5493 		err = nxge_dld_get_ip_opt(nxgep,
5494 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5495 
5496 		(void) snprintf(valstr, sizeof (valstr), "%x",
5497 		    (int)param_arr[param_class_opt_ipv6_ah].value);
5498 
5499 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5500 		    "==> nxge_get_priv_prop: %s", valstr));
5501 		goto done;
5502 	}
5503 
5504 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5505 		if (is_default) {
5506 			(void) snprintf(valstr, sizeof (valstr), "%x",
5507 			    NXGE_CLASS_FLOW_GEN_SERVER);
5508 			err = 0;
5509 			goto done;
5510 		}
5511 		err = nxge_dld_get_ip_opt(nxgep,
5512 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5513 
5514 		(void) snprintf(valstr, sizeof (valstr), "%x",
5515 		    (int)param_arr[param_class_opt_ipv6_sctp].value);
5516 
5517 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5518 		    "==> nxge_get_priv_prop: %s", valstr));
5519 		goto done;
5520 	}
5521 
5522 	/* Software LSO */
5523 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5524 		if (is_default) {
5525 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5526 			err = 0;
5527 			goto done;
5528 		}
5529 		(void) snprintf(valstr, sizeof (valstr),
5530 		    "%d", nxgep->soft_lso_enable);
5531 		err = 0;
5532 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5533 		    "==> nxge_get_priv_prop: name %s (value %d)",
5534 		    pr_name, nxgep->soft_lso_enable));
5535 
5536 		goto done;
5537 	}
5538 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5539 		err = 0;
5540 		if (is_default ||
5541 		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
5542 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5543 			goto done;
5544 		} else {
5545 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5546 			goto done;
5547 		}
5548 	}
5549 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5550 		err = 0;
5551 		if (is_default ||
5552 		    nxgep->param_arr[param_anar_pause].value != 0) {
5553 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5554 			goto done;
5555 		} else {
5556 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5557 			goto done;
5558 		}
5559 	}
5560 
5561 done:
5562 	if (err == 0) {
5563 		strsize = (uint_t)strlen(valstr);
5564 		if (pr_valsize < strsize) {
5565 			err = ENOBUFS;
5566 		} else {
5567 			(void) strlcpy(pr_val, valstr, pr_valsize);
5568 		}
5569 	}
5570 
5571 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5572 	    "<== nxge_get_priv_prop: return %d", err));
5573 	return (err);
5574 }
5575 
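/*
 * Illustrative only: private properties such as the ones handled above
 * are normally read and set from userland with dladm(1M).  A
 * hypothetical session (link name assumed) might look like:
 *
 *	# dladm show-linkprop -p _soft_lso_enable nxge0
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *
 * A value whose string form does not fit in pr_valsize is rejected
 * with ENOBUFS at the done: label above.
 */
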
5576 /*
5577  * Module loading and removing entry points.
5578  */
5579 
5580 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5581     nodev, NULL, D_MP, NULL, nxge_quiesce);
5582 
5583 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
5584 
5585 /*
5586  * Module linkage information for the kernel.
5587  */
5588 static struct modldrv 	nxge_modldrv = {
5589 	&mod_driverops,
5590 	NXGE_DESC_VER,
5591 	&nxge_dev_ops
5592 };
5593 
5594 static struct modlinkage modlinkage = {
5595 	MODREV_1, (void *) &nxge_modldrv, NULL
5596 };
5597 
5598 int
5599 _init(void)
5600 {
5601 	int		status;
5602 
5603 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5604 	mac_init_ops(&nxge_dev_ops, "nxge");
5605 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5606 	if (status != 0) {
5607 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5608 		    "failed to init device soft state"));
5609 		goto _init_exit;
5610 	}
5611 	status = mod_install(&modlinkage);
5612 	if (status != 0) {
5613 		ddi_soft_state_fini(&nxge_list);
5614 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5615 		goto _init_exit;
5616 	}
5617 
5618 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5619 
5620 _init_exit:
5621 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
5622 
5623 	return (status);
5624 }
5625 
5626 int
5627 _fini(void)
5628 {
5629 	int		status;
5630 
5631 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5632 
5633 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5634 
5635 	if (nxge_mblks_pending)
5636 		return (EBUSY);
5637 
5638 	status = mod_remove(&modlinkage);
5639 	if (status != DDI_SUCCESS) {
5640 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
5641 		    "Module removal failed 0x%08x",
5642 		    status));
5643 		goto _fini_exit;
5644 	}
5645 
5646 	mac_fini_ops(&nxge_dev_ops);
5647 
5648 	ddi_soft_state_fini(&nxge_list);
5649 
5650 	MUTEX_DESTROY(&nxge_common_lock);
5651 _fini_exit:
5652 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
5653 
5654 	return (status);
5655 }
5656 
5657 int
5658 _info(struct modinfo *modinfop)
5659 {
5660 	int		status;
5661 
5662 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5663 	status = mod_info(&modlinkage, modinfop);
5664 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5665 
5666 	return (status);
5667 }
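
/*
 * Illustrative only: _init(), _fini() and _info() form the standard
 * loadable-module skeleton.  Because _fini() returns EBUSY while any
 * mblks are still outstanding, a hypothetical manual unload such as
 *
 *	# modunload -i `modinfo | awk '/nxge/ {print $1; exit}'`
 *
 * fails until the driver has reclaimed all of its loaned buffers.
 */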
5668 
5669 /*ARGSUSED*/
5670 static nxge_status_t
5671 nxge_add_intrs(p_nxge_t nxgep)
5672 {
5673 
5674 	int		intr_types;
5675 	int		type = 0;
5676 	int		ddi_status = DDI_SUCCESS;
5677 	nxge_status_t	status = NXGE_OK;
5678 
5679 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5680 
5681 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
5682 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5683 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
5684 	nxgep->nxge_intr_type.intr_added = 0;
5685 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5686 	nxgep->nxge_intr_type.intr_type = 0;
5687 
5688 	if (nxgep->niu_type == N2_NIU) {
5689 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5690 	} else if (nxge_msi_enable) {
5691 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5692 	}
5693 
5694 	/* Get the supported interrupt types */
5695 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5696 	    != DDI_SUCCESS) {
5697 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5698 		    "ddi_intr_get_supported_types failed: status 0x%08x",
5699 		    ddi_status));
5700 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5701 	}
5702 	nxgep->nxge_intr_type.intr_types = intr_types;
5703 
5704 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5705 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5706 
5707 	/*
5708 	 * Solaris MSI-X is not supported yet; use MSI for now.
5709 	 * nxge_msi_enable (1):
5710 	 *	1 - MSI		2 - MSI-X	others - FIXED
5711 	 */
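	/*
	 * Illustrative only: nxge_msi_enable is a global tunable, so a
	 * hypothetical /etc/system entry forcing MSI-X negotiation on a
	 * platform that defaults to MSI would be:
	 *
	 *	set nxge:nxge_msi_enable = 2
	 *
	 * The switch below falls back to MSI or FIXED when the requested
	 * type is not present in the supported-types mask.
	 */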
5712 	switch (nxge_msi_enable) {
5713 	default:
5714 		type = DDI_INTR_TYPE_FIXED;
5715 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5716 		    "use fixed (intx emulation) type %08x",
5717 		    type));
5718 		break;
5719 
5720 	case 2:
5721 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5722 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5723 		if (intr_types & DDI_INTR_TYPE_MSIX) {
5724 			type = DDI_INTR_TYPE_MSIX;
5725 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5726 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5727 			    type));
5728 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
5729 			type = DDI_INTR_TYPE_MSI;
5730 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5731 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5732 			    type));
5733 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5734 			type = DDI_INTR_TYPE_FIXED;
5735 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5736 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5737 			    type));
5738 		}
5739 		break;
5740 
5741 	case 1:
5742 		if (intr_types & DDI_INTR_TYPE_MSI) {
5743 			type = DDI_INTR_TYPE_MSI;
5744 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5745 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5746 			    type));
5747 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
5748 			type = DDI_INTR_TYPE_MSIX;
5749 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5750 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5751 			    type));
5752 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5753 			type = DDI_INTR_TYPE_FIXED;
5754 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5755 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5756 			    type));
5757 		}
5758 	}
5759 
5760 	nxgep->nxge_intr_type.intr_type = type;
5761 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5762 	    type == DDI_INTR_TYPE_FIXED) &&
5763 	    nxgep->nxge_intr_type.niu_msi_enable) {
5764 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5765 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5766 			    " nxge_add_intrs: "
5767 			    " nxge_add_intrs_adv failed: status 0x%08x",
5768 			    status));
5769 			return (status);
5770 		} else {
5771 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5772 			    "interrupts registered : type %d", type));
5773 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
5774 
5775 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5776 			    "\nAdded advanced nxge add_intr_adv "
5777 			    "intr type 0x%x\n", type));
5778 
5779 			return (status);
5780 		}
5781 	}
5782 
5783 	if (!nxgep->nxge_intr_type.intr_registered) {
5784 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
5785 		    "failed to register interrupts"));
5786 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5787 	}
5788 
5789 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
5790 	return (status);
5791 }
5792 
5793 /*ARGSUSED*/
5794 static nxge_status_t
5795 nxge_add_soft_intrs(p_nxge_t nxgep)
5796 {
5797 
5798 	int		ddi_status = DDI_SUCCESS;
5799 	nxge_status_t	status = NXGE_OK;
5800 
5801 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
5802 
5803 	nxgep->resched_id = NULL;
5804 	nxgep->resched_running = B_FALSE;
5805 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
5806 	    &nxgep->resched_id,
5807 	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
5808 	if (ddi_status != DDI_SUCCESS) {
5809 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
5810 		    "ddi_add_softintr failed: status 0x%08x",
5811 		    ddi_status));
5812 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5813 	}
5814 
5815 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
5816 
5817 	return (status);
5818 }
5819 
5820 static nxge_status_t
5821 nxge_add_intrs_adv(p_nxge_t nxgep)
5822 {
5823 	int		intr_type;
5824 	p_nxge_intr_t	intrp;
5825 
5826 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
5827 
5828 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5829 	intr_type = intrp->intr_type;
5830 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
5831 	    intr_type));
5832 
5833 	switch (intr_type) {
5834 	case DDI_INTR_TYPE_MSI: /* 0x2 */
5835 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
5836 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
5837 
5838 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
5839 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
5840 
5841 	default:
5842 		return (NXGE_ERROR);
5843 	}
5844 }
5845 
5846 
5847 /*ARGSUSED*/
5848 static nxge_status_t
5849 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
5850 {
5851 	dev_info_t		*dip = nxgep->dip;
5852 	p_nxge_ldg_t		ldgp;
5853 	p_nxge_intr_t		intrp;
5854 	uint_t			*inthandler;
5855 	void			*arg1, *arg2;
5856 	int			behavior;
5857 	int			nintrs, navail, nrequest;
5858 	int			nactual, nrequired;
5859 	int			inum = 0;
5860 	int			x, y;
5861 	int			ddi_status = DDI_SUCCESS;
5862 	nxge_status_t		status = NXGE_OK;
5863 
5864 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
5865 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5866 	intrp->start_inum = 0;
5867 
5868 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5869 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5870 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5871 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
5872 		    "nintrs: %d", ddi_status, nintrs));
5873 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5874 	}
5875 
5876 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5877 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5878 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5879 		    "ddi_intr_get_navail() failed, status: 0x%x, "
5880 		    "navail: %d", ddi_status, navail));
5881 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5882 	}
5883 
5884 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5885 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
5886 	    nintrs, navail));
5887 
5888 	/* PSARC/2007/453 MSI-X interrupt limit override */
5889 	if (int_type == DDI_INTR_TYPE_MSIX) {
5890 		nrequest = nxge_create_msi_property(nxgep);
5891 		if (nrequest < navail) {
5892 			navail = nrequest;
5893 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5894 			    "nxge_add_intrs_adv_type: nintrs %d "
5895 			    "navail %d (nrequest %d)",
5896 			    nintrs, navail, nrequest));
5897 		}
5898 	}
5899 
5900 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
5901 		/* MSI must be power of 2 */
5902 		if ((navail & 16) == 16) {
5903 			navail = 16;
5904 		} else if ((navail & 8) == 8) {
5905 			navail = 8;
5906 		} else if ((navail & 4) == 4) {
5907 			navail = 4;
5908 		} else if ((navail & 2) == 2) {
5909 			navail = 2;
5910 		} else {
5911 			navail = 1;
5912 		}
5913 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5914 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
5915 		    "navail %d", nintrs, navail));
5916 	}
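
	/*
	 * Illustrative only: for navail below 32, the cascade above is
	 * equivalent to rounding navail down to a power of two.  A sketch
	 * of the same rounding using highbit(9F), which returns the
	 * 1-based index of the highest set bit, would be:
	 *
	 *	navail = 1 << (highbit(navail) - 1);
	 *
	 * navail is known to be nonzero at this point, so highbit()
	 * returns at least 1.
	 */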
5917 
5918 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5919 	    DDI_INTR_ALLOC_NORMAL);
5920 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5921 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5922 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5923 	    navail, &nactual, behavior);
5924 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5925 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5926 		    " ddi_intr_alloc() failed: %d",
5927 		    ddi_status));
5928 		kmem_free(intrp->htable, intrp->intr_size);
5929 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5930 	}
5931 
5932 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5933 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5934 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5935 		    " ddi_intr_get_pri() failed: %d",
5936 		    ddi_status));
5937 		/* Free already allocated interrupts */
5938 		for (y = 0; y < nactual; y++) {
5939 			(void) ddi_intr_free(intrp->htable[y]);
5940 		}
5941 
5942 		kmem_free(intrp->htable, intrp->intr_size);
5943 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5944 	}
5945 
5946 	nrequired = 0;
5947 	switch (nxgep->niu_type) {
5948 	default:
5949 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5950 		break;
5951 
5952 	case N2_NIU:
5953 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5954 		break;
5955 	}
5956 
5957 	if (status != NXGE_OK) {
5958 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5959 		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
5960 		    "failed: 0x%x", status));
5961 		/* Free already allocated interrupts */
5962 		for (y = 0; y < nactual; y++) {
5963 			(void) ddi_intr_free(intrp->htable[y]);
5964 		}
5965 
5966 		kmem_free(intrp->htable, intrp->intr_size);
5967 		return (status);
5968 	}
5969 
5970 	ldgp = nxgep->ldgvp->ldgp;
5971 	for (x = 0; x < nrequired; x++, ldgp++) {
5972 		ldgp->vector = (uint8_t)x;
5973 		ldgp->intdata = SID_DATA(ldgp->func, x);
5974 		arg1 = ldgp->ldvp;
5975 		arg2 = nxgep;
5976 		if (ldgp->nldvs == 1) {
5977 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5978 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5979 			    "nxge_add_intrs_adv_type: "
5980 			    "arg1 0x%x arg2 0x%x: "
5981 			    "1-1 int handler (entry %d intdata 0x%x)\n",
5982 			    arg1, arg2,
5983 			    x, ldgp->intdata));
5984 		} else if (ldgp->nldvs > 1) {
5985 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5986 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5987 			    "nxge_add_intrs_adv_type: "
5988 			    "arg1 0x%x arg2 0x%x: "
5989 			    "nldevs %d int handler "
5990 			    "(entry %d intdata 0x%x)\n",
5991 			    arg1, arg2,
5992 			    ldgp->nldvs, x, ldgp->intdata));
5993 		}
5994 
5995 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5996 		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
5997 		    "htable 0x%llx", x, intrp->htable[x]));
5998 
5999 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6000 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
6001 		    != DDI_SUCCESS) {
6002 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6003 			    "==> nxge_add_intrs_adv_type: failed #%d "
6004 			    "status 0x%x", x, ddi_status));
6005 			for (y = 0; y < intrp->intr_added; y++) {
6006 				(void) ddi_intr_remove_handler(
6007 				    intrp->htable[y]);
6008 			}
6009 			/* Free already allocated intr */
6010 			for (y = 0; y < nactual; y++) {
6011 				(void) ddi_intr_free(intrp->htable[y]);
6012 			}
6013 			kmem_free(intrp->htable, intrp->intr_size);
6014 
6015 			(void) nxge_ldgv_uninit(nxgep);
6016 
6017 			return (NXGE_ERROR | NXGE_DDI_FAILED);
6018 		}
6019 		intrp->intr_added++;
6020 	}
6021 
6022 	intrp->msi_intx_cnt = nactual;
6023 
6024 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6025 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6026 	    navail, nactual,
6027 	    intrp->msi_intx_cnt,
6028 	    intrp->intr_added));
6029 
6030 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6031 
6032 	(void) nxge_intr_ldgv_init(nxgep);
6033 
6034 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6035 
6036 	return (status);
6037 }
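
/*
 * Illustrative only: a minimal sketch of the generic ddi_intr_*(9F)
 * sequence that nxge_add_intrs_adv_type() follows, with the nxge
 * logical-device-group bookkeeping stripped out:
 *
 *	(void) ddi_intr_get_nintrs(dip, type, &nintrs);
 *	(void) ddi_intr_get_navail(dip, type, &navail);
 *	htable = kmem_alloc(navail * sizeof (ddi_intr_handle_t), KM_SLEEP);
 *	(void) ddi_intr_alloc(dip, htable, type, 0, navail, &nactual,
 *	    DDI_INTR_ALLOC_NORMAL);
 *	(void) ddi_intr_get_pri(htable[0], &pri);
 *	for (i = 0; i < nactual; i++)
 *		(void) ddi_intr_add_handler(htable[i], handler, arg1, arg2);
 *	(void) ddi_intr_get_cap(htable[0], &cap);
 *
 * The unwinding on failure (removing handlers, freeing each handle and
 * then the table) is omitted here but essential, as the code above shows.
 */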
6038 
6039 /*ARGSUSED*/
6040 static nxge_status_t
6041 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6042 {
6043 	dev_info_t		*dip = nxgep->dip;
6044 	p_nxge_ldg_t		ldgp;
6045 	p_nxge_intr_t		intrp;
6046 	uint_t			*inthandler;
6047 	void			*arg1, *arg2;
6048 	int			behavior;
6049 	int			nintrs, navail;
6050 	int			nactual, nrequired;
6051 	int			inum = 0;
6052 	int			x, y;
6053 	int			ddi_status = DDI_SUCCESS;
6054 	nxge_status_t		status = NXGE_OK;
6055 
6056 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6057 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6058 	intrp->start_inum = 0;
6059 
6060 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6061 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6062 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6063 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6064 		    "nintrs: %d", ddi_status, nintrs));
6065 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6066 	}
6067 
6068 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6069 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6070 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6071 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6072 		    "navail: %d", ddi_status, navail));
6073 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6074 	}
6075 
6076 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
6077 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6078 	    nintrs, navail));
6079 
6080 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6081 	    DDI_INTR_ALLOC_NORMAL);
6082 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6083 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6084 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6085 	    navail, &nactual, behavior);
6086 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
6087 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6088 		    " ddi_intr_alloc() failed: %d",
6089 		    ddi_status));
6090 		kmem_free(intrp->htable, intrp->intr_size);
6091 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6092 	}
6093 
6094 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6095 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6096 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6097 		    " ddi_intr_get_pri() failed: %d",
6098 		    ddi_status));
6099 		/* Free already allocated interrupts */
6100 		for (y = 0; y < nactual; y++) {
6101 			(void) ddi_intr_free(intrp->htable[y]);
6102 		}
6103 
6104 		kmem_free(intrp->htable, intrp->intr_size);
6105 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6106 	}
6107 
6108 	nrequired = 0;
6109 	switch (nxgep->niu_type) {
6110 	default:
6111 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6112 		break;
6113 
6114 	case N2_NIU:
6115 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6116 		break;
6117 	}
6118 
6119 	if (status != NXGE_OK) {
6120 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6121 		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
6122 		    "failed: 0x%x", status));
6123 		/* Free already allocated interrupts */
6124 		for (y = 0; y < nactual; y++) {
6125 			(void) ddi_intr_free(intrp->htable[y]);
6126 		}
6127 
6128 		kmem_free(intrp->htable, intrp->intr_size);
6129 		return (status);
6130 	}
6131 
6132 	ldgp = nxgep->ldgvp->ldgp;
6133 	for (x = 0; x < nrequired; x++, ldgp++) {
6134 		ldgp->vector = (uint8_t)x;
6135 		if (nxgep->niu_type != N2_NIU) {
6136 			ldgp->intdata = SID_DATA(ldgp->func, x);
6137 		}
6138 
6139 		arg1 = ldgp->ldvp;
6140 		arg2 = nxgep;
6141 		if (ldgp->nldvs == 1) {
6142 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6143 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
6144 			    "nxge_add_intrs_adv_type_fix: "
6145 			    "1-1 int handler(%d) ldg %d ldv %d "
6146 			    "arg1 $%p arg2 $%p\n",
6147 			    x, ldgp->ldg, ldgp->ldvp->ldv,
6148 			    arg1, arg2));
6149 		} else if (ldgp->nldvs > 1) {
6150 			inthandler = (uint_t *)ldgp->sys_intr_handler;
6151 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
6152 			    "nxge_add_intrs_adv_type_fix: "
6153 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
6154 			    "arg1 0x%016llx arg2 0x%016llx\n",
6155 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6156 			    arg1, arg2));
6157 		}
6158 
6159 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6160 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
6161 		    != DDI_SUCCESS) {
6162 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6163 			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
6164 			    "status 0x%x", x, ddi_status));
6165 			for (y = 0; y < intrp->intr_added; y++) {
6166 				(void) ddi_intr_remove_handler(
6167 				    intrp->htable[y]);
6168 			}
6169 			for (y = 0; y < nactual; y++) {
6170 				(void) ddi_intr_free(intrp->htable[y]);
6171 			}
6172 			/* Free already allocated intr */
6173 			kmem_free(intrp->htable, intrp->intr_size);
6174 
6175 			(void) nxge_ldgv_uninit(nxgep);
6176 
6177 			return (NXGE_ERROR | NXGE_DDI_FAILED);
6178 		}
6179 		intrp->intr_added++;
6180 	}
6181 
6182 	intrp->msi_intx_cnt = nactual;
6183 
6184 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6185 
6186 	status = nxge_intr_ldgv_init(nxgep);
6187 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6188 
6189 	return (status);
6190 }
6191 
6192 static void
6193 nxge_remove_intrs(p_nxge_t nxgep)
6194 {
6195 	int		i, inum;
6196 	p_nxge_intr_t	intrp;
6197 
6198 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6199 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6200 	if (!intrp->intr_registered) {
6201 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6202 		    "<== nxge_remove_intrs: interrupts not registered"));
6203 		return;
6204 	}
6205 
6206 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6207 
6208 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6209 		(void) ddi_intr_block_disable(intrp->htable,
6210 		    intrp->intr_added);
6211 	} else {
6212 		for (i = 0; i < intrp->intr_added; i++) {
6213 			(void) ddi_intr_disable(intrp->htable[i]);
6214 		}
6215 	}
6216 
6217 	for (inum = 0; inum < intrp->intr_added; inum++) {
6218 		if (intrp->htable[inum]) {
6219 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
6220 		}
6221 	}
6222 
6223 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6224 		if (intrp->htable[inum]) {
6225 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6226 			    "nxge_remove_intrs: ddi_intr_free inum %d "
6227 			    "msi_intx_cnt %d intr_added %d",
6228 			    inum,
6229 			    intrp->msi_intx_cnt,
6230 			    intrp->intr_added));
6231 
6232 			(void) ddi_intr_free(intrp->htable[inum]);
6233 		}
6234 	}
6235 
6236 	kmem_free(intrp->htable, intrp->intr_size);
6237 	intrp->intr_registered = B_FALSE;
6238 	intrp->intr_enabled = B_FALSE;
6239 	intrp->msi_intx_cnt = 0;
6240 	intrp->intr_added = 0;
6241 
6242 	(void) nxge_ldgv_uninit(nxgep);
6243 
6244 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6245 	    "#msix-request");
6246 
6247 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6248 }
6249 
6250 /*ARGSUSED*/
6251 static void
6252 nxge_remove_soft_intrs(p_nxge_t nxgep)
6253 {
6254 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
6255 	if (nxgep->resched_id) {
6256 		ddi_remove_softintr(nxgep->resched_id);
6257 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6258 		    "==> nxge_remove_soft_intrs: removed"));
6259 		nxgep->resched_id = NULL;
6260 	}
6261 
6262 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
6263 }
6264 
6265 /*ARGSUSED*/
6266 static void
6267 nxge_intrs_enable(p_nxge_t nxgep)
6268 {
6269 	p_nxge_intr_t	intrp;
6270 	int		i;
6271 	int		status;
6272 
6273 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6274 
6275 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6276 
6277 	if (!intrp->intr_registered) {
6278 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6279 		    "interrupts are not registered"));
6280 		return;
6281 	}
6282 
6283 	if (intrp->intr_enabled) {
6284 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6285 		    "<== nxge_intrs_enable: already enabled"));
6286 		return;
6287 	}
6288 
6289 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6290 		status = ddi_intr_block_enable(intrp->htable,
6291 		    intrp->intr_added);
6292 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6293 		    "block enable - status 0x%x total inums #%d\n",
6294 		    status, intrp->intr_added));
6295 	} else {
6296 		for (i = 0; i < intrp->intr_added; i++) {
6297 			status = ddi_intr_enable(intrp->htable[i]);
6298 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6299 			    "ddi_intr_enable:enable - status 0x%x "
6300 			    "total inums %d enable inum #%d\n",
6301 			    status, intrp->intr_added, i));
6302 			if (status == DDI_SUCCESS) {
6303 				intrp->intr_enabled = B_TRUE;
6304 			}
6305 		}
6306 	}
6307 
6308 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6309 }
6310 
6311 /*ARGSUSED*/
6312 static void
6313 nxge_intrs_disable(p_nxge_t nxgep)
6314 {
6315 	p_nxge_intr_t	intrp;
6316 	int		i;
6317 
6318 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6319 
6320 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6321 
6322 	if (!intrp->intr_registered) {
6323 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6324 		    "interrupts are not registered"));
6325 		return;
6326 	}
6327 
6328 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6329 		(void) ddi_intr_block_disable(intrp->htable,
6330 		    intrp->intr_added);
6331 	} else {
6332 		for (i = 0; i < intrp->intr_added; i++) {
6333 			(void) ddi_intr_disable(intrp->htable[i]);
6334 		}
6335 	}
6336 
6337 	intrp->intr_enabled = B_FALSE;
6338 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6339 }
6340 
6341 static nxge_status_t
6342 nxge_mac_register(p_nxge_t nxgep)
6343 {
6344 	mac_register_t *macp;
6345 	int		status;
6346 
6347 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6348 
6349 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6350 		return (NXGE_ERROR);
6351 
6352 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6353 	macp->m_driver = nxgep;
6354 	macp->m_dip = nxgep->dip;
6355 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6356 	macp->m_callbacks = &nxge_m_callbacks;
6357 	macp->m_min_sdu = 0;
6358 	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6359 	    NXGE_EHEADER_VLAN_CRC;
6360 	macp->m_max_sdu = nxgep->mac.default_mtu;
6361 	macp->m_margin = VLAN_TAGSZ;
6362 	macp->m_priv_props = nxge_priv_props;
6363 	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
6364 
6365 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6366 	    "==> nxge_mac_register: instance %d "
6367 	    "max_sdu %d margin %d maxframe %d (header %d)",
6368 	    nxgep->instance,
6369 	    macp->m_max_sdu, macp->m_margin,
6370 	    nxgep->mac.maxframesize,
6371 	    NXGE_EHEADER_VLAN_CRC));
6372 
6373 	status = mac_register(macp, &nxgep->mach);
6374 	mac_free(macp);
6375 
6376 	if (status != 0) {
6377 		cmn_err(CE_WARN,
6378 		    "!nxge_mac_register failed (status %d instance %d)",
6379 		    status, nxgep->instance);
6380 		return (NXGE_ERROR);
6381 	}
6382 
6383 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6384 	    "(instance %d)", nxgep->instance));
6385 
6386 	return (NXGE_OK);
6387 }
6388 
6389 void
6390 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6391 {
6392 	ssize_t		size;
6393 	mblk_t		*nmp;
6394 	uint8_t		blk_id;
6395 	uint8_t		chan;
6396 	uint32_t	err_id;
6397 	err_inject_t	*eip;
6398 
6399 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6400 
6401 	size = 1024;
6402 	nmp = mp->b_cont;
6403 	eip = (err_inject_t *)nmp->b_rptr;
6404 	blk_id = eip->blk_id;
6405 	err_id = eip->err_id;
6406 	chan = eip->chan;
6407 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6408 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6409 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6410 	switch (blk_id) {
6411 	case MAC_BLK_ID:
6412 		break;
6413 	case TXMAC_BLK_ID:
6414 		break;
6415 	case RXMAC_BLK_ID:
6416 		break;
6417 	case MIF_BLK_ID:
6418 		break;
6419 	case IPP_BLK_ID:
6420 		nxge_ipp_inject_err(nxgep, err_id);
6421 		break;
6422 	case TXC_BLK_ID:
6423 		nxge_txc_inject_err(nxgep, err_id);
6424 		break;
6425 	case TXDMA_BLK_ID:
6426 		nxge_txdma_inject_err(nxgep, err_id, chan);
6427 		break;
6428 	case RXDMA_BLK_ID:
6429 		nxge_rxdma_inject_err(nxgep, err_id, chan);
6430 		break;
6431 	case ZCP_BLK_ID:
6432 		nxge_zcp_inject_err(nxgep, err_id);
6433 		break;
6434 	case ESPC_BLK_ID:
6435 		break;
6436 	case FFLP_BLK_ID:
6437 		break;
6438 	case PHY_BLK_ID:
6439 		break;
6440 	case ETHER_SERDES_BLK_ID:
6441 		break;
6442 	case PCIE_SERDES_BLK_ID:
6443 		break;
6444 	case VIR_BLK_ID:
6445 		break;
6446 	}
6447 
6448 	nmp->b_wptr = nmp->b_rptr + size;
6449 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6450 
6451 	miocack(wq, mp, (int)size, 0);
6452 }
6453 
6454 static int
6455 nxge_init_common_dev(p_nxge_t nxgep)
6456 {
6457 	p_nxge_hw_list_t	hw_p;
6458 	dev_info_t 		*p_dip;
6459 
6460 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6461 
6462 	p_dip = nxgep->p_dip;
6463 	MUTEX_ENTER(&nxge_common_lock);
6464 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6465 	    "==> nxge_init_common_dev:func # %d",
6466 	    nxgep->function_num));
6467 	/*
6468 	 * Loop through existing per neptune hardware list.
6469 	 */
6470 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6471 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6472 		    "==> nxge_init_common_device:func # %d "
6473 		    "hw_p $%p parent dip $%p",
6474 		    nxgep->function_num,
6475 		    hw_p,
6476 		    p_dip));
6477 		if (hw_p->parent_devp == p_dip) {
6478 			nxgep->nxge_hw_p = hw_p;
6479 			hw_p->ndevs++;
6480 			hw_p->nxge_p[nxgep->function_num] = nxgep;
6481 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6482 			    "==> nxge_init_common_device:func # %d "
6483 			    "hw_p $%p parent dip $%p "
6484 			    "ndevs %d (found)",
6485 			    nxgep->function_num,
6486 			    hw_p,
6487 			    p_dip,
6488 			    hw_p->ndevs));
6489 			break;
6490 		}
6491 	}
6492 
6493 	if (hw_p == NULL) {
6494 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6495 		    "==> nxge_init_common_device:func # %d "
6496 		    "parent dip $%p (new)",
6497 		    nxgep->function_num,
6498 		    p_dip));
6499 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6500 		hw_p->parent_devp = p_dip;
6501 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
6502 		nxgep->nxge_hw_p = hw_p;
6503 		hw_p->ndevs++;
6504 		hw_p->nxge_p[nxgep->function_num] = nxgep;
6505 		hw_p->next = nxge_hw_list;
6506 		if (nxgep->niu_type == N2_NIU) {
6507 			hw_p->niu_type = N2_NIU;
6508 			hw_p->platform_type = P_NEPTUNE_NIU;
6509 		} else {
6510 			hw_p->niu_type = NIU_TYPE_NONE;
6511 			hw_p->platform_type = P_NEPTUNE_NONE;
6512 		}
6513 
6514 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6515 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6516 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6517 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6518 
6519 		nxge_hw_list = hw_p;
6520 
6521 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6522 	}
6523 
6524 	MUTEX_EXIT(&nxge_common_lock);
6525 
6526 	nxgep->platform_type = hw_p->platform_type;
6527 	if (nxgep->niu_type != N2_NIU) {
6528 		nxgep->niu_type = hw_p->niu_type;
6529 	}
6530 
6531 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6532 	    "==> nxge_init_common_device (nxge_hw_list) $%p",
6533 	    nxge_hw_list));
6534 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6535 
6536 	return (NXGE_OK);
6537 }
6538 
6539 static void
6540 nxge_uninit_common_dev(p_nxge_t nxgep)
6541 {
6542 	p_nxge_hw_list_t	hw_p, h_hw_p;
6543 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
6544 	p_nxge_hw_pt_cfg_t	p_cfgp;
6545 	dev_info_t 		*p_dip;
6546 
6547 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6548 	if (nxgep->nxge_hw_p == NULL) {
6549 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6550 		    "<== nxge_uninit_common_device (no common)"));
6551 		return;
6552 	}
6553 
6554 	MUTEX_ENTER(&nxge_common_lock);
6555 	h_hw_p = nxge_hw_list;
6556 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6557 		p_dip = hw_p->parent_devp;
6558 		if (nxgep->nxge_hw_p == hw_p &&
6559 		    p_dip == nxgep->p_dip &&
6560 		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6561 		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6562 
6563 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6564 			    "==> nxge_uninit_common_device:func # %d "
6565 			    "hw_p $%p parent dip $%p "
6566 			    "ndevs %d (found)",
6567 			    nxgep->function_num,
6568 			    hw_p,
6569 			    p_dip,
6570 			    hw_p->ndevs));
6571 
6572 			/*
6573 			 * Release the RDC table, a shared resource
6574 			 * of the nxge hardware.  The RDC table was
6575 			 * assigned to this instance of nxge in
6576 			 * nxge_use_cfg_dma_config().
6577 			 */
6578 			if (!isLDOMguest(nxgep)) {
6579 				p_dma_cfgp =
6580 				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6581 				p_cfgp =
6582 				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6583 				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
6584 				    p_cfgp->def_mac_rxdma_grpid);
6585 
6586 				/* Cleanup any outstanding groups.  */
6587 				nxge_grp_cleanup(nxgep);
6588 			}
6589 
6590 			if (hw_p->ndevs) {
6591 				hw_p->ndevs--;
6592 			}
6593 			hw_p->nxge_p[nxgep->function_num] = NULL;
6594 			if (!hw_p->ndevs) {
6595 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6596 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6597 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6598 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6599 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6600 				    "==> nxge_uninit_common_device: "
6601 				    "func # %d "
6602 				    "hw_p $%p parent dip $%p "
6603 				    "ndevs %d (last)",
6604 				    nxgep->function_num,
6605 				    hw_p,
6606 				    p_dip,
6607 				    hw_p->ndevs));
6608 
6609 				nxge_hio_uninit(nxgep);
6610 
6611 				if (hw_p == nxge_hw_list) {
6612 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6613 					    "==> nxge_uninit_common_device:"
6614 					    "remove head func # %d "
6615 					    "hw_p $%p parent dip $%p "
6616 					    "ndevs %d (head)",
6617 					    nxgep->function_num,
6618 					    hw_p,
6619 					    p_dip,
6620 					    hw_p->ndevs));
6621 					nxge_hw_list = hw_p->next;
6622 				} else {
6623 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6624 					    "==> nxge_uninit_common_device:"
6625 					    "remove middle func # %d "
6626 					    "hw_p $%p parent dip $%p "
6627 					    "ndevs %d (middle)",
6628 					    nxgep->function_num,
6629 					    hw_p,
6630 					    p_dip,
6631 					    hw_p->ndevs));
6632 					h_hw_p->next = hw_p->next;
6633 				}
6634 
6635 				nxgep->nxge_hw_p = NULL;
6636 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6637 			}
6638 			break;
6639 		} else {
6640 			h_hw_p = hw_p;
6641 		}
6642 	}
6643 
6644 	MUTEX_EXIT(&nxge_common_lock);
6645 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6646 	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6647 	    nxge_hw_list));
6648 
6649 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
6650 }
6651 
6652 /*
6653  * Determines the number of ports from the niu_type or the platform type.
6654  * Returns the number of ports, or returns zero on failure.
6655  */
6656 
6657 int
6658 nxge_get_nports(p_nxge_t nxgep)
6659 {
6660 	int	nports = 0;
6661 
6662 	switch (nxgep->niu_type) {
6663 	case N2_NIU:
6664 	case NEPTUNE_2_10GF:
6665 		nports = 2;
6666 		break;
6667 	case NEPTUNE_4_1GC:
6668 	case NEPTUNE_2_10GF_2_1GC:
6669 	case NEPTUNE_1_10GF_3_1GC:
6670 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
6671 	case NEPTUNE_2_10GF_2_1GRF:
6672 		nports = 4;
6673 		break;
6674 	default:
6675 		switch (nxgep->platform_type) {
6676 		case P_NEPTUNE_NIU:
6677 		case P_NEPTUNE_ATLAS_2PORT:
6678 			nports = 2;
6679 			break;
6680 		case P_NEPTUNE_ATLAS_4PORT:
6681 		case P_NEPTUNE_MARAMBA_P0:
6682 		case P_NEPTUNE_MARAMBA_P1:
6683 		case P_NEPTUNE_ALONSO:
6684 			nports = 4;
6685 			break;
6686 		default:
6687 			break;
6688 		}
6689 		break;
6690 	}
6691 
6692 	return (nports);
6693 }
6694 
6695 /*
6696  * The following two functions are to support
6697  * PSARC/2007/453 MSI-X interrupt limit override.
6698  */
6699 static int
6700 nxge_create_msi_property(p_nxge_t nxgep)
6701 {
6702 	int	nmsi;
6703 	extern	int ncpus;
6704 
6705 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6706 
6707 	switch (nxgep->mac.portmode) {
6708 	case PORT_10G_COPPER:
6709 	case PORT_10G_FIBER:
6710 	case PORT_10G_TN1010:
6711 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6712 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6713 		/*
6714 		 * The maximum MSI-X requested will be 8.
6715 		 * If the # of CPUs is less than 8, we will request
6716 		 * # MSI-X based on the # of CPUs.
6717 		 */
6718 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
6719 			nmsi = NXGE_MSIX_REQUEST_10G;
6720 		} else {
6721 			nmsi = ncpus;
6722 		}
6723 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6724 		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6725 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6726 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6727 		break;
6728 
6729 	default:
6730 		nmsi = NXGE_MSIX_REQUEST_1G;
6731 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6732 		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
6733 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6734 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6735 		break;
6736 	}
6737 
6738 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
6739 	return (nmsi);
6740 }
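
/*
 * Worked example: on a 10G port the function above requests
 * MIN(ncpus, NXGE_MSIX_REQUEST_10G) vectors, e.g. 4 vectors on a 4-CPU
 * host and 8 on anything with 8 or more CPUs.  The returned count feeds
 * back into nxge_add_intrs_adv_type(), where it caps navail before
 * ddi_intr_alloc() is called.
 */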
6741 
6742 /* ARGSUSED */
6743 static int
6744 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
6745     void *pr_val)
6746 {
6747 	int err = 0;
6748 	link_flowctrl_t fl;
6749 
6750 	switch (pr_num) {
6751 	case MAC_PROP_AUTONEG:
6752 		*(uint8_t *)pr_val = 1;
6753 		break;
6754 	case MAC_PROP_FLOWCTRL:
6755 		if (pr_valsize < sizeof (link_flowctrl_t))
6756 			return (EINVAL);
6757 		fl = LINK_FLOWCTRL_RX;
6758 		bcopy(&fl, pr_val, sizeof (fl));
6759 		break;
6760 	case MAC_PROP_ADV_1000FDX_CAP:
6761 	case MAC_PROP_EN_1000FDX_CAP:
6762 		*(uint8_t *)pr_val = 1;
6763 		break;
6764 	case MAC_PROP_ADV_100FDX_CAP:
6765 	case MAC_PROP_EN_100FDX_CAP:
6766 		*(uint8_t *)pr_val = 1;
6767 		break;
6768 	default:
6769 		err = ENOTSUP;
6770 		break;
6771 	}
6772 	return (err);
6773 }
6774 
6775 
6776 /*
6777  * The following is a software workaround for the Neptune hardware's
6778  * interrupt bug: the Neptune hardware may generate spurious interrupts
6779  * after an interrupt handler has been removed.
6780  */
6781 #define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
6782 #define	NXGE_PIM_RESET			(1ULL << 29)
6783 #define	NXGE_GLU_RESET			(1ULL << 30)
6784 #define	NXGE_NIU_RESET			(1ULL << 31)
6785 #define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
6786 					NXGE_GLU_RESET |	\
6787 					NXGE_NIU_RESET)
6788 
6789 #define	NXGE_WAIT_QUITE_TIME		200000
6790 #define	NXGE_WAIT_QUITE_RETRY		40
6791 #define	NXGE_PCI_RESET_WAIT		1000000 /* one second */
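
/*
 * For reference, NXGE_PCI_RESET_ALL sets bits 29-31 of the port logic
 * register at offset 0x98, i.e. a mask of 0xE0000000 OR-ed into the
 * value read back in nxge_niu_peu_reset() below.
 */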
6792 
6793 static void
6794 nxge_niu_peu_reset(p_nxge_t nxgep)
6795 {
6796 	uint32_t	rvalue;
6797 	p_nxge_hw_list_t hw_p;
6798 	p_nxge_t	fnxgep;
6799 	int		i, j;
6800 
6801 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
6802 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
6803 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6804 		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
6805 		return;
6806 	}
6807 
6808 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6809 	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
6810 	    hw_p->flags, nxgep->nxge_link_poll_timerid,
6811 	    nxgep->nxge_timerid));
6812 
6813 	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
6814 	/*
6815 	 * Make sure the other instances on the same hardware
6816 	 * have stopped sending PIOs and are in a quiescent state.
6817 	 */
6818 	for (i = 0; i < NXGE_MAX_PORTS; i++) {
6819 		fnxgep = hw_p->nxge_p[i];
6820 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6821 		    "==> nxge_niu_peu_reset: checking entry %d "
6822 		    "nxgep $%p", i, fnxgep));
6823 #ifdef	NXGE_DEBUG
6824 		if (fnxgep) {
6825 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6826 			    "==> nxge_niu_peu_reset: entry %d (function %d) "
6827 			    "link timer id %d hw timer id %d",
6828 			    i, fnxgep->function_num,
6829 			    fnxgep->nxge_link_poll_timerid,
6830 			    fnxgep->nxge_timerid));
6831 		}
6832 #endif
6833 		if (fnxgep && fnxgep != nxgep &&
6834 		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
6835 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6836 			    "==> nxge_niu_peu_reset: checking $%p "
6837 			    "(function %d) timer ids",
6838 			    fnxgep, fnxgep->function_num));
6839 			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
6840 				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6841 				    "==> nxge_niu_peu_reset: waiting"));
6842 				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
6843 				if (!fnxgep->nxge_timerid &&
6844 				    !fnxgep->nxge_link_poll_timerid) {
6845 					break;
6846 				}
6847 			}
6848 			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
6849 			if (fnxgep->nxge_timerid ||
6850 			    fnxgep->nxge_link_poll_timerid) {
6851 				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
6852 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6853 				    "<== nxge_niu_peu_reset: cannot reset "
6854 				    "hardware (devices are still in use)"));
6855 				return;
6856 			}
6857 		}
6858 	}
6859 
6860 	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
6861 		hw_p->flags |= COMMON_RESET_NIU_PCI;
6862 		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
6863 		    NXGE_PCI_PORT_LOGIC_OFFSET);
6864 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6865 		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
6866 		    "(data 0x%x)",
6867 		    NXGE_PCI_PORT_LOGIC_OFFSET,
6868 		    NXGE_PCI_PORT_LOGIC_OFFSET,
6869 		    rvalue));
6870 
6871 		rvalue |= NXGE_PCI_RESET_ALL;
6872 		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
6873 		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
6874 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6875 		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
6876 		    rvalue));
6877 
6878 		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
6879 	}
6880 
6881 	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
6882 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
6883 }
6884 
6885 static void
6886 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
6887 {
6888 	p_dev_regs_t 	dev_regs;
6889 	uint32_t	value;
6890 
6891 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
6892 
6893 	if (!nxge_set_replay_timer) {
6894 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6895 		    "==> nxge_set_pci_replay_timeout: will not change "
6896 		    "the timeout"));
6897 		return;
6898 	}
6899 
6900 	dev_regs = nxgep->dev_regs;
6901 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6902 	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
6903 	    dev_regs, dev_regs->nxge_pciregh));
6904 
6905 	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
6906 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6907 		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
6908 		    "no PCI handle",
6909 		    dev_regs));
6910 		return;
6911 	}
6912 	value = (pci_config_get32(dev_regs->nxge_pciregh,
6913 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
6914 	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
6915 
6916 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6917 	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
6918 	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
6919 	    pci_config_get32(dev_regs->nxge_pciregh,
6920 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
6921 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
6922 
6923 	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
6924 	    value);
6925 
6926 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6927 	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
6928 	    pci_config_get32(dev_regs->nxge_pciregh,
6929 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
6930 
6931 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
6932 }
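
/*
 * Illustrative only: the routine above OR-s the new replay timeout into
 * whatever value the register already holds.  If the intent were to
 * replace the field outright, a conventional read-modify-write sketch
 * (the mask name is assumed; this driver does not define one) would
 * clear the field first:
 *
 *	value = pci_config_get32(h, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	value &= ~PCI_REPLAY_TIMEOUT_MASK;
 *	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(h, PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);
 */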
6933 
6934 /*
6935  * quiesce(9E) entry point.
6936  *
6937  * This function is called when the system is single-threaded at high
6938  * PIL with preemption disabled. Therefore, this function must not be
6939  * PIL with preemption disabled.  Therefore, this function must not
6940  * block.
6941  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6942  * DDI_FAILURE indicates an error condition and should almost never happen.
6943  */
6944 static int
6945 nxge_quiesce(dev_info_t *dip)
6946 {
6947 	int instance = ddi_get_instance(dip);
6948 	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
6949 
6950 	if (nxgep == NULL)
6951 		return (DDI_FAILURE);
6952 
6953 	/* Turn off debugging */
6954 	nxge_debug_level = NO_DEBUG;
6955 	nxgep->nxge_debug_level = NO_DEBUG;
6956 	npi_debug_level = NO_DEBUG;
6957 
6958 	/*
6959 	 * Stop link monitor only when linkchkmod is interrupt based
6960 	 */
6961 	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
6962 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
6963 	}
6964 
6965 	(void) nxge_intr_hw_disable(nxgep);
6966 
6967 	/*
6968 	 * Reset the receive MAC side.
6969 	 */
6970 	(void) nxge_rx_mac_disable(nxgep);
6971 
6972 	/* Disable and soft reset the IPP */
6973 	if (!isLDOMguest(nxgep))
6974 		(void) nxge_ipp_disable(nxgep);
6975 
6976 	/*
6977 	 * Reset the transmit/receive DMA side.
6978 	 */
6979 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
6980 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
6981 
6982 	/*
6983 	 * Reset the transmit MAC side.
6984 	 */
6985 	(void) nxge_tx_mac_disable(nxgep);
6986 
6987 	return (DDI_SUCCESS);
6988 }
6989