xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_main.c (revision 051aabe6136ff13e81542a427e9693ffe1503525)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
30  */
31 #include	<sys/nxge/nxge_impl.h>
32 #include	<sys/nxge/nxge_hio.h>
33 #include	<sys/nxge/nxge_rxdma.h>
34 #include	<sys/pcie.h>
35 
36 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
37 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
38 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
39 /*
40  * PSARC/2007/453 MSI-X interrupt limit override
41  * (This PSARC case is limited to MSI-X vectors
42  *  and SPARC platforms only).
43  */
44 #if defined(_BIG_ENDIAN)
45 uint32_t	nxge_msi_enable = 2;
46 #else
47 uint32_t	nxge_msi_enable = 1;
48 #endif
49 
50 /*
51  * Software workaround for a Neptune (PCI-E)
52  * hardware interrupt bug which the hardware
53  * may generate spurious interrupts after the
54  * device interrupt handler was removed. If this flag
55  * is enabled, the driver will reset the
56  * hardware when devices are being detached.
57  */
58 uint32_t	nxge_peu_reset_enable = 0;
59 
60 /*
61  * Software workaround for the hardware
62  * checksum bugs that affect packet transmission
63  * and receive:
64  *
65  * Usage of nxge_cksum_offload:
66  *
67  *  (1) nxge_cksum_offload = 0 (default):
68  *	- transmits packets:
69  *	  TCP: uses the hardware checksum feature.
70  *	  UDP: driver will compute the software checksum
71  *	       based on the partial checksum computed
72  *	       by the IP layer.
73  *	- receives packets
74  *	  TCP: marks packets checksum flags based on hardware result.
75  *	  UDP: will not mark checksum flags.
76  *
77  *  (2) nxge_cksum_offload = 1:
78  *	- transmit packets:
79  *	  TCP/UDP: uses the hardware checksum feature.
80  *	- receives packets
81  *	  TCP/UDP: marks packet checksum flags based on hardware result.
82  *
83  *  (3) nxge_cksum_offload = 2:
84  *	- The driver will not register its checksum capability.
85  *	  Checksum for both TCP and UDP will be computed
86  *	  by the stack.
87  *	- The software LSO is not allowed in this case.
88  *
89  *  (4) nxge_cksum_offload > 2:
90  *	- Will be treated as it is set to 2
91  *	  (stack will compute the checksum).
92  *
93  *  (5) If the hardware bug is fixed, this workaround
94  *	needs to be updated accordingly to reflect
95  *	the new hardware revision.
96  */
97 uint32_t	nxge_cksum_offload = 0;
98 
99 /*
100  * Globals: tunable parameters (/etc/system or adb)
101  *
102  */
103 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
104 uint32_t 	nxge_rbr_spare_size = 0;
105 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
106 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
107 boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
108 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
109 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
110 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
111 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
112 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
113 boolean_t	nxge_jumbo_enable = B_FALSE;
114 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
115 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
116 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
117 
118 /* MAX LSO size */
119 #define		NXGE_LSO_MAXLEN	65535
120 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
121 
122 /*
123  * Debugging flags:
124  *		nxge_no_tx_lb : transmit load balancing
125  *		nxge_tx_lb_policy: 0 - TCP port (default)
126  *				   3 - DEST MAC
127  */
128 uint32_t 	nxge_no_tx_lb = 0;
129 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
130 
131 /*
132  * Add tunable to reduce the amount of time spent in the
133  * ISR doing Rx Processing.
134  */
135 uint32_t nxge_max_rx_pkts = 1024;
136 
137 /*
138  * Tunables to manage the receive buffer blocks.
139  *
140  * nxge_rx_threshold_hi: copy all buffers.
141  * nxge_rx_bcopy_size_type: receive buffer block size type.
142  * nxge_rx_threshold_lo: copy only up to tunable block size type.
143  */
144 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
145 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
146 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
147 
148 /* Use kmem_alloc() to allocate data buffers. */
149 #if defined(_BIG_ENDIAN)
150 uint32_t	nxge_use_kmem_alloc = 1;
151 #else
152 uint32_t	nxge_use_kmem_alloc = 0;
153 #endif
154 
155 rtrace_t npi_rtracebuf;
156 
157 /*
158  * The hardware sometimes fails to allow enough time for the link partner
159  * to send an acknowledgement for packets that the hardware sent to it. The
160  * hardware resends the packets earlier than it should be in those instances.
161  * This behavior caused some switches to acknowledge the wrong packets
162  * and it triggered the fatal error.
163  * This software workaround is to set the replay timer to a value
164  * suggested by the hardware team.
165  *
166  * PCI config space replay timer register:
167  *     The following replay timeout value is 0xc
168  *     for bit 14:18.
169  */
170 #define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
171 #define	PCI_REPLAY_TIMEOUT_SHIFT	14
172 
173 uint32_t	nxge_set_replay_timer = 1;
174 uint32_t	nxge_replay_timeout = 0xc;
175 
176 #if	defined(sun4v)
177 /*
178  * Hypervisor N2/NIU services information.
179  */
180 static hsvc_info_t niu_hsvc = {
181 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
182 	NIU_MINOR_VER, "nxge"
183 };
184 
185 static int nxge_hsvc_register(p_nxge_t);
186 #endif
187 
188 /*
189  * Function Prototypes
190  */
191 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
192 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
193 static void nxge_unattach(p_nxge_t);
194 
195 #if NXGE_PROPERTY
196 static void nxge_remove_hard_properties(p_nxge_t);
197 #endif
198 
199 /*
200  * These two functions are required by nxge_hio.c
201  */
202 extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
203 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
204 
205 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
206 
207 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
208 static void nxge_destroy_mutexes(p_nxge_t);
209 
210 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
211 static void nxge_unmap_regs(p_nxge_t nxgep);
212 #ifdef	NXGE_DEBUG
213 static void nxge_test_map_regs(p_nxge_t nxgep);
214 #endif
215 
216 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
217 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
218 static void nxge_remove_intrs(p_nxge_t nxgep);
219 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
220 
221 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
222 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
223 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
224 static void nxge_intrs_enable(p_nxge_t nxgep);
225 static void nxge_intrs_disable(p_nxge_t nxgep);
226 
227 static void nxge_suspend(p_nxge_t);
228 static nxge_status_t nxge_resume(p_nxge_t);
229 
230 static nxge_status_t nxge_setup_dev(p_nxge_t);
231 static void nxge_destroy_dev(p_nxge_t);
232 
233 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
234 static void nxge_free_mem_pool(p_nxge_t);
235 
236 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
237 static void nxge_free_rx_mem_pool(p_nxge_t);
238 
239 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
240 static void nxge_free_tx_mem_pool(p_nxge_t);
241 
242 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
243 	struct ddi_dma_attr *,
244 	size_t, ddi_device_acc_attr_t *, uint_t,
245 	p_nxge_dma_common_t);
246 
247 static void nxge_dma_mem_free(p_nxge_dma_common_t);
248 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
249 
250 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
251 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
252 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
253 
254 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
255 	p_nxge_dma_common_t *, size_t);
256 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
257 
258 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
259 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
260 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
261 
262 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
263 	p_nxge_dma_common_t *,
264 	size_t);
265 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
266 
267 static int nxge_init_common_dev(p_nxge_t);
268 static void nxge_uninit_common_dev(p_nxge_t);
269 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
270     char *, caddr_t);
271 
272 /*
273  * The next declarations are for the GLDv3 interface.
274  */
275 static int nxge_m_start(void *);
276 static void nxge_m_stop(void *);
277 static int nxge_m_unicst(void *, const uint8_t *);
278 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
279 static int nxge_m_promisc(void *, boolean_t);
280 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
281 static void nxge_m_resources(void *);
282 mblk_t *nxge_m_tx(void *arg, mblk_t *);
283 static nxge_status_t nxge_mac_register(p_nxge_t);
284 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
285 	mac_addr_slot_t slot);
286 void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
287 	boolean_t factory);
288 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
289 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
290 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
291 static	boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
292 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
293     uint_t, const void *);
294 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
295     uint_t, uint_t, void *);
296 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
297     const void *);
298 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
299     void *);
300 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
301 
302 static void nxge_niu_peu_reset(p_nxge_t nxgep);
303 static void nxge_set_pci_replay_timeout(nxge_t *);
304 
305 mac_priv_prop_t nxge_priv_props[] = {
306 	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
307 	{"_adv_pause_cap", MAC_PROP_PERM_RW},
308 	{"_function_number", MAC_PROP_PERM_READ},
309 	{"_fw_version", MAC_PROP_PERM_READ},
310 	{"_port_mode", MAC_PROP_PERM_READ},
311 	{"_hot_swap_phy", MAC_PROP_PERM_READ},
312 	{"_accept_jumbo", MAC_PROP_PERM_RW},
313 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
314 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
315 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
316 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
317 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
318 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
319 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
320 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
321 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
322 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
323 	{"_soft_lso_enable", MAC_PROP_PERM_RW}
324 };
325 
326 #define	NXGE_MAX_PRIV_PROPS	\
327 	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))
328 
329 #define	NXGE_M_CALLBACK_FLAGS\
330 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
331 
332 
333 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
334 #define	MAX_DUMP_SZ 256
335 
336 #define	NXGE_M_CALLBACK_FLAGS	\
337 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
338 
339 mac_callbacks_t nxge_m_callbacks = {
340 	NXGE_M_CALLBACK_FLAGS,
341 	nxge_m_stat,
342 	nxge_m_start,
343 	nxge_m_stop,
344 	nxge_m_promisc,
345 	nxge_m_multicst,
346 	nxge_m_unicst,
347 	nxge_m_tx,
348 	nxge_m_resources,
349 	nxge_m_ioctl,
350 	nxge_m_getcapab,
351 	NULL,
352 	NULL,
353 	nxge_m_setprop,
354 	nxge_m_getprop
355 };
356 
357 void
358 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
359 
360 /* PSARC/2007/453 MSI-X interrupt limit override. */
361 #define	NXGE_MSIX_REQUEST_10G	8
362 #define	NXGE_MSIX_REQUEST_1G	2
363 static int nxge_create_msi_property(p_nxge_t);
364 
365 /*
366  * These global variables control the message
367  * output.
368  */
369 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
370 uint64_t nxge_debug_level;
371 
372 /*
373  * This list contains the instance structures for the Neptune
374  * devices present in the system. The lock exists to guarantee
375  * mutually exclusive access to the list.
376  */
377 void 			*nxge_list = NULL;
378 
379 void			*nxge_hw_list = NULL;
380 nxge_os_mutex_t 	nxge_common_lock;
381 
382 extern uint64_t 	npi_debug_level;
383 
384 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
385 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
386 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
387 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
388 extern void		nxge_fm_init(p_nxge_t,
389 					ddi_device_acc_attr_t *,
390 					ddi_device_acc_attr_t *,
391 					ddi_dma_attr_t *);
392 extern void		nxge_fm_fini(p_nxge_t);
393 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
394 
395 /*
396  * Count used to maintain the number of buffers being used
397  * by Neptune instances and loaned up to the upper layers.
398  */
399 uint32_t nxge_mblks_pending = 0;
400 
401 /*
402  * Device register access attributes for PIO.
403  */
404 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
405 	DDI_DEVICE_ATTR_V0,
406 	DDI_STRUCTURE_LE_ACC,
407 	DDI_STRICTORDER_ACC,
408 };
409 
410 /*
411  * Device descriptor access attributes for DMA.
412  */
413 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
414 	DDI_DEVICE_ATTR_V0,
415 	DDI_STRUCTURE_LE_ACC,
416 	DDI_STRICTORDER_ACC
417 };
418 
419 /*
420  * Device buffer access attributes for DMA.
421  */
422 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
423 	DDI_DEVICE_ATTR_V0,
424 	DDI_STRUCTURE_BE_ACC,
425 	DDI_STRICTORDER_ACC
426 };
427 
428 ddi_dma_attr_t nxge_desc_dma_attr = {
429 	DMA_ATTR_V0,		/* version number. */
430 	0,			/* low address */
431 	0xffffffffffffffff,	/* high address */
432 	0xffffffffffffffff,	/* address counter max */
433 #ifndef NIU_PA_WORKAROUND
434 	0x100000,		/* alignment */
435 #else
436 	0x2000,
437 #endif
438 	0xfc00fc,		/* dlim_burstsizes */
439 	0x1,			/* minimum transfer size */
440 	0xffffffffffffffff,	/* maximum transfer size */
441 	0xffffffffffffffff,	/* maximum segment size */
442 	1,			/* scatter/gather list length */
443 	(unsigned int) 1,	/* granularity */
444 	0			/* attribute flags */
445 };
446 
447 ddi_dma_attr_t nxge_tx_dma_attr = {
448 	DMA_ATTR_V0,		/* version number. */
449 	0,			/* low address */
450 	0xffffffffffffffff,	/* high address */
451 	0xffffffffffffffff,	/* address counter max */
452 #if defined(_BIG_ENDIAN)
453 	0x2000,			/* alignment */
454 #else
455 	0x1000,			/* alignment */
456 #endif
457 	0xfc00fc,		/* dlim_burstsizes */
458 	0x1,			/* minimum transfer size */
459 	0xffffffffffffffff,	/* maximum transfer size */
460 	0xffffffffffffffff,	/* maximum segment size */
461 	5,			/* scatter/gather list length */
462 	(unsigned int) 1,	/* granularity */
463 	0			/* attribute flags */
464 };
465 
466 ddi_dma_attr_t nxge_rx_dma_attr = {
467 	DMA_ATTR_V0,		/* version number. */
468 	0,			/* low address */
469 	0xffffffffffffffff,	/* high address */
470 	0xffffffffffffffff,	/* address counter max */
471 	0x2000,			/* alignment */
472 	0xfc00fc,		/* dlim_burstsizes */
473 	0x1,			/* minimum transfer size */
474 	0xffffffffffffffff,	/* maximum transfer size */
475 	0xffffffffffffffff,	/* maximum segment size */
476 	1,			/* scatter/gather list length */
477 	(unsigned int) 1,	/* granularity */
478 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
479 };
480 
481 ddi_dma_lim_t nxge_dma_limits = {
482 	(uint_t)0,		/* dlim_addr_lo */
483 	(uint_t)0xffffffff,	/* dlim_addr_hi */
484 	(uint_t)0xffffffff,	/* dlim_cntr_max */
485 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
486 	0x1,			/* dlim_minxfer */
487 	1024			/* dlim_speed */
488 };
489 
490 dma_method_t nxge_force_dma = DVMA;
491 
492 /*
493  * dma chunk sizes.
494  *
495  * Try to allocate the largest possible size
496  * so that fewer number of dma chunks would be managed
497  */
498 #ifdef NIU_PA_WORKAROUND
499 size_t alloc_sizes [] = {0x2000};
500 #else
501 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
502 		0x10000, 0x20000, 0x40000, 0x80000,
503 		0x100000, 0x200000, 0x400000, 0x800000,
504 		0x1000000, 0x2000000, 0x4000000};
505 #endif
506 
507 /*
508  * Translate "dev_t" to a pointer to the associated "dev_info_t".
509  */
510 
511 extern void nxge_get_environs(nxge_t *);
512 
513 static int
514 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
515 {
516 	p_nxge_t	nxgep = NULL;
517 	int		instance;
518 	int		status = DDI_SUCCESS;
519 	uint8_t		portn;
520 	nxge_mmac_t	*mmac_info;
521 
522 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
523 
524 	/*
525 	 * Get the device instance since we'll need to setup
526 	 * or retrieve a soft state for this instance.
527 	 */
528 	instance = ddi_get_instance(dip);
529 
530 	switch (cmd) {
531 	case DDI_ATTACH:
532 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
533 		break;
534 
535 	case DDI_RESUME:
536 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
537 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
538 		if (nxgep == NULL) {
539 			status = DDI_FAILURE;
540 			break;
541 		}
542 		if (nxgep->dip != dip) {
543 			status = DDI_FAILURE;
544 			break;
545 		}
546 		if (nxgep->suspended == DDI_PM_SUSPEND) {
547 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
548 		} else {
549 			status = nxge_resume(nxgep);
550 		}
551 		goto nxge_attach_exit;
552 
553 	case DDI_PM_RESUME:
554 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
555 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
556 		if (nxgep == NULL) {
557 			status = DDI_FAILURE;
558 			break;
559 		}
560 		if (nxgep->dip != dip) {
561 			status = DDI_FAILURE;
562 			break;
563 		}
564 		status = nxge_resume(nxgep);
565 		goto nxge_attach_exit;
566 
567 	default:
568 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
569 		status = DDI_FAILURE;
570 		goto nxge_attach_exit;
571 	}
572 
573 
574 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
575 		status = DDI_FAILURE;
576 		goto nxge_attach_exit;
577 	}
578 
579 	nxgep = ddi_get_soft_state(nxge_list, instance);
580 	if (nxgep == NULL) {
581 		status = NXGE_ERROR;
582 		goto nxge_attach_fail2;
583 	}
584 
585 	nxgep->nxge_magic = NXGE_MAGIC;
586 
587 	nxgep->drv_state = 0;
588 	nxgep->dip = dip;
589 	nxgep->instance = instance;
590 	nxgep->p_dip = ddi_get_parent(dip);
591 	nxgep->nxge_debug_level = nxge_debug_level;
592 	npi_debug_level = nxge_debug_level;
593 
594 	/* Are we a guest running in a Hybrid I/O environment? */
595 	nxge_get_environs(nxgep);
596 
597 	status = nxge_map_regs(nxgep);
598 
599 	if (status != NXGE_OK) {
600 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
601 		goto nxge_attach_fail3;
602 	}
603 
604 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
605 	    &nxge_dev_desc_dma_acc_attr,
606 	    &nxge_rx_dma_attr);
607 
608 	/* Create & initialize the per-Neptune data structure */
609 	/* (even if we're a guest). */
610 	status = nxge_init_common_dev(nxgep);
611 	if (status != NXGE_OK) {
612 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
613 		    "nxge_init_common_dev failed"));
614 		goto nxge_attach_fail4;
615 	}
616 
617 	/*
618 	 * Software workaround: set the replay timer.
619 	 */
620 	if (nxgep->niu_type != N2_NIU) {
621 		nxge_set_pci_replay_timeout(nxgep);
622 	}
623 
624 #if defined(sun4v)
625 	/* This is required by nxge_hio_init(), which follows. */
626 	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
627 		goto nxge_attach_fail;
628 #endif
629 
630 	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
631 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
632 		    "nxge_hio_init failed"));
633 		goto nxge_attach_fail4;
634 	}
635 
636 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
637 		if (nxgep->function_num > 1) {
638 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
639 			    " function %d. Only functions 0 and 1 are "
640 			    "supported for this card.", nxgep->function_num));
641 			status = NXGE_ERROR;
642 			goto nxge_attach_fail4;
643 		}
644 	}
645 
646 	if (isLDOMguest(nxgep)) {
647 		/*
648 		 * Use the function number here.
649 		 */
650 		nxgep->mac.portnum = nxgep->function_num;
651 		nxgep->mac.porttype = PORT_TYPE_LOGICAL;
652 
653 		/* XXX We'll set the MAC address counts to 1 for now. */
654 		mmac_info = &nxgep->nxge_mmac_info;
655 		mmac_info->num_mmac = 1;
656 		mmac_info->naddrfree = 1;
657 	} else {
658 		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
659 		nxgep->mac.portnum = portn;
660 		if ((portn == 0) || (portn == 1))
661 			nxgep->mac.porttype = PORT_TYPE_XMAC;
662 		else
663 			nxgep->mac.porttype = PORT_TYPE_BMAC;
664 		/*
665 		 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
666 		 * internally, the rest 2 ports use BMAC (1G "Big" MAC).
667 		 * The two types of MACs have different characterizations.
668 		 */
669 		mmac_info = &nxgep->nxge_mmac_info;
670 		if (nxgep->function_num < 2) {
671 			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
672 			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
673 		} else {
674 			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
675 			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
676 		}
677 	}
678 	/*
679 	 * Setup the Ndd parameters for the this instance.
680 	 */
681 	nxge_init_param(nxgep);
682 
683 	/*
684 	 * Setup Register Tracing Buffer.
685 	 */
686 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
687 
688 	/* init stats ptr */
689 	nxge_init_statsp(nxgep);
690 
691 	/*
692 	 * Copy the vpd info from eeprom to a local data
693 	 * structure, and then check its validity.
694 	 */
695 	if (!isLDOMguest(nxgep)) {
696 		int *regp;
697 		uint_t reglen;
698 		int rv;
699 
700 		nxge_vpd_info_get(nxgep);
701 
702 		/* Find the NIU config handle. */
703 		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
704 		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
705 		    "reg", &regp, &reglen);
706 
707 		if (rv != DDI_PROP_SUCCESS) {
708 			goto nxge_attach_fail5;
709 		}
710 		/*
711 		 * The address_hi, that is the first int, in the reg
712 		 * property consists of config handle, but need to remove
713 		 * the bits 28-31 which are OBP specific info.
714 		 */
715 		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
716 		ddi_prop_free(regp);
717 	}
718 
719 	if (isLDOMguest(nxgep)) {
720 		uchar_t *prop_val;
721 		uint_t prop_len;
722 
723 		extern void nxge_get_logical_props(p_nxge_t);
724 
725 		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
726 		nxgep->mac.portmode = PORT_LOGICAL;
727 		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
728 		    "phy-type", "virtual transceiver");
729 
730 		nxgep->nports = 1;
731 		nxgep->board_ver = 0;	/* XXX What? */
732 
733 		/*
734 		 * local-mac-address property gives us info on which
735 		 * specific MAC address the Hybrid resource is associated
736 		 * with.
737 		 */
738 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
739 		    "local-mac-address", &prop_val,
740 		    &prop_len) != DDI_PROP_SUCCESS) {
741 			goto nxge_attach_fail5;
742 		}
743 		if (prop_len !=  ETHERADDRL) {
744 			ddi_prop_free(prop_val);
745 			goto nxge_attach_fail5;
746 		}
747 		ether_copy(prop_val, nxgep->hio_mac_addr);
748 		ddi_prop_free(prop_val);
749 		nxge_get_logical_props(nxgep);
750 
751 	} else {
752 		status = nxge_xcvr_find(nxgep);
753 
754 		if (status != NXGE_OK) {
755 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
756 			    " Couldn't determine card type"
757 			    " .... exit "));
758 			goto nxge_attach_fail5;
759 		}
760 
761 		status = nxge_get_config_properties(nxgep);
762 
763 		if (status != NXGE_OK) {
764 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
765 			    "get_hw create failed"));
766 			goto nxge_attach_fail;
767 		}
768 	}
769 
770 	/*
771 	 * Setup the Kstats for the driver.
772 	 */
773 	nxge_setup_kstats(nxgep);
774 
775 	if (!isLDOMguest(nxgep))
776 		nxge_setup_param(nxgep);
777 
778 	status = nxge_setup_system_dma_pages(nxgep);
779 	if (status != NXGE_OK) {
780 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
781 		goto nxge_attach_fail;
782 	}
783 
784 	nxge_hw_id_init(nxgep);
785 
786 	if (!isLDOMguest(nxgep))
787 		nxge_hw_init_niu_common(nxgep);
788 
789 	status = nxge_setup_mutexes(nxgep);
790 	if (status != NXGE_OK) {
791 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
792 		goto nxge_attach_fail;
793 	}
794 
795 #if defined(sun4v)
796 	if (isLDOMguest(nxgep)) {
797 		/* Find our VR & channel sets. */
798 		status = nxge_hio_vr_add(nxgep);
799 		goto nxge_attach_exit;
800 	}
801 #endif
802 
803 	status = nxge_setup_dev(nxgep);
804 	if (status != DDI_SUCCESS) {
805 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
806 		goto nxge_attach_fail;
807 	}
808 
809 	status = nxge_add_intrs(nxgep);
810 	if (status != DDI_SUCCESS) {
811 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
812 		goto nxge_attach_fail;
813 	}
814 	status = nxge_add_soft_intrs(nxgep);
815 	if (status != DDI_SUCCESS) {
816 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
817 		    "add_soft_intr failed"));
818 		goto nxge_attach_fail;
819 	}
820 
821 	/*
822 	 * Enable interrupts.
823 	 */
824 	nxge_intrs_enable(nxgep);
825 
826 	/* If a guest, register with vio_net instead. */
827 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
828 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
829 		    "unable to register to mac layer (%d)", status));
830 		goto nxge_attach_fail;
831 	}
832 
833 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
834 
835 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
836 	    "registered to mac (instance %d)", instance));
837 
838 	/* nxge_link_monitor calls xcvr.check_link recursively */
839 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
840 
841 	goto nxge_attach_exit;
842 
843 nxge_attach_fail:
844 	nxge_unattach(nxgep);
845 	goto nxge_attach_fail1;
846 
847 nxge_attach_fail5:
848 	/*
849 	 * Tear down the ndd parameters setup.
850 	 */
851 	nxge_destroy_param(nxgep);
852 
853 	/*
854 	 * Tear down the kstat setup.
855 	 */
856 	nxge_destroy_kstats(nxgep);
857 
858 nxge_attach_fail4:
859 	if (nxgep->nxge_hw_p) {
860 		nxge_uninit_common_dev(nxgep);
861 		nxgep->nxge_hw_p = NULL;
862 	}
863 
864 nxge_attach_fail3:
865 	/*
866 	 * Unmap the register setup.
867 	 */
868 	nxge_unmap_regs(nxgep);
869 
870 	nxge_fm_fini(nxgep);
871 
872 nxge_attach_fail2:
873 	ddi_soft_state_free(nxge_list, nxgep->instance);
874 
875 nxge_attach_fail1:
876 	if (status != NXGE_OK)
877 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
878 	nxgep = NULL;
879 
880 nxge_attach_exit:
881 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
882 	    status));
883 
884 	return (status);
885 }
886 
887 static int
888 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
889 {
890 	int 		status = DDI_SUCCESS;
891 	int 		instance;
892 	p_nxge_t 	nxgep = NULL;
893 
894 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
895 	instance = ddi_get_instance(dip);
896 	nxgep = ddi_get_soft_state(nxge_list, instance);
897 	if (nxgep == NULL) {
898 		status = DDI_FAILURE;
899 		goto nxge_detach_exit;
900 	}
901 
902 	switch (cmd) {
903 	case DDI_DETACH:
904 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
905 		break;
906 
907 	case DDI_PM_SUSPEND:
908 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
909 		nxgep->suspended = DDI_PM_SUSPEND;
910 		nxge_suspend(nxgep);
911 		break;
912 
913 	case DDI_SUSPEND:
914 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
915 		if (nxgep->suspended != DDI_PM_SUSPEND) {
916 			nxgep->suspended = DDI_SUSPEND;
917 			nxge_suspend(nxgep);
918 		}
919 		break;
920 
921 	default:
922 		status = DDI_FAILURE;
923 	}
924 
925 	if (cmd != DDI_DETACH)
926 		goto nxge_detach_exit;
927 
928 	/*
929 	 * Stop the xcvr polling.
930 	 */
931 	nxgep->suspended = cmd;
932 
933 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
934 
935 	if (isLDOMguest(nxgep)) {
936 		nxge_hio_unregister(nxgep);
937 	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
938 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
939 		    "<== nxge_detach status = 0x%08X", status));
940 		return (DDI_FAILURE);
941 	}
942 
943 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
944 	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));
945 
946 	nxge_unattach(nxgep);
947 	nxgep = NULL;
948 
949 nxge_detach_exit:
950 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
951 	    status));
952 
953 	return (status);
954 }
955 
956 static void
957 nxge_unattach(p_nxge_t nxgep)
958 {
959 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
960 
961 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
962 		return;
963 	}
964 
965 	nxgep->nxge_magic = 0;
966 
967 	if (nxgep->nxge_timerid) {
968 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
969 		nxgep->nxge_timerid = 0;
970 	}
971 
972 	/*
973 	 * If this flag is set, it will affect the Neptune
974 	 * only.
975 	 */
976 	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
977 		nxge_niu_peu_reset(nxgep);
978 	}
979 
980 #if	defined(sun4v)
981 	if (isLDOMguest(nxgep)) {
982 		(void) nxge_hio_vr_release(nxgep);
983 	}
984 #endif
985 
986 	if (nxgep->nxge_hw_p) {
987 		nxge_uninit_common_dev(nxgep);
988 		nxgep->nxge_hw_p = NULL;
989 	}
990 
991 #if	defined(sun4v)
992 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
993 		(void) hsvc_unregister(&nxgep->niu_hsvc);
994 		nxgep->niu_hsvc_available = B_FALSE;
995 	}
996 #endif
997 	/*
998 	 * Stop any further interrupts.
999 	 */
1000 	nxge_remove_intrs(nxgep);
1001 
1002 	/* remove soft interrups */
1003 	nxge_remove_soft_intrs(nxgep);
1004 
1005 	/*
1006 	 * Stop the device and free resources.
1007 	 */
1008 	if (!isLDOMguest(nxgep)) {
1009 		nxge_destroy_dev(nxgep);
1010 	}
1011 
1012 	/*
1013 	 * Tear down the ndd parameters setup.
1014 	 */
1015 	nxge_destroy_param(nxgep);
1016 
1017 	/*
1018 	 * Tear down the kstat setup.
1019 	 */
1020 	nxge_destroy_kstats(nxgep);
1021 
1022 	/*
1023 	 * Destroy all mutexes.
1024 	 */
1025 	nxge_destroy_mutexes(nxgep);
1026 
1027 	/*
1028 	 * Remove the list of ndd parameters which
1029 	 * were setup during attach.
1030 	 */
1031 	if (nxgep->dip) {
1032 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1033 		    " nxge_unattach: remove all properties"));
1034 
1035 		(void) ddi_prop_remove_all(nxgep->dip);
1036 	}
1037 
1038 #if NXGE_PROPERTY
1039 	nxge_remove_hard_properties(nxgep);
1040 #endif
1041 
1042 	/*
1043 	 * Unmap the register setup.
1044 	 */
1045 	nxge_unmap_regs(nxgep);
1046 
1047 	nxge_fm_fini(nxgep);
1048 
1049 	ddi_soft_state_free(nxge_list, nxgep->instance);
1050 
1051 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1052 }
1053 
1054 #if defined(sun4v)
1055 int
1056 nxge_hsvc_register(
1057 	nxge_t *nxgep)
1058 {
1059 	nxge_status_t status;
1060 
1061 	if (nxgep->niu_type == N2_NIU) {
1062 		nxgep->niu_hsvc_available = B_FALSE;
1063 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
1064 		if ((status = hsvc_register(&nxgep->niu_hsvc,
1065 		    &nxgep->niu_min_ver)) != 0) {
1066 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1067 			    "nxge_attach: %s: cannot negotiate "
1068 			    "hypervisor services revision %d group: 0x%lx "
1069 			    "major: 0x%lx minor: 0x%lx errno: %d",
1070 			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1071 			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1072 			    niu_hsvc.hsvc_minor, status));
1073 			return (DDI_FAILURE);
1074 		}
1075 		nxgep->niu_hsvc_available = B_TRUE;
1076 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1077 		    "NIU Hypervisor service enabled"));
1078 	}
1079 
1080 	return (DDI_SUCCESS);
1081 }
1082 #endif
1083 
1084 static char n2_siu_name[] = "niu";
1085 
1086 static nxge_status_t
1087 nxge_map_regs(p_nxge_t nxgep)
1088 {
1089 	int		ddi_status = DDI_SUCCESS;
1090 	p_dev_regs_t 	dev_regs;
1091 	char		buf[MAXPATHLEN + 1];
1092 	char 		*devname;
1093 #ifdef	NXGE_DEBUG
1094 	char 		*sysname;
1095 #endif
1096 	off_t		regsize;
1097 	nxge_status_t	status = NXGE_OK;
1098 #if !defined(_BIG_ENDIAN)
1099 	off_t pci_offset;
1100 	uint16_t pcie_devctl;
1101 #endif
1102 
1103 	if (isLDOMguest(nxgep)) {
1104 		return (nxge_guest_regs_map(nxgep));
1105 	}
1106 
1107 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1108 	nxgep->dev_regs = NULL;
1109 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1110 	dev_regs->nxge_regh = NULL;
1111 	dev_regs->nxge_pciregh = NULL;
1112 	dev_regs->nxge_msix_regh = NULL;
1113 	dev_regs->nxge_vir_regh = NULL;
1114 	dev_regs->nxge_vir2_regh = NULL;
1115 	nxgep->niu_type = NIU_TYPE_NONE;
1116 
1117 	devname = ddi_pathname(nxgep->dip, buf);
1118 	ASSERT(strlen(devname) > 0);
1119 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1120 	    "nxge_map_regs: pathname devname %s", devname));
1121 
1122 	/*
1123 	 * The driver is running on a N2-NIU system if devname is something
1124 	 * like "/niu@80/network@0"
1125 	 */
1126 	if (strstr(devname, n2_siu_name)) {
1127 		/* N2/NIU */
1128 		nxgep->niu_type = N2_NIU;
1129 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1130 		    "nxge_map_regs: N2/NIU devname %s", devname));
1131 		/* get function number */
1132 		nxgep->function_num =
1133 		    (devname[strlen(devname) -1] == '1' ? 1 : 0);
1134 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1135 		    "nxge_map_regs: N2/NIU function number %d",
1136 		    nxgep->function_num));
1137 	} else {
1138 		int		*prop_val;
1139 		uint_t 		prop_len;
1140 		uint8_t 	func_num;
1141 
1142 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1143 		    0, "reg",
1144 		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1145 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1146 			    "Reg property not found"));
1147 			ddi_status = DDI_FAILURE;
1148 			goto nxge_map_regs_fail0;
1149 
1150 		} else {
1151 			func_num = (prop_val[0] >> 8) & 0x7;
1152 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1153 			    "Reg property found: fun # %d",
1154 			    func_num));
1155 			nxgep->function_num = func_num;
1156 			if (isLDOMguest(nxgep)) {
1157 				nxgep->function_num /= 2;
1158 				return (NXGE_OK);
1159 			}
1160 			ddi_prop_free(prop_val);
1161 		}
1162 	}
1163 
1164 	switch (nxgep->niu_type) {
1165 	default:
1166 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1167 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1168 		    "nxge_map_regs: pci config size 0x%x", regsize));
1169 
1170 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1171 		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1172 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1173 		if (ddi_status != DDI_SUCCESS) {
1174 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1175 			    "ddi_map_regs, nxge bus config regs failed"));
1176 			goto nxge_map_regs_fail0;
1177 		}
1178 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1179 		    "nxge_map_reg: PCI config addr 0x%0llx "
1180 		    " handle 0x%0llx", dev_regs->nxge_pciregp,
1181 		    dev_regs->nxge_pciregh));
1182 			/*
1183 			 * IMP IMP
1184 			 * workaround  for bit swapping bug in HW
1185 			 * which ends up in no-snoop = yes
1186 			 * resulting, in DMA not synched properly
1187 			 */
1188 #if !defined(_BIG_ENDIAN)
1189 		/* workarounds for x86 systems */
1190 		pci_offset = 0x80 + PCIE_DEVCTL;
1191 		pcie_devctl = 0x0;
1192 		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
1193 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
1194 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1195 		    pcie_devctl);
1196 #endif
1197 
1198 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1199 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1200 		    "nxge_map_regs: pio size 0x%x", regsize));
1201 		/* set up the device mapped register */
1202 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1203 		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1204 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1205 		if (ddi_status != DDI_SUCCESS) {
1206 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1207 			    "ddi_map_regs for Neptune global reg failed"));
1208 			goto nxge_map_regs_fail1;
1209 		}
1210 
1211 		/* set up the msi/msi-x mapped register */
1212 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1213 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1214 		    "nxge_map_regs: msix size 0x%x", regsize));
1215 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1216 		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1217 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1218 		if (ddi_status != DDI_SUCCESS) {
1219 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1220 			    "ddi_map_regs for msi reg failed"));
1221 			goto nxge_map_regs_fail2;
1222 		}
1223 
1224 		/* set up the vio region mapped register */
1225 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1226 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1227 		    "nxge_map_regs: vio size 0x%x", regsize));
1228 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1229 		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1230 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1231 
1232 		if (ddi_status != DDI_SUCCESS) {
1233 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1234 			    "ddi_map_regs for nxge vio reg failed"));
1235 			goto nxge_map_regs_fail3;
1236 		}
1237 		nxgep->dev_regs = dev_regs;
1238 
1239 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1240 		NPI_PCI_ADD_HANDLE_SET(nxgep,
1241 		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1242 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1243 		NPI_MSI_ADD_HANDLE_SET(nxgep,
1244 		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1245 
1246 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1247 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1248 
1249 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1250 		NPI_REG_ADD_HANDLE_SET(nxgep,
1251 		    (npi_reg_ptr_t)dev_regs->nxge_regp);
1252 
1253 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1254 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1255 		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1256 
1257 		break;
1258 
1259 	case N2_NIU:
1260 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1261 		/*
1262 		 * Set up the device mapped register (FWARC 2006/556)
1263 		 * (changed back to 1: reg starts at 1!)
1264 		 */
1265 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1266 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1267 		    "nxge_map_regs: dev size 0x%x", regsize));
1268 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1269 		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1270 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1271 
1272 		if (ddi_status != DDI_SUCCESS) {
1273 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1274 			    "ddi_map_regs for N2/NIU, global reg failed "));
1275 			goto nxge_map_regs_fail1;
1276 		}
1277 
1278 		/* set up the first vio region mapped register */
1279 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1280 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1281 		    "nxge_map_regs: vio (1) size 0x%x", regsize));
1282 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1283 		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1284 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1285 
1286 		if (ddi_status != DDI_SUCCESS) {
1287 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1288 			    "ddi_map_regs for nxge vio reg failed"));
1289 			goto nxge_map_regs_fail2;
1290 		}
1291 		/* set up the second vio region mapped register */
1292 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1293 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1294 		    "nxge_map_regs: vio (3) size 0x%x", regsize));
1295 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1296 		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1297 		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1298 
1299 		if (ddi_status != DDI_SUCCESS) {
1300 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1301 			    "ddi_map_regs for nxge vio2 reg failed"));
1302 			goto nxge_map_regs_fail3;
1303 		}
1304 		nxgep->dev_regs = dev_regs;
1305 
1306 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1307 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1308 
1309 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1310 		NPI_REG_ADD_HANDLE_SET(nxgep,
1311 		    (npi_reg_ptr_t)dev_regs->nxge_regp);
1312 
1313 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1314 		NPI_VREG_ADD_HANDLE_SET(nxgep,
1315 		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1316 
1317 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1318 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
1319 		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1320 
1321 		break;
1322 	}
1323 
1324 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1325 	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1326 
1327 	goto nxge_map_regs_exit;
1328 nxge_map_regs_fail3:
1329 	if (dev_regs->nxge_msix_regh) {
1330 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1331 	}
1332 	if (dev_regs->nxge_vir_regh) {
1333 		ddi_regs_map_free(&dev_regs->nxge_regh);
1334 	}
1335 nxge_map_regs_fail2:
1336 	if (dev_regs->nxge_regh) {
1337 		ddi_regs_map_free(&dev_regs->nxge_regh);
1338 	}
1339 nxge_map_regs_fail1:
1340 	if (dev_regs->nxge_pciregh) {
1341 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
1342 	}
1343 nxge_map_regs_fail0:
1344 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1345 	kmem_free(dev_regs, sizeof (dev_regs_t));
1346 
1347 nxge_map_regs_exit:
1348 	if (ddi_status != DDI_SUCCESS)
1349 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1350 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1351 	return (status);
1352 }
1353 
1354 static void
1355 nxge_unmap_regs(p_nxge_t nxgep)
1356 {
1357 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1358 
1359 	if (isLDOMguest(nxgep)) {
1360 		nxge_guest_regs_map_free(nxgep);
1361 		return;
1362 	}
1363 
1364 	if (nxgep->dev_regs) {
1365 		if (nxgep->dev_regs->nxge_pciregh) {
1366 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1367 			    "==> nxge_unmap_regs: bus"));
1368 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1369 			nxgep->dev_regs->nxge_pciregh = NULL;
1370 		}
1371 		if (nxgep->dev_regs->nxge_regh) {
1372 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1373 			    "==> nxge_unmap_regs: device registers"));
1374 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1375 			nxgep->dev_regs->nxge_regh = NULL;
1376 		}
1377 		if (nxgep->dev_regs->nxge_msix_regh) {
1378 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1379 			    "==> nxge_unmap_regs: device interrupts"));
1380 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1381 			nxgep->dev_regs->nxge_msix_regh = NULL;
1382 		}
1383 		if (nxgep->dev_regs->nxge_vir_regh) {
1384 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1385 			    "==> nxge_unmap_regs: vio region"));
1386 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1387 			nxgep->dev_regs->nxge_vir_regh = NULL;
1388 		}
1389 		if (nxgep->dev_regs->nxge_vir2_regh) {
1390 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1391 			    "==> nxge_unmap_regs: vio2 region"));
1392 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1393 			nxgep->dev_regs->nxge_vir2_regh = NULL;
1394 		}
1395 
1396 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1397 		nxgep->dev_regs = NULL;
1398 	}
1399 
1400 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1401 }
1402 
1403 static nxge_status_t
1404 nxge_setup_mutexes(p_nxge_t nxgep)
1405 {
1406 	int ddi_status = DDI_SUCCESS;
1407 	nxge_status_t status = NXGE_OK;
1408 	nxge_classify_t *classify_ptr;
1409 	int partition;
1410 
1411 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1412 
1413 	/*
1414 	 * Get the interrupt cookie so the mutexes can be
1415 	 * Initialized.
1416 	 */
1417 	if (isLDOMguest(nxgep)) {
1418 		nxgep->interrupt_cookie = 0;
1419 	} else {
1420 		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1421 		    &nxgep->interrupt_cookie);
1422 
1423 		if (ddi_status != DDI_SUCCESS) {
1424 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1425 			    "<== nxge_setup_mutexes: failed 0x%x",
1426 			    ddi_status));
1427 			goto nxge_setup_mutexes_exit;
1428 		}
1429 	}
1430 
1431 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1432 	MUTEX_INIT(&nxgep->poll_lock, NULL,
1433 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1434 
1435 	/*
1436 	 * Initialize mutexes for this device.
1437 	 */
1438 	MUTEX_INIT(nxgep->genlock, NULL,
1439 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1440 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1441 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1442 	MUTEX_INIT(&nxgep->mif_lock, NULL,
1443 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1444 	MUTEX_INIT(&nxgep->group_lock, NULL,
1445 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1446 	RW_INIT(&nxgep->filter_lock, NULL,
1447 	    RW_DRIVER, (void *)nxgep->interrupt_cookie);
1448 
1449 	classify_ptr = &nxgep->classifier;
1450 		/*
1451 		 * FFLP Mutexes are never used in interrupt context
1452 		 * as fflp operation can take very long time to
1453 		 * complete and hence not suitable to invoke from interrupt
1454 		 * handlers.
1455 		 */
1456 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1457 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1458 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1459 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1460 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1461 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1462 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1463 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1464 		}
1465 	}
1466 
1467 nxge_setup_mutexes_exit:
1468 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1469 	    "<== nxge_setup_mutexes status = %x", status));
1470 
1471 	if (ddi_status != DDI_SUCCESS)
1472 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1473 
1474 	return (status);
1475 }
1476 
1477 static void
1478 nxge_destroy_mutexes(p_nxge_t nxgep)
1479 {
1480 	int partition;
1481 	nxge_classify_t *classify_ptr;
1482 
1483 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1484 	RW_DESTROY(&nxgep->filter_lock);
1485 	MUTEX_DESTROY(&nxgep->group_lock);
1486 	MUTEX_DESTROY(&nxgep->mif_lock);
1487 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
1488 	MUTEX_DESTROY(nxgep->genlock);
1489 
1490 	classify_ptr = &nxgep->classifier;
1491 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
1492 
1493 	/* Destroy all polling resources. */
1494 	MUTEX_DESTROY(&nxgep->poll_lock);
1495 	cv_destroy(&nxgep->poll_cv);
1496 
1497 	/* free data structures, based on HW type */
1498 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1499 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
1500 		for (partition = 0; partition < MAX_PARTITION; partition++) {
1501 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1502 		}
1503 	}
1504 
1505 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1506 }
1507 
1508 nxge_status_t
1509 nxge_init(p_nxge_t nxgep)
1510 {
1511 	nxge_status_t status = NXGE_OK;
1512 
1513 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1514 
1515 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1516 		return (status);
1517 	}
1518 
1519 	/*
1520 	 * Allocate system memory for the receive/transmit buffer blocks
1521 	 * and receive/transmit descriptor rings.
1522 	 */
1523 	status = nxge_alloc_mem_pool(nxgep);
1524 	if (status != NXGE_OK) {
1525 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1526 		goto nxge_init_fail1;
1527 	}
1528 
1529 	if (!isLDOMguest(nxgep)) {
1530 		/*
1531 		 * Initialize and enable the TXC registers.
1532 		 * (Globally enable the Tx controller,
1533 		 *  enable the port, configure the dma channel bitmap,
1534 		 *  configure the max burst size).
1535 		 */
1536 		status = nxge_txc_init(nxgep);
1537 		if (status != NXGE_OK) {
1538 			NXGE_ERROR_MSG((nxgep,
1539 			    NXGE_ERR_CTL, "init txc failed\n"));
1540 			goto nxge_init_fail2;
1541 		}
1542 	}
1543 
1544 	/*
1545 	 * Initialize and enable TXDMA channels.
1546 	 */
1547 	status = nxge_init_txdma_channels(nxgep);
1548 	if (status != NXGE_OK) {
1549 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1550 		goto nxge_init_fail3;
1551 	}
1552 
1553 	/*
1554 	 * Initialize and enable RXDMA channels.
1555 	 */
1556 	status = nxge_init_rxdma_channels(nxgep);
1557 	if (status != NXGE_OK) {
1558 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1559 		goto nxge_init_fail4;
1560 	}
1561 
1562 	/*
1563 	 * The guest domain is now done.
1564 	 */
1565 	if (isLDOMguest(nxgep)) {
1566 		nxgep->drv_state |= STATE_HW_INITIALIZED;
1567 		goto nxge_init_exit;
1568 	}
1569 
1570 	/*
1571 	 * Initialize TCAM and FCRAM (Neptune).
1572 	 */
1573 	status = nxge_classify_init(nxgep);
1574 	if (status != NXGE_OK) {
1575 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1576 		goto nxge_init_fail5;
1577 	}
1578 
1579 	/*
1580 	 * Initialize ZCP
1581 	 */
1582 	status = nxge_zcp_init(nxgep);
1583 	if (status != NXGE_OK) {
1584 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1585 		goto nxge_init_fail5;
1586 	}
1587 
1588 	/*
1589 	 * Initialize IPP.
1590 	 */
1591 	status = nxge_ipp_init(nxgep);
1592 	if (status != NXGE_OK) {
1593 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1594 		goto nxge_init_fail5;
1595 	}
1596 
1597 	/*
1598 	 * Initialize the MAC block.
1599 	 */
1600 	status = nxge_mac_init(nxgep);
1601 	if (status != NXGE_OK) {
1602 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1603 		goto nxge_init_fail5;
1604 	}
1605 
1606 	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */
1607 
1608 	/*
1609 	 * Enable hardware interrupts.
1610 	 */
1611 	nxge_intr_hw_enable(nxgep);
1612 	nxgep->drv_state |= STATE_HW_INITIALIZED;
1613 
1614 	goto nxge_init_exit;
1615 
1616 nxge_init_fail5:
1617 	nxge_uninit_rxdma_channels(nxgep);
1618 nxge_init_fail4:
1619 	nxge_uninit_txdma_channels(nxgep);
1620 nxge_init_fail3:
1621 	if (!isLDOMguest(nxgep)) {
1622 		(void) nxge_txc_uninit(nxgep);
1623 	}
1624 nxge_init_fail2:
1625 	nxge_free_mem_pool(nxgep);
1626 nxge_init_fail1:
1627 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1628 	    "<== nxge_init status (failed) = 0x%08x", status));
1629 	return (status);
1630 
1631 nxge_init_exit:
1632 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1633 	    status));
1634 	return (status);
1635 }
1636 
1637 
1638 timeout_id_t
1639 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1640 {
1641 	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1642 		return (timeout(func, (caddr_t)nxgep,
1643 		    drv_usectohz(1000 * msec)));
1644 	}
1645 	return (NULL);
1646 }
1647 
1648 /*ARGSUSED*/
1649 void
1650 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1651 {
1652 	if (timerid) {
1653 		(void) untimeout(timerid);
1654 	}
1655 }
1656 
1657 void
1658 nxge_uninit(p_nxge_t nxgep)
1659 {
1660 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1661 
1662 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1663 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1664 		    "==> nxge_uninit: not initialized"));
1665 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1666 		    "<== nxge_uninit"));
1667 		return;
1668 	}
1669 
1670 	/* stop timer */
1671 	if (nxgep->nxge_timerid) {
1672 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1673 		nxgep->nxge_timerid = 0;
1674 	}
1675 
1676 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1677 	(void) nxge_intr_hw_disable(nxgep);
1678 
1679 	/*
1680 	 * Reset the receive MAC side.
1681 	 */
1682 	(void) nxge_rx_mac_disable(nxgep);
1683 
1684 	/* Disable and soft reset the IPP */
1685 	if (!isLDOMguest(nxgep))
1686 		(void) nxge_ipp_disable(nxgep);
1687 
1688 	/* Free classification resources */
1689 	(void) nxge_classify_uninit(nxgep);
1690 
1691 	/*
1692 	 * Reset the transmit/receive DMA side.
1693 	 */
1694 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1695 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1696 
1697 	nxge_uninit_txdma_channels(nxgep);
1698 	nxge_uninit_rxdma_channels(nxgep);
1699 
1700 	/*
1701 	 * Reset the transmit MAC side.
1702 	 */
1703 	(void) nxge_tx_mac_disable(nxgep);
1704 
1705 	nxge_free_mem_pool(nxgep);
1706 
1707 	/*
1708 	 * Start the timer if the reset flag is not set.
1709 	 * If this reset flag is set, the link monitor
1710 	 * will not be started in order to stop furthur bus
1711 	 * activities coming from this interface.
1712 	 * The driver will start the monitor function
1713 	 * if the interface was initialized again later.
1714 	 */
1715 	if (!nxge_peu_reset_enable) {
1716 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1717 	}
1718 
1719 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1720 
1721 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1722 	    "nxge_mblks_pending %d", nxge_mblks_pending));
1723 }
1724 
1725 void
1726 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1727 {
1728 #if defined(__i386)
1729 	size_t		reg;
1730 #else
1731 	uint64_t	reg;
1732 #endif
1733 	uint64_t	regdata;
1734 	int		i, retry;
1735 
1736 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1737 	regdata = 0;
1738 	retry = 1;
1739 
1740 	for (i = 0; i < retry; i++) {
1741 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1742 	}
1743 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1744 }
1745 
1746 void
1747 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1748 {
1749 #if defined(__i386)
1750 	size_t		reg;
1751 #else
1752 	uint64_t	reg;
1753 #endif
1754 	uint64_t	buf[2];
1755 
1756 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1757 #if defined(__i386)
1758 	reg = (size_t)buf[0];
1759 #else
1760 	reg = buf[0];
1761 #endif
1762 
1763 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1764 }
1765 
1766 
1767 nxge_os_mutex_t nxgedebuglock;
1768 int nxge_debug_init = 0;
1769 
1770 /*ARGSUSED*/
1771 /*VARARGS*/
1772 void
1773 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1774 {
1775 	char msg_buffer[1048];
1776 	char prefix_buffer[32];
1777 	int instance;
1778 	uint64_t debug_level;
1779 	int cmn_level = CE_CONT;
1780 	va_list ap;
1781 
1782 	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1783 		/* In case a developer has changed nxge_debug_level. */
1784 		if (nxgep->nxge_debug_level != nxge_debug_level)
1785 			nxgep->nxge_debug_level = nxge_debug_level;
1786 	}
1787 
1788 	debug_level = (nxgep == NULL) ? nxge_debug_level :
1789 	    nxgep->nxge_debug_level;
1790 
1791 	if ((level & debug_level) ||
1792 	    (level == NXGE_NOTE) ||
1793 	    (level == NXGE_ERR_CTL)) {
1794 		/* do the msg processing */
1795 		if (nxge_debug_init == 0) {
1796 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1797 			nxge_debug_init = 1;
1798 		}
1799 
1800 		MUTEX_ENTER(&nxgedebuglock);
1801 
1802 		if ((level & NXGE_NOTE)) {
1803 			cmn_level = CE_NOTE;
1804 		}
1805 
1806 		if (level & NXGE_ERR_CTL) {
1807 			cmn_level = CE_WARN;
1808 		}
1809 
1810 		va_start(ap, fmt);
1811 		(void) vsprintf(msg_buffer, fmt, ap);
1812 		va_end(ap);
1813 		if (nxgep == NULL) {
1814 			instance = -1;
1815 			(void) sprintf(prefix_buffer, "%s :", "nxge");
1816 		} else {
1817 			instance = nxgep->instance;
1818 			(void) sprintf(prefix_buffer,
1819 			    "%s%d :", "nxge", instance);
1820 		}
1821 
1822 		MUTEX_EXIT(&nxgedebuglock);
1823 		cmn_err(cmn_level, "!%s %s\n",
1824 		    prefix_buffer, msg_buffer);
1825 
1826 	}
1827 }
1828 
1829 char *
1830 nxge_dump_packet(char *addr, int size)
1831 {
1832 	uchar_t *ap = (uchar_t *)addr;
1833 	int i;
1834 	static char etherbuf[1024];
1835 	char *cp = etherbuf;
1836 	char digits[] = "0123456789abcdef";
1837 
1838 	if (!size)
1839 		size = 60;
1840 
1841 	if (size > MAX_DUMP_SZ) {
1842 		/* Dump the leading bytes */
1843 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1844 			if (*ap > 0x0f)
1845 				*cp++ = digits[*ap >> 4];
1846 			*cp++ = digits[*ap++ & 0xf];
1847 			*cp++ = ':';
1848 		}
1849 		for (i = 0; i < 20; i++)
1850 			*cp++ = '.';
1851 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1852 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1853 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1854 			if (*ap > 0x0f)
1855 				*cp++ = digits[*ap >> 4];
1856 			*cp++ = digits[*ap++ & 0xf];
1857 			*cp++ = ':';
1858 		}
1859 	} else {
1860 		for (i = 0; i < size; i++) {
1861 			if (*ap > 0x0f)
1862 				*cp++ = digits[*ap >> 4];
1863 			*cp++ = digits[*ap++ & 0xf];
1864 			*cp++ = ':';
1865 		}
1866 	}
1867 	*--cp = 0;
1868 	return (etherbuf);
1869 }
1870 
1871 #ifdef	NXGE_DEBUG
1872 static void
1873 nxge_test_map_regs(p_nxge_t nxgep)
1874 {
1875 	ddi_acc_handle_t cfg_handle;
1876 	p_pci_cfg_t	cfg_ptr;
1877 	ddi_acc_handle_t dev_handle;
1878 	char		*dev_ptr;
1879 	ddi_acc_handle_t pci_config_handle;
1880 	uint32_t	regval;
1881 	int		i;
1882 
1883 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1884 
1885 	dev_handle = nxgep->dev_regs->nxge_regh;
1886 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1887 
1888 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1889 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1890 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1891 
1892 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1893 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1894 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1895 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1896 		    &cfg_ptr->vendorid));
1897 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1898 		    "\tvendorid 0x%x devid 0x%x",
1899 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1900 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
1901 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1902 		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1903 		    "bar1c 0x%x",
1904 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
1905 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1906 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1907 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1908 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1909 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1910 		    "base 28 0x%x bar2c 0x%x\n",
1911 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1912 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1913 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1914 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1915 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1916 		    "\nNeptune PCI BAR: base30 0x%x\n",
1917 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1918 
1919 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
1920 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1921 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1922 		    "first  0x%llx second 0x%llx third 0x%llx "
1923 		    "last 0x%llx ",
1924 		    NXGE_PIO_READ64(dev_handle,
1925 		    (uint64_t *)(dev_ptr + 0),  0),
1926 		    NXGE_PIO_READ64(dev_handle,
1927 		    (uint64_t *)(dev_ptr + 8),  0),
1928 		    NXGE_PIO_READ64(dev_handle,
1929 		    (uint64_t *)(dev_ptr + 16), 0),
1930 		    NXGE_PIO_READ64(cfg_handle,
1931 		    (uint64_t *)(dev_ptr + 24), 0)));
1932 	}
1933 }
1934 
1935 #endif
1936 
1937 static void
1938 nxge_suspend(p_nxge_t nxgep)
1939 {
1940 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1941 
1942 	nxge_intrs_disable(nxgep);
1943 	nxge_destroy_dev(nxgep);
1944 
1945 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1946 }
1947 
1948 static nxge_status_t
1949 nxge_resume(p_nxge_t nxgep)
1950 {
1951 	nxge_status_t status = NXGE_OK;
1952 
1953 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1954 
1955 	nxgep->suspended = DDI_RESUME;
1956 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1957 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1958 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1959 	(void) nxge_rx_mac_enable(nxgep);
1960 	(void) nxge_tx_mac_enable(nxgep);
1961 	nxge_intrs_enable(nxgep);
1962 	nxgep->suspended = 0;
1963 
1964 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1965 	    "<== nxge_resume status = 0x%x", status));
1966 	return (status);
1967 }
1968 
1969 static nxge_status_t
1970 nxge_setup_dev(p_nxge_t nxgep)
1971 {
1972 	nxge_status_t	status = NXGE_OK;
1973 
1974 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1975 	    nxgep->mac.portnum));
1976 
1977 	status = nxge_link_init(nxgep);
1978 
1979 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1980 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1981 		    "port%d Bad register acc handle", nxgep->mac.portnum));
1982 		status = NXGE_ERROR;
1983 	}
1984 
1985 	if (status != NXGE_OK) {
1986 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1987 		    " nxge_setup_dev status "
1988 		    "(xcvr init 0x%08x)", status));
1989 		goto nxge_setup_dev_exit;
1990 	}
1991 
1992 nxge_setup_dev_exit:
1993 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1994 	    "<== nxge_setup_dev port %d status = 0x%08x",
1995 	    nxgep->mac.portnum, status));
1996 
1997 	return (status);
1998 }
1999 
2000 static void
2001 nxge_destroy_dev(p_nxge_t nxgep)
2002 {
2003 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2004 
2005 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2006 
2007 	(void) nxge_hw_stop(nxgep);
2008 
2009 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2010 }
2011 
2012 static nxge_status_t
2013 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2014 {
2015 	int 			ddi_status = DDI_SUCCESS;
2016 	uint_t 			count;
2017 	ddi_dma_cookie_t 	cookie;
2018 	uint_t 			iommu_pagesize;
2019 	nxge_status_t		status = NXGE_OK;
2020 
2021 	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2022 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2023 	if (nxgep->niu_type != N2_NIU) {
2024 		iommu_pagesize = dvma_pagesize(nxgep->dip);
2025 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2026 		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2027 		    " default_block_size %d iommu_pagesize %d",
2028 		    nxgep->sys_page_sz,
2029 		    ddi_ptob(nxgep->dip, (ulong_t)1),
2030 		    nxgep->rx_default_block_size,
2031 		    iommu_pagesize));
2032 
2033 		if (iommu_pagesize != 0) {
2034 			if (nxgep->sys_page_sz == iommu_pagesize) {
2035 				if (iommu_pagesize > 0x4000)
2036 					nxgep->sys_page_sz = 0x4000;
2037 			} else {
2038 				if (nxgep->sys_page_sz > iommu_pagesize)
2039 					nxgep->sys_page_sz = iommu_pagesize;
2040 			}
2041 		}
2042 	}
2043 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2044 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2045 	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2046 	    "default_block_size %d page mask %d",
2047 	    nxgep->sys_page_sz,
2048 	    ddi_ptob(nxgep->dip, (ulong_t)1),
2049 	    nxgep->rx_default_block_size,
2050 	    nxgep->sys_page_mask));
2051 
2052 
2053 	switch (nxgep->sys_page_sz) {
2054 	default:
2055 		nxgep->sys_page_sz = 0x1000;
2056 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2057 		nxgep->rx_default_block_size = 0x1000;
2058 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2059 		break;
2060 	case 0x1000:
2061 		nxgep->rx_default_block_size = 0x1000;
2062 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2063 		break;
2064 	case 0x2000:
2065 		nxgep->rx_default_block_size = 0x2000;
2066 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2067 		break;
2068 	case 0x4000:
2069 		nxgep->rx_default_block_size = 0x4000;
2070 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2071 		break;
2072 	case 0x8000:
2073 		nxgep->rx_default_block_size = 0x8000;
2074 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2075 		break;
2076 	}
2077 
2078 #ifndef USE_RX_BIG_BUF
2079 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
2080 #else
2081 		nxgep->rx_default_block_size = 0x2000;
2082 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2083 #endif
2084 	/*
2085 	 * Get the system DMA burst size.
2086 	 */
2087 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2088 	    DDI_DMA_DONTWAIT, 0,
2089 	    &nxgep->dmasparehandle);
2090 	if (ddi_status != DDI_SUCCESS) {
2091 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2092 		    "ddi_dma_alloc_handle: failed "
2093 		    " status 0x%x", ddi_status));
2094 		goto nxge_get_soft_properties_exit;
2095 	}
2096 
2097 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2098 	    (caddr_t)nxgep->dmasparehandle,
2099 	    sizeof (nxgep->dmasparehandle),
2100 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2101 	    DDI_DMA_DONTWAIT, 0,
2102 	    &cookie, &count);
2103 	if (ddi_status != DDI_DMA_MAPPED) {
2104 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2105 		    "Binding spare handle to find system"
2106 		    " burstsize failed."));
2107 		ddi_status = DDI_FAILURE;
2108 		goto nxge_get_soft_properties_fail1;
2109 	}
2110 
2111 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2112 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2113 
2114 nxge_get_soft_properties_fail1:
2115 	ddi_dma_free_handle(&nxgep->dmasparehandle);
2116 
2117 nxge_get_soft_properties_exit:
2118 
2119 	if (ddi_status != DDI_SUCCESS)
2120 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2121 
2122 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2123 	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2124 	return (status);
2125 }
2126 
2127 static nxge_status_t
2128 nxge_alloc_mem_pool(p_nxge_t nxgep)
2129 {
2130 	nxge_status_t	status = NXGE_OK;
2131 
2132 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2133 
2134 	status = nxge_alloc_rx_mem_pool(nxgep);
2135 	if (status != NXGE_OK) {
2136 		return (NXGE_ERROR);
2137 	}
2138 
2139 	status = nxge_alloc_tx_mem_pool(nxgep);
2140 	if (status != NXGE_OK) {
2141 		nxge_free_rx_mem_pool(nxgep);
2142 		return (NXGE_ERROR);
2143 	}
2144 
2145 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2146 	return (NXGE_OK);
2147 }
2148 
2149 static void
2150 nxge_free_mem_pool(p_nxge_t nxgep)
2151 {
2152 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2153 
2154 	nxge_free_rx_mem_pool(nxgep);
2155 	nxge_free_tx_mem_pool(nxgep);
2156 
2157 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2158 }
2159 
2160 nxge_status_t
2161 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2162 {
2163 	uint32_t		rdc_max;
2164 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2165 	p_nxge_hw_pt_cfg_t	p_cfgp;
2166 	p_nxge_dma_pool_t	dma_poolp;
2167 	p_nxge_dma_common_t	*dma_buf_p;
2168 	p_nxge_dma_pool_t	dma_cntl_poolp;
2169 	p_nxge_dma_common_t	*dma_cntl_p;
2170 	uint32_t 		*num_chunks; /* per dma */
2171 	nxge_status_t		status = NXGE_OK;
2172 
2173 	uint32_t		nxge_port_rbr_size;
2174 	uint32_t		nxge_port_rbr_spare_size;
2175 	uint32_t		nxge_port_rcr_size;
2176 	uint32_t		rx_cntl_alloc_size;
2177 
2178 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2179 
2180 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2181 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2182 	rdc_max = NXGE_MAX_RDCS;
2183 
2184 	/*
2185 	 * Allocate memory for the common DMA data structures.
2186 	 */
2187 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2188 	    KM_SLEEP);
2189 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2190 	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2191 
2192 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2193 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2194 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2195 	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2196 
2197 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2198 	    sizeof (uint32_t) * rdc_max, KM_SLEEP);
2199 
2200 	/*
2201 	 * Assume that each DMA channel will be configured with
2202 	 * the default block size.
2203 	 * rbr block counts are modulo the batch count (16).
2204 	 */
2205 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
2206 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
2207 
2208 	if (!nxge_port_rbr_size) {
2209 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2210 	}
2211 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2212 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2213 		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2214 	}
2215 
2216 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
2217 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2218 
2219 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2220 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2221 		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2222 	}
2223 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2224 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2225 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2226 		    "set to default %d",
2227 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2228 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2229 	}
2230 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2231 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2232 		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
2233 		    "set to default %d",
2234 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
2235 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
2236 	}
2237 
2238 	/*
2239 	 * N2/NIU has limitation on the descriptor sizes (contiguous
2240 	 * memory allocation on data buffers to 4M (contig_mem_alloc)
2241 	 * and little endian for control buffers (must use the ddi/dki mem alloc
2242 	 * function).
2243 	 */
2244 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2245 	if (nxgep->niu_type == N2_NIU) {
2246 		nxge_port_rbr_spare_size = 0;
2247 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2248 		    (!ISP2(nxge_port_rbr_size))) {
2249 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2250 		}
2251 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2252 		    (!ISP2(nxge_port_rcr_size))) {
2253 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2254 		}
2255 	}
2256 #endif
2257 
2258 	/*
2259 	 * Addresses of receive block ring, receive completion ring and the
2260 	 * mailbox must be all cache-aligned (64 bytes).
2261 	 */
2262 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2263 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2264 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2265 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
2266 
2267 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2268 	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2269 	    "nxge_port_rcr_size = %d "
2270 	    "rx_cntl_alloc_size = %d",
2271 	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
2272 	    nxge_port_rcr_size,
2273 	    rx_cntl_alloc_size));
2274 
2275 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2276 	if (nxgep->niu_type == N2_NIU) {
2277 		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2278 		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2279 
2280 		if (!ISP2(rx_buf_alloc_size)) {
2281 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2282 			    "==> nxge_alloc_rx_mem_pool: "
2283 			    " must be power of 2"));
2284 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2285 			goto nxge_alloc_rx_mem_pool_exit;
2286 		}
2287 
2288 		if (rx_buf_alloc_size > (1 << 22)) {
2289 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2290 			    "==> nxge_alloc_rx_mem_pool: "
2291 			    " limit size to 4M"));
2292 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2293 			goto nxge_alloc_rx_mem_pool_exit;
2294 		}
2295 
2296 		if (rx_cntl_alloc_size < 0x2000) {
2297 			rx_cntl_alloc_size = 0x2000;
2298 		}
2299 	}
2300 #endif
2301 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2302 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2303 	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2304 	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2305 
2306 	dma_poolp->ndmas = p_cfgp->max_rdcs;
2307 	dma_poolp->num_chunks = num_chunks;
2308 	dma_poolp->buf_allocated = B_TRUE;
2309 	nxgep->rx_buf_pool_p = dma_poolp;
2310 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2311 
2312 	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2313 	dma_cntl_poolp->buf_allocated = B_TRUE;
2314 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2315 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2316 
2317 	/* Allocate the receive rings, too. */
2318 	nxgep->rx_rbr_rings =
2319 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2320 	nxgep->rx_rbr_rings->rbr_rings =
2321 	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2322 	nxgep->rx_rcr_rings =
2323 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2324 	nxgep->rx_rcr_rings->rcr_rings =
2325 	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2326 	nxgep->rx_mbox_areas_p =
2327 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2328 	nxgep->rx_mbox_areas_p->rxmbox_areas =
2329 	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2330 
2331 	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2332 	    p_cfgp->max_rdcs;
2333 
2334 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2335 	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2336 
2337 nxge_alloc_rx_mem_pool_exit:
2338 	return (status);
2339 }
2340 
2341 /*
2342  * nxge_alloc_rxb
2343  *
2344  *	Allocate buffers for an RDC.
2345  *
2346  * Arguments:
2347  * 	nxgep
2348  * 	channel	The channel to map into our kernel space.
2349  *
2350  * Notes:
2351  *
2352  * NPI function calls:
2353  *
2354  * NXGE function calls:
2355  *
2356  * Registers accessed:
2357  *
2358  * Context:
2359  *
2360  * Taking apart:
2361  *
2362  * Open questions:
2363  *
2364  */
2365 nxge_status_t
2366 nxge_alloc_rxb(
2367 	p_nxge_t nxgep,
2368 	int channel)
2369 {
2370 	size_t			rx_buf_alloc_size;
2371 	nxge_status_t		status = NXGE_OK;
2372 
2373 	nxge_dma_common_t	**data;
2374 	nxge_dma_common_t	**control;
2375 	uint32_t 		*num_chunks;
2376 
2377 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb"));
2378 
2379 	/*
2380 	 * Allocate memory for the receive buffers and descriptor rings.
2381 	 * Replace these allocation functions with the interface functions
2382 	 * provided by the partition manager if/when they are available.
2383 	 */
2384 
2385 	/*
2386 	 * Allocate memory for the receive buffer blocks.
2387 	 */
2388 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
2389 	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2390 
2391 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2392 	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2393 
2394 	if ((status = nxge_alloc_rx_buf_dma(
2395 	    nxgep, channel, data, rx_buf_alloc_size,
2396 	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2397 		return (status);
2398 	}
2399 
2400 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2401 	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2402 
2403 	/*
2404 	 * Allocate memory for descriptor rings and mailbox.
2405 	 */
2406 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2407 
2408 	if ((status = nxge_alloc_rx_cntl_dma(
2409 	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2410 	    != NXGE_OK) {
2411 		nxge_free_rx_cntl_dma(nxgep, *control);
2412 		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2413 		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2414 		return (status);
2415 	}
2416 
2417 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2418 	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2419 
2420 	return (status);
2421 }
2422 
2423 void
2424 nxge_free_rxb(
2425 	p_nxge_t nxgep,
2426 	int channel)
2427 {
2428 	nxge_dma_common_t	*data;
2429 	nxge_dma_common_t	*control;
2430 	uint32_t 		num_chunks;
2431 
2432 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb"));
2433 
2434 	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2435 	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2436 	nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2437 
2438 	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2439 	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2440 
2441 	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2442 	nxge_free_rx_cntl_dma(nxgep, control);
2443 
2444 	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2445 
2446 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2447 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2448 
2449 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb"));
2450 }
2451 
2452 static void
2453 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2454 {
2455 	int rdc_max = NXGE_MAX_RDCS;
2456 
2457 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2458 
2459 	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2460 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2461 		    "<== nxge_free_rx_mem_pool "
2462 		    "(null rx buf pool or buf not allocated"));
2463 		return;
2464 	}
2465 	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2466 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2467 		    "<== nxge_free_rx_mem_pool "
2468 		    "(null rx cntl buf pool or cntl buf not allocated"));
2469 		return;
2470 	}
2471 
2472 	KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2473 	    sizeof (p_nxge_dma_common_t) * rdc_max);
2474 	KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2475 
2476 	KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2477 	    sizeof (uint32_t) * rdc_max);
2478 	KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2479 	    sizeof (p_nxge_dma_common_t) * rdc_max);
2480 	KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2481 
2482 	nxgep->rx_buf_pool_p = 0;
2483 	nxgep->rx_cntl_pool_p = 0;
2484 
2485 	KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2486 	    sizeof (p_rx_rbr_ring_t) * rdc_max);
2487 	KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2488 	KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2489 	    sizeof (p_rx_rcr_ring_t) * rdc_max);
2490 	KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2491 	KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2492 	    sizeof (p_rx_mbox_t) * rdc_max);
2493 	KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2494 
2495 	nxgep->rx_rbr_rings = 0;
2496 	nxgep->rx_rcr_rings = 0;
2497 	nxgep->rx_mbox_areas_p = 0;
2498 
2499 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2500 }
2501 
2502 
2503 static nxge_status_t
2504 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2505 	p_nxge_dma_common_t *dmap,
2506 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2507 {
2508 	p_nxge_dma_common_t 	rx_dmap;
2509 	nxge_status_t		status = NXGE_OK;
2510 	size_t			total_alloc_size;
2511 	size_t			allocated = 0;
2512 	int			i, size_index, array_size;
2513 	boolean_t		use_kmem_alloc = B_FALSE;
2514 
2515 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2516 
2517 	rx_dmap = (p_nxge_dma_common_t)
2518 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2519 	    KM_SLEEP);
2520 
2521 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2522 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2523 	    dma_channel, alloc_size, block_size, dmap));
2524 
2525 	total_alloc_size = alloc_size;
2526 
2527 #if defined(RX_USE_RECLAIM_POST)
2528 	total_alloc_size = alloc_size + alloc_size/4;
2529 #endif
2530 
2531 	i = 0;
2532 	size_index = 0;
2533 	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
2534 	while ((alloc_sizes[size_index] < alloc_size) &&
2535 	    (size_index < array_size))
2536 		size_index++;
2537 	if (size_index >= array_size) {
2538 		size_index = array_size - 1;
2539 	}
2540 
2541 	/* For Neptune, use kmem_alloc if the kmem flag is set. */
2542 	if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2543 		use_kmem_alloc = B_TRUE;
2544 #if defined(__i386) || defined(__amd64)
2545 		size_index = 0;
2546 #endif
2547 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2548 		    "==> nxge_alloc_rx_buf_dma: "
2549 		    "Neptune use kmem_alloc() - size_index %d",
2550 		    size_index));
2551 	}
2552 
2553 	while ((allocated < total_alloc_size) &&
2554 	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2555 		rx_dmap[i].dma_chunk_index = i;
2556 		rx_dmap[i].block_size = block_size;
2557 		rx_dmap[i].alength = alloc_sizes[size_index];
2558 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
2559 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2560 		rx_dmap[i].dma_channel = dma_channel;
2561 		rx_dmap[i].contig_alloc_type = B_FALSE;
2562 		rx_dmap[i].kmem_alloc_type = B_FALSE;
2563 		rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2564 
2565 		/*
2566 		 * N2/NIU: data buffers must be contiguous as the driver
2567 		 *	   needs to call Hypervisor api to set up
2568 		 *	   logical pages.
2569 		 */
2570 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2571 			rx_dmap[i].contig_alloc_type = B_TRUE;
2572 			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2573 		} else if (use_kmem_alloc) {
2574 			/* For Neptune, use kmem_alloc */
2575 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2576 			    "==> nxge_alloc_rx_buf_dma: "
2577 			    "Neptune use kmem_alloc()"));
2578 			rx_dmap[i].kmem_alloc_type = B_TRUE;
2579 			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2580 		}
2581 
2582 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2583 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2584 		    "i %d nblocks %d alength %d",
2585 		    dma_channel, i, &rx_dmap[i], block_size,
2586 		    i, rx_dmap[i].nblocks,
2587 		    rx_dmap[i].alength));
2588 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2589 		    &nxge_rx_dma_attr,
2590 		    rx_dmap[i].alength,
2591 		    &nxge_dev_buf_dma_acc_attr,
2592 		    DDI_DMA_READ | DDI_DMA_STREAMING,
2593 		    (p_nxge_dma_common_t)(&rx_dmap[i]));
2594 		if (status != NXGE_OK) {
2595 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2596 			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
2597 			    "dma %d size_index %d size requested %d",
2598 			    dma_channel,
2599 			    size_index,
2600 			    rx_dmap[i].alength));
2601 			size_index--;
2602 		} else {
2603 			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2604 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2605 			    " nxge_alloc_rx_buf_dma DONE  alloc mem: "
2606 			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2607 			    "buf_alloc_state %d alloc_type %d",
2608 			    dma_channel,
2609 			    &rx_dmap[i],
2610 			    rx_dmap[i].kaddrp,
2611 			    rx_dmap[i].alength,
2612 			    rx_dmap[i].buf_alloc_state,
2613 			    rx_dmap[i].buf_alloc_type));
2614 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2615 			    " alloc_rx_buf_dma allocated rdc %d "
2616 			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2617 			    dma_channel, i, rx_dmap[i].alength,
2618 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2619 			    rx_dmap[i].kaddrp));
2620 			i++;
2621 			allocated += alloc_sizes[size_index];
2622 		}
2623 	}
2624 
2625 	if (allocated < total_alloc_size) {
2626 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2627 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2628 		    "allocated 0x%x requested 0x%x",
2629 		    dma_channel,
2630 		    allocated, total_alloc_size));
2631 		status = NXGE_ERROR;
2632 		goto nxge_alloc_rx_mem_fail1;
2633 	}
2634 
2635 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2636 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2637 	    "allocated 0x%x requested 0x%x",
2638 	    dma_channel,
2639 	    allocated, total_alloc_size));
2640 
2641 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2642 	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
2643 	    dma_channel, i));
2644 	*num_chunks = i;
2645 	*dmap = rx_dmap;
2646 
2647 	goto nxge_alloc_rx_mem_exit;
2648 
2649 nxge_alloc_rx_mem_fail1:
2650 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2651 
2652 nxge_alloc_rx_mem_exit:
2653 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2654 	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2655 
2656 	return (status);
2657 }
2658 
2659 /*ARGSUSED*/
2660 static void
2661 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2662     uint32_t num_chunks)
2663 {
2664 	int		i;
2665 
2666 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2667 	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2668 
2669 	if (dmap == 0)
2670 		return;
2671 
2672 	for (i = 0; i < num_chunks; i++) {
2673 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2674 		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2675 		    i, dmap));
2676 		nxge_dma_free_rx_data_buf(dmap++);
2677 	}
2678 
2679 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
2680 }
2681 
2682 /*ARGSUSED*/
2683 static nxge_status_t
2684 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2685     p_nxge_dma_common_t *dmap, size_t size)
2686 {
2687 	p_nxge_dma_common_t 	rx_dmap;
2688 	nxge_status_t		status = NXGE_OK;
2689 
2690 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2691 
2692 	rx_dmap = (p_nxge_dma_common_t)
2693 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2694 
2695 	rx_dmap->contig_alloc_type = B_FALSE;
2696 	rx_dmap->kmem_alloc_type = B_FALSE;
2697 
2698 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2699 	    &nxge_desc_dma_attr,
2700 	    size,
2701 	    &nxge_dev_desc_dma_acc_attr,
2702 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2703 	    rx_dmap);
2704 	if (status != NXGE_OK) {
2705 		goto nxge_alloc_rx_cntl_dma_fail1;
2706 	}
2707 
2708 	*dmap = rx_dmap;
2709 	goto nxge_alloc_rx_cntl_dma_exit;
2710 
2711 nxge_alloc_rx_cntl_dma_fail1:
2712 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2713 
2714 nxge_alloc_rx_cntl_dma_exit:
2715 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2716 	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2717 
2718 	return (status);
2719 }
2720 
2721 /*ARGSUSED*/
2722 static void
2723 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2724 {
2725 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2726 
2727 	if (dmap == 0)
2728 		return;
2729 
2730 	nxge_dma_mem_free(dmap);
2731 
2732 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2733 }
2734 
2735 typedef struct {
2736 	size_t	tx_size;
2737 	size_t	cr_size;
2738 	size_t	threshhold;
2739 } nxge_tdc_sizes_t;
2740 
2741 static
2742 nxge_status_t
2743 nxge_tdc_sizes(
2744 	nxge_t *nxgep,
2745 	nxge_tdc_sizes_t *sizes)
2746 {
2747 	uint32_t threshhold;	/* The bcopy() threshhold */
2748 	size_t tx_size;		/* Transmit buffer size */
2749 	size_t cr_size;		/* Completion ring size */
2750 
2751 	/*
2752 	 * Assume that each DMA channel will be configured with the
2753 	 * default transmit buffer size for copying transmit data.
2754 	 * (If a packet is bigger than this, it will not be copied.)
2755 	 */
2756 	if (nxgep->niu_type == N2_NIU) {
2757 		threshhold = TX_BCOPY_SIZE;
2758 	} else {
2759 		threshhold = nxge_bcopy_thresh;
2760 	}
2761 	tx_size = nxge_tx_ring_size * threshhold;
2762 
2763 	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2764 	cr_size += sizeof (txdma_mailbox_t);
2765 
2766 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2767 	if (nxgep->niu_type == N2_NIU) {
2768 		if (!ISP2(tx_size)) {
2769 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2770 			    "==> nxge_tdc_sizes: Tx size"
2771 			    " must be power of 2"));
2772 			return (NXGE_ERROR);
2773 		}
2774 
2775 		if (tx_size > (1 << 22)) {
2776 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2777 			    "==> nxge_tdc_sizes: Tx size"
2778 			    " limited to 4M"));
2779 			return (NXGE_ERROR);
2780 		}
2781 
2782 		if (cr_size < 0x2000)
2783 			cr_size = 0x2000;
2784 	}
2785 #endif
2786 
2787 	sizes->threshhold = threshhold;
2788 	sizes->tx_size = tx_size;
2789 	sizes->cr_size = cr_size;
2790 
2791 	return (NXGE_OK);
2792 }
2793 /*
2794  * nxge_alloc_txb
2795  *
2796  *	Allocate buffers for an TDC.
2797  *
2798  * Arguments:
2799  * 	nxgep
2800  * 	channel	The channel to map into our kernel space.
2801  *
2802  * Notes:
2803  *
2804  * NPI function calls:
2805  *
2806  * NXGE function calls:
2807  *
2808  * Registers accessed:
2809  *
2810  * Context:
2811  *
2812  * Taking apart:
2813  *
2814  * Open questions:
2815  *
2816  */
2817 nxge_status_t
2818 nxge_alloc_txb(
2819 	p_nxge_t nxgep,
2820 	int channel)
2821 {
2822 	nxge_dma_common_t	**dma_buf_p;
2823 	nxge_dma_common_t	**dma_cntl_p;
2824 	uint32_t 		*num_chunks;
2825 	nxge_status_t		status = NXGE_OK;
2826 
2827 	nxge_tdc_sizes_t	sizes;
2828 
2829 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb"));
2830 
2831 	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2832 		return (NXGE_ERROR);
2833 
2834 	/*
2835 	 * Allocate memory for transmit buffers and descriptor rings.
2836 	 * Replace these allocation functions with the interface functions
2837 	 * provided by the partition manager Real Soon Now.
2838 	 */
2839 	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2840 	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2841 
2842 	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2843 
2844 	/*
2845 	 * Allocate memory for transmit buffers and descriptor rings.
2846 	 * Replace allocation functions with interface functions provided
2847 	 * by the partition manager when it is available.
2848 	 *
2849 	 * Allocate memory for the transmit buffer pool.
2850 	 */
2851 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2852 	    "sizes: tx: %ld, cr:%ld, th:%ld",
2853 	    sizes.tx_size, sizes.cr_size, sizes.threshhold));
2854 
2855 	*num_chunks = 0;
2856 	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2857 	    sizes.tx_size, sizes.threshhold, num_chunks);
2858 	if (status != NXGE_OK) {
2859 		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2860 		return (status);
2861 	}
2862 
2863 	/*
2864 	 * Allocate memory for descriptor rings and mailbox.
2865 	 */
2866 	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2867 	    sizes.cr_size);
2868 	if (status != NXGE_OK) {
2869 		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2870 		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2871 		return (status);
2872 	}
2873 
2874 	return (NXGE_OK);
2875 }
2876 
2877 void
2878 nxge_free_txb(
2879 	p_nxge_t nxgep,
2880 	int channel)
2881 {
2882 	nxge_dma_common_t	*data;
2883 	nxge_dma_common_t	*control;
2884 	uint32_t 		num_chunks;
2885 
2886 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2887 
2888 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2889 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2890 	nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2891 
2892 	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2893 	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2894 
2895 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2896 	nxge_free_tx_cntl_dma(nxgep, control);
2897 
2898 	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2899 
2900 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2901 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2902 
2903 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2904 }
2905 
2906 /*
2907  * nxge_alloc_tx_mem_pool
2908  *
2909  *	This function allocates all of the per-port TDC control data structures.
2910  *	The per-channel (TDC) data structures are allocated when needed.
2911  *
2912  * Arguments:
2913  * 	nxgep
2914  *
2915  * Notes:
2916  *
2917  * Context:
2918  *	Any domain
2919  */
2920 nxge_status_t
2921 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2922 {
2923 	nxge_hw_pt_cfg_t	*p_cfgp;
2924 	nxge_dma_pool_t		*dma_poolp;
2925 	nxge_dma_common_t	**dma_buf_p;
2926 	nxge_dma_pool_t		*dma_cntl_poolp;
2927 	nxge_dma_common_t	**dma_cntl_p;
2928 	uint32_t		*num_chunks; /* per dma */
2929 	int			tdc_max;
2930 
2931 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2932 
2933 	p_cfgp = &nxgep->pt_config.hw_config;
2934 	tdc_max = NXGE_MAX_TDCS;
2935 
2936 	/*
2937 	 * Allocate memory for each transmit DMA channel.
2938 	 */
2939 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2940 	    KM_SLEEP);
2941 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2942 	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2943 
2944 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2945 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2946 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2947 	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2948 
2949 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2950 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2951 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2952 		    "set to default %d",
2953 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2954 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2955 	}
2956 
2957 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
2958 	/*
2959 	 * N2/NIU has limitation on the descriptor sizes (contiguous
2960 	 * memory allocation on data buffers to 4M (contig_mem_alloc)
2961 	 * and little endian for control buffers (must use the ddi/dki mem alloc
2962 	 * function). The transmit ring is limited to 8K (includes the
2963 	 * mailbox).
2964 	 */
2965 	if (nxgep->niu_type == N2_NIU) {
2966 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2967 		    (!ISP2(nxge_tx_ring_size))) {
2968 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2969 		}
2970 	}
2971 #endif
2972 
2973 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2974 
2975 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2976 	    sizeof (uint32_t) * tdc_max, KM_SLEEP);
2977 
2978 	dma_poolp->ndmas = p_cfgp->tdc.owned;
2979 	dma_poolp->num_chunks = num_chunks;
2980 	dma_poolp->dma_buf_pool_p = dma_buf_p;
2981 	nxgep->tx_buf_pool_p = dma_poolp;
2982 
2983 	dma_poolp->buf_allocated = B_TRUE;
2984 
2985 	dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
2986 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2987 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2988 
2989 	dma_cntl_poolp->buf_allocated = B_TRUE;
2990 
2991 	nxgep->tx_rings =
2992 	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
2993 	nxgep->tx_rings->rings =
2994 	    KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
2995 	nxgep->tx_mbox_areas_p =
2996 	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
2997 	nxgep->tx_mbox_areas_p->txmbox_areas_p =
2998 	    KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
2999 
3000 	nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3001 
3002 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3003 	    "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
3004 	    tdc_max, dma_poolp->ndmas));
3005 
3006 	return (NXGE_OK);
3007 }
3008 
3009 nxge_status_t
3010 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3011     p_nxge_dma_common_t *dmap, size_t alloc_size,
3012     size_t block_size, uint32_t *num_chunks)
3013 {
3014 	p_nxge_dma_common_t 	tx_dmap;
3015 	nxge_status_t		status = NXGE_OK;
3016 	size_t			total_alloc_size;
3017 	size_t			allocated = 0;
3018 	int			i, size_index, array_size;
3019 
3020 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3021 
3022 	tx_dmap = (p_nxge_dma_common_t)
3023 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3024 	    KM_SLEEP);
3025 
3026 	total_alloc_size = alloc_size;
3027 	i = 0;
3028 	size_index = 0;
3029 	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
3030 	while ((alloc_sizes[size_index] < alloc_size) &&
3031 	    (size_index < array_size))
3032 		size_index++;
3033 	if (size_index >= array_size) {
3034 		size_index = array_size - 1;
3035 	}
3036 
3037 	while ((allocated < total_alloc_size) &&
3038 	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3039 
3040 		tx_dmap[i].dma_chunk_index = i;
3041 		tx_dmap[i].block_size = block_size;
3042 		tx_dmap[i].alength = alloc_sizes[size_index];
3043 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
3044 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3045 		tx_dmap[i].dma_channel = dma_channel;
3046 		tx_dmap[i].contig_alloc_type = B_FALSE;
3047 		tx_dmap[i].kmem_alloc_type = B_FALSE;
3048 
3049 		/*
3050 		 * N2/NIU: data buffers must be contiguous as the driver
3051 		 *	   needs to call Hypervisor api to set up
3052 		 *	   logical pages.
3053 		 */
3054 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3055 			tx_dmap[i].contig_alloc_type = B_TRUE;
3056 		}
3057 
3058 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3059 		    &nxge_tx_dma_attr,
3060 		    tx_dmap[i].alength,
3061 		    &nxge_dev_buf_dma_acc_attr,
3062 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
3063 		    (p_nxge_dma_common_t)(&tx_dmap[i]));
3064 		if (status != NXGE_OK) {
3065 			size_index--;
3066 		} else {
3067 			i++;
3068 			allocated += alloc_sizes[size_index];
3069 		}
3070 	}
3071 
3072 	if (allocated < total_alloc_size) {
3073 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074 		    "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
3075 		    "allocated 0x%x requested 0x%x",
3076 		    dma_channel,
3077 		    allocated, total_alloc_size));
3078 		status = NXGE_ERROR;
3079 		goto nxge_alloc_tx_mem_fail1;
3080 	}
3081 
3082 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3083 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3084 	    "allocated 0x%x requested 0x%x",
3085 	    dma_channel,
3086 	    allocated, total_alloc_size));
3087 
3088 	*num_chunks = i;
3089 	*dmap = tx_dmap;
3090 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3091 	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3092 	    *dmap, i));
3093 	goto nxge_alloc_tx_mem_exit;
3094 
3095 nxge_alloc_tx_mem_fail1:
3096 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3097 
3098 nxge_alloc_tx_mem_exit:
3099 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3100 	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3101 
3102 	return (status);
3103 }
3104 
3105 /*ARGSUSED*/
3106 static void
3107 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3108     uint32_t num_chunks)
3109 {
3110 	int		i;
3111 
3112 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3113 
3114 	if (dmap == 0)
3115 		return;
3116 
3117 	for (i = 0; i < num_chunks; i++) {
3118 		nxge_dma_mem_free(dmap++);
3119 	}
3120 
3121 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3122 }
3123 
3124 /*ARGSUSED*/
3125 nxge_status_t
3126 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3127     p_nxge_dma_common_t *dmap, size_t size)
3128 {
3129 	p_nxge_dma_common_t 	tx_dmap;
3130 	nxge_status_t		status = NXGE_OK;
3131 
3132 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3133 	tx_dmap = (p_nxge_dma_common_t)
3134 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3135 
3136 	tx_dmap->contig_alloc_type = B_FALSE;
3137 	tx_dmap->kmem_alloc_type = B_FALSE;
3138 
3139 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3140 	    &nxge_desc_dma_attr,
3141 	    size,
3142 	    &nxge_dev_desc_dma_acc_attr,
3143 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3144 	    tx_dmap);
3145 	if (status != NXGE_OK) {
3146 		goto nxge_alloc_tx_cntl_dma_fail1;
3147 	}
3148 
3149 	*dmap = tx_dmap;
3150 	goto nxge_alloc_tx_cntl_dma_exit;
3151 
3152 nxge_alloc_tx_cntl_dma_fail1:
3153 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3154 
3155 nxge_alloc_tx_cntl_dma_exit:
3156 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3157 	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3158 
3159 	return (status);
3160 }
3161 
3162 /*ARGSUSED*/
3163 static void
3164 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3165 {
3166 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3167 
3168 	if (dmap == 0)
3169 		return;
3170 
3171 	nxge_dma_mem_free(dmap);
3172 
3173 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3174 }
3175 
3176 /*
3177  * nxge_free_tx_mem_pool
3178  *
3179  *	This function frees all of the per-port TDC control data structures.
3180  *	The per-channel (TDC) data structures are freed when the channel
3181  *	is stopped.
3182  *
3183  * Arguments:
3184  * 	nxgep
3185  *
3186  * Notes:
3187  *
3188  * Context:
3189  *	Any domain
3190  */
3191 static void
3192 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3193 {
3194 	int tdc_max = NXGE_MAX_TDCS;
3195 
3196 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3197 
3198 	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3199 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3200 		    "<== nxge_free_tx_mem_pool "
3201 		    "(null tx buf pool or buf not allocated"));
3202 		return;
3203 	}
3204 	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3205 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3206 		    "<== nxge_free_tx_mem_pool "
3207 		    "(null tx cntl buf pool or cntl buf not allocated"));
3208 		return;
3209 	}
3210 
3211 	/* 1. Free the mailboxes. */
3212 	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3213 	    sizeof (p_tx_mbox_t) * tdc_max);
3214 	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3215 
3216 	nxgep->tx_mbox_areas_p = 0;
3217 
3218 	/* 2. Free the transmit ring arrays. */
3219 	KMEM_FREE(nxgep->tx_rings->rings,
3220 	    sizeof (p_tx_ring_t) * tdc_max);
3221 	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3222 
3223 	nxgep->tx_rings = 0;
3224 
3225 	/* 3. Free the completion ring data structures. */
3226 	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3227 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3228 	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3229 
3230 	nxgep->tx_cntl_pool_p = 0;
3231 
3232 	/* 4. Free the data ring data structures. */
3233 	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3234 	    sizeof (uint32_t) * tdc_max);
3235 	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3236 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3237 	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3238 
3239 	nxgep->tx_buf_pool_p = 0;
3240 
3241 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3242 }
3243 
3244 /*ARGSUSED*/
3245 static nxge_status_t
3246 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3247 	struct ddi_dma_attr *dma_attrp,
3248 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3249 	p_nxge_dma_common_t dma_p)
3250 {
3251 	caddr_t 		kaddrp;
3252 	int			ddi_status = DDI_SUCCESS;
3253 	boolean_t		contig_alloc_type;
3254 	boolean_t		kmem_alloc_type;
3255 
3256 	contig_alloc_type = dma_p->contig_alloc_type;
3257 
3258 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3259 		/*
3260 		 * contig_alloc_type for contiguous memory only allowed
3261 		 * for N2/NIU.
3262 		 */
3263 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3264 		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3265 		    dma_p->contig_alloc_type));
3266 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3267 	}
3268 
3269 	dma_p->dma_handle = NULL;
3270 	dma_p->acc_handle = NULL;
3271 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3272 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3273 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3274 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3275 	if (ddi_status != DDI_SUCCESS) {
3276 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3277 		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3278 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3279 	}
3280 
3281 	kmem_alloc_type = dma_p->kmem_alloc_type;
3282 
3283 	switch (contig_alloc_type) {
3284 	case B_FALSE:
3285 		switch (kmem_alloc_type) {
3286 		case B_FALSE:
3287 			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3288 			    length,
3289 			    acc_attr_p,
3290 			    xfer_flags,
3291 			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3292 			    &dma_p->acc_handle);
3293 			if (ddi_status != DDI_SUCCESS) {
3294 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3295 				    "nxge_dma_mem_alloc: "
3296 				    "ddi_dma_mem_alloc failed"));
3297 				ddi_dma_free_handle(&dma_p->dma_handle);
3298 				dma_p->dma_handle = NULL;
3299 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3300 			}
3301 			if (dma_p->alength < length) {
3302 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3303 				    "nxge_dma_mem_alloc:di_dma_mem_alloc "
3304 				    "< length."));
3305 				ddi_dma_mem_free(&dma_p->acc_handle);
3306 				ddi_dma_free_handle(&dma_p->dma_handle);
3307 				dma_p->acc_handle = NULL;
3308 				dma_p->dma_handle = NULL;
3309 				return (NXGE_ERROR);
3310 			}
3311 
3312 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3313 			    NULL,
3314 			    kaddrp, dma_p->alength, xfer_flags,
3315 			    DDI_DMA_DONTWAIT,
3316 			    0, &dma_p->dma_cookie, &dma_p->ncookies);
3317 			if (ddi_status != DDI_DMA_MAPPED) {
3318 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3319 				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3320 				    "failed "
3321 				    "(staus 0x%x ncookies %d.)", ddi_status,
3322 				    dma_p->ncookies));
3323 				if (dma_p->acc_handle) {
3324 					ddi_dma_mem_free(&dma_p->acc_handle);
3325 					dma_p->acc_handle = NULL;
3326 				}
3327 				ddi_dma_free_handle(&dma_p->dma_handle);
3328 				dma_p->dma_handle = NULL;
3329 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3330 			}
3331 
3332 			if (dma_p->ncookies != 1) {
3333 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3334 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3335 				    "> 1 cookie"
3336 				    "(staus 0x%x ncookies %d.)", ddi_status,
3337 				    dma_p->ncookies));
3338 				if (dma_p->acc_handle) {
3339 					ddi_dma_mem_free(&dma_p->acc_handle);
3340 					dma_p->acc_handle = NULL;
3341 				}
3342 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3343 				ddi_dma_free_handle(&dma_p->dma_handle);
3344 				dma_p->dma_handle = NULL;
3345 				return (NXGE_ERROR);
3346 			}
3347 			break;
3348 
3349 		case B_TRUE:
3350 			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3351 			if (kaddrp == NULL) {
3352 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3353 				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3354 				    "kmem alloc failed"));
3355 				return (NXGE_ERROR);
3356 			}
3357 
3358 			dma_p->alength = length;
3359 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3360 			    NULL, kaddrp, dma_p->alength, xfer_flags,
3361 			    DDI_DMA_DONTWAIT, 0,
3362 			    &dma_p->dma_cookie, &dma_p->ncookies);
3363 			if (ddi_status != DDI_DMA_MAPPED) {
3364 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3365 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3366 				    "(kmem_alloc) failed kaddrp $%p length %d "
3367 				    "(staus 0x%x (%d) ncookies %d.)",
3368 				    kaddrp, length,
3369 				    ddi_status, ddi_status, dma_p->ncookies));
3370 				KMEM_FREE(kaddrp, length);
3371 				dma_p->acc_handle = NULL;
3372 				ddi_dma_free_handle(&dma_p->dma_handle);
3373 				dma_p->dma_handle = NULL;
3374 				dma_p->kaddrp = NULL;
3375 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3376 			}
3377 
3378 			if (dma_p->ncookies != 1) {
3379 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3380 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3381 				    "(kmem_alloc) > 1 cookie"
3382 				    "(staus 0x%x ncookies %d.)", ddi_status,
3383 				    dma_p->ncookies));
3384 				KMEM_FREE(kaddrp, length);
3385 				dma_p->acc_handle = NULL;
3386 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3387 				ddi_dma_free_handle(&dma_p->dma_handle);
3388 				dma_p->dma_handle = NULL;
3389 				dma_p->kaddrp = NULL;
3390 				return (NXGE_ERROR);
3391 			}
3392 
3393 			dma_p->kaddrp = kaddrp;
3394 
3395 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3396 			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3397 			    "kaddr $%p alength %d",
3398 			    dma_p,
3399 			    kaddrp,
3400 			    dma_p->alength));
3401 			break;
3402 		}
3403 		break;
3404 
3405 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3406 	case B_TRUE:
3407 		kaddrp = (caddr_t)contig_mem_alloc(length);
3408 		if (kaddrp == NULL) {
3409 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3410 			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3411 			ddi_dma_free_handle(&dma_p->dma_handle);
3412 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3413 		}
3414 
3415 		dma_p->alength = length;
3416 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3417 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3418 		    &dma_p->dma_cookie, &dma_p->ncookies);
3419 		if (ddi_status != DDI_DMA_MAPPED) {
3420 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3421 			    "nxge_dma_mem_alloc:di_dma_addr_bind failed "
3422 			    "(status 0x%x ncookies %d.)", ddi_status,
3423 			    dma_p->ncookies));
3424 
3425 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3426 			    "==> nxge_dma_mem_alloc: (not mapped)"
3427 			    "length %lu (0x%x) "
3428 			    "free contig kaddrp $%p "
3429 			    "va_to_pa $%p",
3430 			    length, length,
3431 			    kaddrp,
3432 			    va_to_pa(kaddrp)));
3433 
3434 
3435 			contig_mem_free((void *)kaddrp, length);
3436 			ddi_dma_free_handle(&dma_p->dma_handle);
3437 
3438 			dma_p->dma_handle = NULL;
3439 			dma_p->acc_handle = NULL;
3440 			dma_p->alength = NULL;
3441 			dma_p->kaddrp = NULL;
3442 
3443 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3444 		}
3445 
3446 		if (dma_p->ncookies != 1 ||
3447 		    (dma_p->dma_cookie.dmac_laddress == NULL)) {
3448 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3449 			    "nxge_dma_mem_alloc:di_dma_addr_bind > 1 "
3450 			    "cookie or "
3451 			    "dmac_laddress is NULL $%p size %d "
3452 			    " (status 0x%x ncookies %d.)",
3453 			    ddi_status,
3454 			    dma_p->dma_cookie.dmac_laddress,
3455 			    dma_p->dma_cookie.dmac_size,
3456 			    dma_p->ncookies));
3457 
3458 			contig_mem_free((void *)kaddrp, length);
3459 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3460 			ddi_dma_free_handle(&dma_p->dma_handle);
3461 
3462 			dma_p->alength = 0;
3463 			dma_p->dma_handle = NULL;
3464 			dma_p->acc_handle = NULL;
3465 			dma_p->kaddrp = NULL;
3466 
3467 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3468 		}
3469 		break;
3470 
3471 #else
3472 	case B_TRUE:
3473 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3474 		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3475 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3476 #endif
3477 	}
3478 
3479 	dma_p->kaddrp = kaddrp;
3480 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
3481 	    dma_p->alength - RXBUF_64B_ALIGNED;
3482 #if defined(__i386)
3483 	dma_p->ioaddr_pp =
3484 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3485 #else
3486 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3487 #endif
3488 	dma_p->last_ioaddr_pp =
3489 #if defined(__i386)
3490 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3491 #else
3492 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3493 #endif
3494 	    dma_p->alength - RXBUF_64B_ALIGNED;
3495 
3496 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3497 
3498 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3499 	dma_p->orig_ioaddr_pp =
3500 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3501 	dma_p->orig_alength = length;
3502 	dma_p->orig_kaddrp = kaddrp;
3503 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3504 #endif
3505 
3506 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3507 	    "dma buffer allocated: dma_p $%p "
3508 	    "return dmac_ladress from cookie $%p cookie dmac_size %d "
3509 	    "dma_p->ioaddr_p $%p "
3510 	    "dma_p->orig_ioaddr_p $%p "
3511 	    "orig_vatopa $%p "
3512 	    "alength %d (0x%x) "
3513 	    "kaddrp $%p "
3514 	    "length %d (0x%x)",
3515 	    dma_p,
3516 	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3517 	    dma_p->ioaddr_pp,
3518 	    dma_p->orig_ioaddr_pp,
3519 	    dma_p->orig_vatopa,
3520 	    dma_p->alength, dma_p->alength,
3521 	    kaddrp,
3522 	    length, length));
3523 
3524 	return (NXGE_OK);
3525 }
3526 
3527 static void
3528 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3529 {
3530 	if (dma_p->dma_handle != NULL) {
3531 		if (dma_p->ncookies) {
3532 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3533 			dma_p->ncookies = 0;
3534 		}
3535 		ddi_dma_free_handle(&dma_p->dma_handle);
3536 		dma_p->dma_handle = NULL;
3537 	}
3538 
3539 	if (dma_p->acc_handle != NULL) {
3540 		ddi_dma_mem_free(&dma_p->acc_handle);
3541 		dma_p->acc_handle = NULL;
3542 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3543 	}
3544 
3545 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3546 	if (dma_p->contig_alloc_type &&
3547 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3548 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3549 		    "kaddrp $%p (orig_kaddrp $%p)"
3550 		    "mem type %d ",
3551 		    "orig_alength %d "
3552 		    "alength 0x%x (%d)",
3553 		    dma_p->kaddrp,
3554 		    dma_p->orig_kaddrp,
3555 		    dma_p->contig_alloc_type,
3556 		    dma_p->orig_alength,
3557 		    dma_p->alength, dma_p->alength));
3558 
3559 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3560 		dma_p->orig_alength = NULL;
3561 		dma_p->orig_kaddrp = NULL;
3562 		dma_p->contig_alloc_type = B_FALSE;
3563 	}
3564 #endif
3565 	dma_p->kaddrp = NULL;
3566 	dma_p->alength = NULL;
3567 }
3568 
3569 static void
3570 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3571 {
3572 	uint64_t kaddr;
3573 	uint32_t buf_size;
3574 
3575 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3576 
3577 	if (dma_p->dma_handle != NULL) {
3578 		if (dma_p->ncookies) {
3579 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3580 			dma_p->ncookies = 0;
3581 		}
3582 		ddi_dma_free_handle(&dma_p->dma_handle);
3583 		dma_p->dma_handle = NULL;
3584 	}
3585 
3586 	if (dma_p->acc_handle != NULL) {
3587 		ddi_dma_mem_free(&dma_p->acc_handle);
3588 		dma_p->acc_handle = NULL;
3589 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3590 	}
3591 
3592 	NXGE_DEBUG_MSG((NULL, DMA_CTL,
3593 	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3594 	    dma_p,
3595 	    dma_p->buf_alloc_state));
3596 
3597 	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3598 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3599 		    "<== nxge_dma_free_rx_data_buf: "
3600 		    "outstanding data buffers"));
3601 		return;
3602 	}
3603 
3604 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3605 	if (dma_p->contig_alloc_type &&
3606 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3607 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3608 		    "kaddrp $%p (orig_kaddrp $%p)"
3609 		    "mem type %d ",
3610 		    "orig_alength %d "
3611 		    "alength 0x%x (%d)",
3612 		    dma_p->kaddrp,
3613 		    dma_p->orig_kaddrp,
3614 		    dma_p->contig_alloc_type,
3615 		    dma_p->orig_alength,
3616 		    dma_p->alength, dma_p->alength));
3617 
3618 		kaddr = (uint64_t)dma_p->orig_kaddrp;
3619 		buf_size = dma_p->orig_alength;
3620 		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3621 		dma_p->orig_alength = NULL;
3622 		dma_p->orig_kaddrp = NULL;
3623 		dma_p->contig_alloc_type = B_FALSE;
3624 		dma_p->kaddrp = NULL;
3625 		dma_p->alength = NULL;
3626 		return;
3627 	}
3628 #endif
3629 
3630 	if (dma_p->kmem_alloc_type) {
3631 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3632 		    "nxge_dma_free_rx_data_buf: free kmem "
3633 		    "kaddrp $%p (orig_kaddrp $%p)"
3634 		    "alloc type %d "
3635 		    "orig_alength %d "
3636 		    "alength 0x%x (%d)",
3637 		    dma_p->kaddrp,
3638 		    dma_p->orig_kaddrp,
3639 		    dma_p->kmem_alloc_type,
3640 		    dma_p->orig_alength,
3641 		    dma_p->alength, dma_p->alength));
3642 #if defined(__i386)
3643 		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3644 #else
3645 		kaddr = (uint64_t)dma_p->kaddrp;
3646 #endif
3647 		buf_size = dma_p->orig_alength;
3648 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3649 		    "nxge_dma_free_rx_data_buf: free dmap $%p "
3650 		    "kaddr $%p buf_size %d",
3651 		    dma_p,
3652 		    kaddr, buf_size));
3653 		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3654 		dma_p->alength = 0;
3655 		dma_p->orig_alength = 0;
3656 		dma_p->kaddrp = NULL;
3657 		dma_p->kmem_alloc_type = B_FALSE;
3658 	}
3659 
3660 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3661 }
3662 
3663 /*
3664  *	nxge_m_start() -- start transmitting and receiving.
3665  *
3666  *	This function is called by the MAC layer when the first
3667  *	stream is open to prepare the hardware ready for sending
3668  *	and transmitting packets.
3669  */
3670 static int
3671 nxge_m_start(void *arg)
3672 {
3673 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3674 
3675 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3676 
3677 	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3678 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3679 	}
3680 
3681 	MUTEX_ENTER(nxgep->genlock);
3682 	if (nxge_init(nxgep) != NXGE_OK) {
3683 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3684 		    "<== nxge_m_start: initialization failed"));
3685 		MUTEX_EXIT(nxgep->genlock);
3686 		return (EIO);
3687 	}
3688 
3689 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3690 		goto nxge_m_start_exit;
3691 	/*
3692 	 * Start timer to check the system error and tx hangs
3693 	 */
3694 	if (!isLDOMguest(nxgep))
3695 		nxgep->nxge_timerid = nxge_start_timer(nxgep,
3696 		    nxge_check_hw_state, NXGE_CHECK_TIMER);
3697 #if	defined(sun4v)
3698 	else
3699 		nxge_hio_start_timer(nxgep);
3700 #endif
3701 
3702 	nxgep->link_notify = B_TRUE;
3703 
3704 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3705 
3706 nxge_m_start_exit:
3707 	MUTEX_EXIT(nxgep->genlock);
3708 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3709 	return (0);
3710 }
3711 
3712 /*
3713  *	nxge_m_stop(): stop transmitting and receiving.
3714  */
3715 static void
3716 nxge_m_stop(void *arg)
3717 {
3718 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3719 
3720 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3721 
3722 	if (nxgep->nxge_timerid) {
3723 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3724 		nxgep->nxge_timerid = 0;
3725 	}
3726 
3727 	MUTEX_ENTER(nxgep->genlock);
3728 	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3729 	nxge_uninit(nxgep);
3730 
3731 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3732 
3733 	MUTEX_EXIT(nxgep->genlock);
3734 
3735 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3736 }
3737 
3738 static int
3739 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3740 {
3741 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3742 	struct 		ether_addr addrp;
3743 
3744 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3745 
3746 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3747 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3748 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3749 		    "<== nxge_m_unicst: set unitcast failed"));
3750 		return (EINVAL);
3751 	}
3752 
3753 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3754 
3755 	return (0);
3756 }
3757 
3758 static int
3759 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3760 {
3761 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3762 	struct 		ether_addr addrp;
3763 
3764 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3765 	    "==> nxge_m_multicst: add %d", add));
3766 
3767 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3768 	if (add) {
3769 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3770 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3771 			    "<== nxge_m_multicst: add multicast failed"));
3772 			return (EINVAL);
3773 		}
3774 	} else {
3775 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3776 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3777 			    "<== nxge_m_multicst: del multicast failed"));
3778 			return (EINVAL);
3779 		}
3780 	}
3781 
3782 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3783 
3784 	return (0);
3785 }
3786 
3787 static int
3788 nxge_m_promisc(void *arg, boolean_t on)
3789 {
3790 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3791 
3792 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3793 	    "==> nxge_m_promisc: on %d", on));
3794 
3795 	if (nxge_set_promisc(nxgep, on)) {
3796 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3797 		    "<== nxge_m_promisc: set promisc failed"));
3798 		return (EINVAL);
3799 	}
3800 
3801 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3802 	    "<== nxge_m_promisc: on %d", on));
3803 
3804 	return (0);
3805 }
3806 
3807 static void
3808 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
3809 {
3810 	p_nxge_t 	nxgep = (p_nxge_t)arg;
3811 	struct 		iocblk *iocp;
3812 	boolean_t 	need_privilege;
3813 	int 		err;
3814 	int 		cmd;
3815 
3816 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3817 
3818 	iocp = (struct iocblk *)mp->b_rptr;
3819 	iocp->ioc_error = 0;
3820 	need_privilege = B_TRUE;
3821 	cmd = iocp->ioc_cmd;
3822 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3823 	switch (cmd) {
3824 	default:
3825 		miocnak(wq, mp, 0, EINVAL);
3826 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3827 		return;
3828 
3829 	case LB_GET_INFO_SIZE:
3830 	case LB_GET_INFO:
3831 	case LB_GET_MODE:
3832 		need_privilege = B_FALSE;
3833 		break;
3834 	case LB_SET_MODE:
3835 		break;
3836 
3837 
3838 	case NXGE_GET_MII:
3839 	case NXGE_PUT_MII:
3840 	case NXGE_GET64:
3841 	case NXGE_PUT64:
3842 	case NXGE_GET_TX_RING_SZ:
3843 	case NXGE_GET_TX_DESC:
3844 	case NXGE_TX_SIDE_RESET:
3845 	case NXGE_RX_SIDE_RESET:
3846 	case NXGE_GLOBAL_RESET:
3847 	case NXGE_RESET_MAC:
3848 	case NXGE_TX_REGS_DUMP:
3849 	case NXGE_RX_REGS_DUMP:
3850 	case NXGE_INT_REGS_DUMP:
3851 	case NXGE_VIR_INT_REGS_DUMP:
3852 	case NXGE_PUT_TCAM:
3853 	case NXGE_GET_TCAM:
3854 	case NXGE_RTRACE:
3855 	case NXGE_RDUMP:
3856 
3857 		need_privilege = B_FALSE;
3858 		break;
3859 	case NXGE_INJECT_ERR:
3860 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3861 		nxge_err_inject(nxgep, wq, mp);
3862 		break;
3863 	}
3864 
3865 	if (need_privilege) {
3866 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3867 		if (err != 0) {
3868 			miocnak(wq, mp, 0, err);
3869 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3870 			    "<== nxge_m_ioctl: no priv"));
3871 			return;
3872 		}
3873 	}
3874 
3875 	switch (cmd) {
3876 
3877 	case LB_GET_MODE:
3878 	case LB_SET_MODE:
3879 	case LB_GET_INFO_SIZE:
3880 	case LB_GET_INFO:
3881 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3882 		break;
3883 
3884 	case NXGE_GET_MII:
3885 	case NXGE_PUT_MII:
3886 	case NXGE_PUT_TCAM:
3887 	case NXGE_GET_TCAM:
3888 	case NXGE_GET64:
3889 	case NXGE_PUT64:
3890 	case NXGE_GET_TX_RING_SZ:
3891 	case NXGE_GET_TX_DESC:
3892 	case NXGE_TX_SIDE_RESET:
3893 	case NXGE_RX_SIDE_RESET:
3894 	case NXGE_GLOBAL_RESET:
3895 	case NXGE_RESET_MAC:
3896 	case NXGE_TX_REGS_DUMP:
3897 	case NXGE_RX_REGS_DUMP:
3898 	case NXGE_INT_REGS_DUMP:
3899 	case NXGE_VIR_INT_REGS_DUMP:
3900 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3901 		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
3902 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3903 		break;
3904 	}
3905 
3906 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3907 }
3908 
3909 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3910 
3911 static void
3912 nxge_m_resources(void *arg)
3913 {
3914 	p_nxge_t		nxgep = arg;
3915 	mac_rx_fifo_t 		mrf;
3916 
3917 	nxge_grp_set_t		*set = &nxgep->rx_set;
3918 	uint8_t			rdc;
3919 
3920 	rx_rcr_ring_t		*ring;
3921 
3922 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3923 
3924 	MUTEX_ENTER(nxgep->genlock);
3925 
3926 	if (set->owned.map == 0) {
3927 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
3928 		    "nxge_m_resources: no receive resources"));
3929 		goto nxge_m_resources_exit;
3930 	}
3931 
3932 	/*
3933 	 * CR 6492541 Check to see if the drv_state has been initialized,
3934 	 * if not * call nxge_init().
3935 	 */
3936 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3937 		if (nxge_init(nxgep) != NXGE_OK)
3938 			goto nxge_m_resources_exit;
3939 	}
3940 
3941 	mrf.mrf_type = MAC_RX_FIFO;
3942 	mrf.mrf_blank = nxge_rx_hw_blank;
3943 	mrf.mrf_arg = (void *)nxgep;
3944 
3945 	mrf.mrf_normal_blank_time = 128;
3946 	mrf.mrf_normal_pkt_count = 8;
3947 
3948 	/*
3949 	 * Export our receive resources to the MAC layer.
3950 	 */
3951 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
3952 		if ((1 << rdc) & set->owned.map) {
3953 			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
3954 			if (ring == 0) {
3955 				/*
3956 				 * This is a big deal only if we are
3957 				 * *not* in an LDOMs environment.
3958 				 */
3959 				if (nxgep->environs == SOLARIS_DOMAIN) {
3960 					cmn_err(CE_NOTE,
3961 					    "==> nxge_m_resources: "
3962 					    "ring %d == 0", rdc);
3963 				}
3964 				continue;
3965 			}
3966 			ring->rcr_mac_handle = mac_resource_add
3967 			    (nxgep->mach, (mac_resource_t *)&mrf);
3968 
3969 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3970 			    "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
3971 			    rdc, ring, ring->rcr_mac_handle));
3972 		}
3973 	}
3974 
3975 nxge_m_resources_exit:
3976 	MUTEX_EXIT(nxgep->genlock);
3977 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3978 }
3979 
3980 void
3981 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3982 {
3983 	p_nxge_mmac_stats_t mmac_stats;
3984 	int i;
3985 	nxge_mmac_t *mmac_info;
3986 
3987 	mmac_info = &nxgep->nxge_mmac_info;
3988 
3989 	mmac_stats = &nxgep->statsp->mmac_stats;
3990 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3991 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3992 
3993 	for (i = 0; i < ETHERADDRL; i++) {
3994 		if (factory) {
3995 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3996 			    = mmac_info->factory_mac_pool[slot][
3997 			    (ETHERADDRL-1) - i];
3998 		} else {
3999 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4000 			    = mmac_info->mac_pool[slot].addr[
4001 			    (ETHERADDRL - 1) - i];
4002 		}
4003 	}
4004 }
4005 
4006 /*
4007  * nxge_altmac_set() -- Set an alternate MAC address
4008  */
4009 static int
4010 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
4011 {
4012 	uint8_t addrn;
4013 	uint8_t portn;
4014 	npi_mac_addr_t altmac;
4015 	hostinfo_t mac_rdc;
4016 	p_nxge_class_pt_cfg_t clscfgp;
4017 
4018 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4019 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4020 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4021 
4022 	portn = nxgep->mac.portnum;
4023 	addrn = (uint8_t)slot - 1;
4024 
4025 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
4026 	    addrn, &altmac) != NPI_SUCCESS)
4027 		return (EIO);
4028 
4029 	/*
4030 	 * Set the rdc table number for the host info entry
4031 	 * for this mac address slot.
4032 	 */
4033 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4034 	mac_rdc.value = 0;
4035 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
4036 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4037 
4038 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4039 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4040 		return (EIO);
4041 	}
4042 
4043 	/*
4044 	 * Enable comparison with the alternate MAC address.
4045 	 * While the first alternate addr is enabled by bit 1 of register
4046 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4047 	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4048 	 * accordingly before calling npi_mac_altaddr_entry.
4049 	 */
4050 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4051 		addrn = (uint8_t)slot - 1;
4052 	else
4053 		addrn = (uint8_t)slot;
4054 
4055 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
4056 	    != NPI_SUCCESS)
4057 		return (EIO);
4058 
4059 	return (0);
4060 }
4061 
4062 /*
4063  * nxeg_m_mmac_add() - find an unused address slot, set the address
4064  * value to the one specified, enable the port to start filtering on
4065  * the new MAC address.  Returns 0 on success.
4066  */
4067 int
4068 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
4069 {
4070 	p_nxge_t nxgep = arg;
4071 	mac_addr_slot_t slot;
4072 	nxge_mmac_t *mmac_info;
4073 	int err;
4074 	nxge_status_t status;
4075 
4076 	mutex_enter(nxgep->genlock);
4077 
4078 	/*
4079 	 * Make sure that nxge is initialized, if _start() has
4080 	 * not been called.
4081 	 */
4082 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4083 		status = nxge_init(nxgep);
4084 		if (status != NXGE_OK) {
4085 			mutex_exit(nxgep->genlock);
4086 			return (ENXIO);
4087 		}
4088 	}
4089 
4090 	mmac_info = &nxgep->nxge_mmac_info;
4091 	if (mmac_info->naddrfree == 0) {
4092 		mutex_exit(nxgep->genlock);
4093 		return (ENOSPC);
4094 	}
4095 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
4096 	    maddr->mma_addrlen)) {
4097 		mutex_exit(nxgep->genlock);
4098 		return (EINVAL);
4099 	}
4100 	/*
4101 	 * 	Search for the first available slot. Because naddrfree
4102 	 * is not zero, we are guaranteed to find one.
4103 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
4104 	 * MAC slot is slot 1.
4105 	 *	Each of the first two ports of Neptune has 16 alternate
4106 	 * MAC slots but only the first 7 (of 15) slots have assigned factory
4107 	 * MAC addresses. We first search among the slots without bundled
4108 	 * factory MACs. If we fail to find one in that range, then we
4109 	 * search the slots with bundled factory MACs.  A factory MAC
4110 	 * will be wasted while the slot is used with a user MAC address.
4111 	 * But the slot could be used by factory MAC again after calling
4112 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4113 	 */
4114 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
4115 		for (slot = mmac_info->num_factory_mmac + 1;
4116 		    slot <= mmac_info->num_mmac; slot++) {
4117 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4118 				break;
4119 		}
4120 		if (slot > mmac_info->num_mmac) {
4121 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
4122 			    slot++) {
4123 				if (!(mmac_info->mac_pool[slot].flags
4124 				    & MMAC_SLOT_USED))
4125 					break;
4126 			}
4127 		}
4128 	} else {
4129 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
4130 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4131 				break;
4132 		}
4133 	}
4134 	ASSERT(slot <= mmac_info->num_mmac);
4135 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
4136 		mutex_exit(nxgep->genlock);
4137 		return (err);
4138 	}
4139 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4140 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4141 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4142 	mmac_info->naddrfree--;
4143 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4144 
4145 	maddr->mma_slot = slot;
4146 
4147 	mutex_exit(nxgep->genlock);
4148 	return (0);
4149 }
4150 
4151 /*
4152  * This function reserves an unused slot and programs the slot and the HW
4153  * with a factory mac address.
4154  */
4155 static int
4156 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
4157 {
4158 	p_nxge_t nxgep = arg;
4159 	mac_addr_slot_t slot;
4160 	nxge_mmac_t *mmac_info;
4161 	int err;
4162 	nxge_status_t status;
4163 
4164 	mutex_enter(nxgep->genlock);
4165 
4166 	/*
4167 	 * Make sure that nxge is initialized, if _start() has
4168 	 * not been called.
4169 	 */
4170 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4171 		status = nxge_init(nxgep);
4172 		if (status != NXGE_OK) {
4173 			mutex_exit(nxgep->genlock);
4174 			return (ENXIO);
4175 		}
4176 	}
4177 
4178 	mmac_info = &nxgep->nxge_mmac_info;
4179 	if (mmac_info->naddrfree == 0) {
4180 		mutex_exit(nxgep->genlock);
4181 		return (ENOSPC);
4182 	}
4183 
4184 	slot = maddr->mma_slot;
4185 	if (slot == -1) {  /* -1: Take the first available slot */
4186 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
4187 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4188 				break;
4189 		}
4190 		if (slot > mmac_info->num_factory_mmac) {
4191 			mutex_exit(nxgep->genlock);
4192 			return (ENOSPC);
4193 		}
4194 	}
4195 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
4196 		/*
4197 		 * Do not support factory MAC at a slot greater than
4198 		 * num_factory_mmac even when there are available factory
4199 		 * MAC addresses because the alternate MACs are bundled with
4200 		 * slot[1] through slot[num_factory_mmac]
4201 		 */
4202 		mutex_exit(nxgep->genlock);
4203 		return (EINVAL);
4204 	}
4205 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4206 		mutex_exit(nxgep->genlock);
4207 		return (EBUSY);
4208 	}
4209 	/* Verify the address to be reserved */
4210 	if (!mac_unicst_verify(nxgep->mach,
4211 	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
4212 		mutex_exit(nxgep->genlock);
4213 		return (EINVAL);
4214 	}
4215 	if (err = nxge_altmac_set(nxgep,
4216 	    mmac_info->factory_mac_pool[slot], slot)) {
4217 		mutex_exit(nxgep->genlock);
4218 		return (err);
4219 	}
4220 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
4221 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
4222 	mmac_info->naddrfree--;
4223 
4224 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
4225 	mutex_exit(nxgep->genlock);
4226 
4227 	/* Pass info back to the caller */
4228 	maddr->mma_slot = slot;
4229 	maddr->mma_addrlen = ETHERADDRL;
4230 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
4231 
4232 	return (0);
4233 }
4234 
4235 /*
4236  * Remove the specified mac address and update the HW not to filter
4237  * the mac address anymore.
4238  */
4239 int
4240 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
4241 {
4242 	p_nxge_t nxgep = arg;
4243 	nxge_mmac_t *mmac_info;
4244 	uint8_t addrn;
4245 	uint8_t portn;
4246 	int err = 0;
4247 	nxge_status_t status;
4248 
4249 	mutex_enter(nxgep->genlock);
4250 
4251 	/*
4252 	 * Make sure that nxge is initialized, if _start() has
4253 	 * not been called.
4254 	 */
4255 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4256 		status = nxge_init(nxgep);
4257 		if (status != NXGE_OK) {
4258 			mutex_exit(nxgep->genlock);
4259 			return (ENXIO);
4260 		}
4261 	}
4262 
4263 	mmac_info = &nxgep->nxge_mmac_info;
4264 	if (slot < 1 || slot > mmac_info->num_mmac) {
4265 		mutex_exit(nxgep->genlock);
4266 		return (EINVAL);
4267 	}
4268 
4269 	portn = nxgep->mac.portnum;
4270 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4271 		addrn = (uint8_t)slot - 1;
4272 	else
4273 		addrn = (uint8_t)slot;
4274 
4275 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4276 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4277 		    == NPI_SUCCESS) {
4278 			mmac_info->naddrfree++;
4279 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4280 			/*
4281 			 * Regardless if the MAC we just stopped filtering
4282 			 * is a user addr or a facory addr, we must set
4283 			 * the MMAC_VENDOR_ADDR flag if this slot has an
4284 			 * associated factory MAC to indicate that a factory
4285 			 * MAC is available.
4286 			 */
4287 			if (slot <= mmac_info->num_factory_mmac) {
4288 				mmac_info->mac_pool[slot].flags
4289 				    |= MMAC_VENDOR_ADDR;
4290 			}
4291 			/*
4292 			 * Clear mac_pool[slot].addr so that kstat shows 0
4293 			 * alternate MAC address if the slot is not used.
4294 			 * (But nxge_m_mmac_get returns the factory MAC even
4295 			 * when the slot is not used!)
4296 			 */
4297 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4298 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4299 		} else {
4300 			err = EIO;
4301 		}
4302 	} else {
4303 		err = EINVAL;
4304 	}
4305 
4306 	mutex_exit(nxgep->genlock);
4307 	return (err);
4308 }
4309 
4310 /*
4311  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
4312  */
4313 static int
4314 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
4315 {
4316 	p_nxge_t nxgep = arg;
4317 	mac_addr_slot_t slot;
4318 	nxge_mmac_t *mmac_info;
4319 	int err = 0;
4320 	nxge_status_t status;
4321 
4322 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
4323 	    maddr->mma_addrlen))
4324 		return (EINVAL);
4325 
4326 	slot = maddr->mma_slot;
4327 
4328 	mutex_enter(nxgep->genlock);
4329 
4330 	/*
4331 	 * Make sure that nxge is initialized, if _start() has
4332 	 * not been called.
4333 	 */
4334 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4335 		status = nxge_init(nxgep);
4336 		if (status != NXGE_OK) {
4337 			mutex_exit(nxgep->genlock);
4338 			return (ENXIO);
4339 		}
4340 	}
4341 
4342 	mmac_info = &nxgep->nxge_mmac_info;
4343 	if (slot < 1 || slot > mmac_info->num_mmac) {
4344 		mutex_exit(nxgep->genlock);
4345 		return (EINVAL);
4346 	}
4347 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4348 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
4349 		    != 0) {
4350 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
4351 			    ETHERADDRL);
4352 			/*
4353 			 * Assume that the MAC passed down from the caller
4354 			 * is not a factory MAC address (The user should
4355 			 * call mmac_remove followed by mmac_reserve if
4356 			 * he wants to use the factory MAC for this slot).
4357 			 */
4358 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4359 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4360 		}
4361 	} else {
4362 		err = EINVAL;
4363 	}
4364 	mutex_exit(nxgep->genlock);
4365 	return (err);
4366 }
4367 
4368 /*
4369  * nxge_m_mmac_get() - Get the MAC address and other information
4370  * related to the slot.  mma_flags should be set to 0 in the call.
4371  * Note: although kstat shows MAC address as zero when a slot is
4372  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
4373  * to the caller as long as the slot is not using a user MAC address.
4374  * The following table shows the rules,
4375  *
4376  *				   USED    VENDOR    mma_addr
4377  * ------------------------------------------------------------
4378  * (1) Slot uses a user MAC:        yes      no     user MAC
4379  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
4380  * (3) Slot is not used but is
4381  *     factory MAC capable:         no       yes    factory MAC
4382  * (4) Slot is not used and is
4383  *     not factory MAC capable:     no       no        0
4384  * ------------------------------------------------------------
4385  */
4386 static int
4387 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
4388 {
4389 	nxge_t *nxgep = arg;
4390 	mac_addr_slot_t slot;
4391 	nxge_mmac_t *mmac_info;
4392 	nxge_status_t status;
4393 
4394 	slot = maddr->mma_slot;
4395 
4396 	mutex_enter(nxgep->genlock);
4397 
4398 	/*
4399 	 * Make sure that nxge is initialized, if _start() has
4400 	 * not been called.
4401 	 */
4402 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4403 		status = nxge_init(nxgep);
4404 		if (status != NXGE_OK) {
4405 			mutex_exit(nxgep->genlock);
4406 			return (ENXIO);
4407 		}
4408 	}
4409 
4410 	mmac_info = &nxgep->nxge_mmac_info;
4411 
4412 	if (slot < 1 || slot > mmac_info->num_mmac) {
4413 		mutex_exit(nxgep->genlock);
4414 		return (EINVAL);
4415 	}
4416 	maddr->mma_flags = 0;
4417 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
4418 		maddr->mma_flags |= MMAC_SLOT_USED;
4419 
4420 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
4421 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
4422 		bcopy(mmac_info->factory_mac_pool[slot],
4423 		    maddr->mma_addr, ETHERADDRL);
4424 		maddr->mma_addrlen = ETHERADDRL;
4425 	} else {
4426 		if (maddr->mma_flags & MMAC_SLOT_USED) {
4427 			bcopy(mmac_info->mac_pool[slot].addr,
4428 			    maddr->mma_addr, ETHERADDRL);
4429 			maddr->mma_addrlen = ETHERADDRL;
4430 		} else {
4431 			bzero(maddr->mma_addr, ETHERADDRL);
4432 			maddr->mma_addrlen = 0;
4433 		}
4434 	}
4435 	mutex_exit(nxgep->genlock);
4436 	return (0);
4437 }
4438 
4439 static boolean_t
4440 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4441 {
4442 	nxge_t *nxgep = arg;
4443 	uint32_t *txflags = cap_data;
4444 	multiaddress_capab_t *mmacp = cap_data;
4445 
4446 	switch (cap) {
4447 	case MAC_CAPAB_HCKSUM:
4448 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4449 		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4450 		if (nxge_cksum_offload <= 1) {
4451 			*txflags = HCKSUM_INET_PARTIAL;
4452 		}
4453 		break;
4454 
4455 	case MAC_CAPAB_POLL:
4456 		/*
4457 		 * There's nothing for us to fill in, simply returning
4458 		 * B_TRUE stating that we support polling is sufficient.
4459 		 */
4460 		break;
4461 
4462 	case MAC_CAPAB_MULTIADDRESS:
4463 		mmacp = (multiaddress_capab_t *)cap_data;
4464 		mutex_enter(nxgep->genlock);
4465 
4466 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
4467 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
4468 		mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */
4469 		/*
4470 		 * maddr_handle is driver's private data, passed back to
4471 		 * entry point functions as arg.
4472 		 */
4473 		mmacp->maddr_handle	= nxgep;
4474 		mmacp->maddr_add	= nxge_m_mmac_add;
4475 		mmacp->maddr_remove	= nxge_m_mmac_remove;
4476 		mmacp->maddr_modify	= nxge_m_mmac_modify;
4477 		mmacp->maddr_get	= nxge_m_mmac_get;
4478 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
4479 
4480 		mutex_exit(nxgep->genlock);
4481 		break;
4482 
4483 	case MAC_CAPAB_LSO: {
4484 		mac_capab_lso_t *cap_lso = cap_data;
4485 
4486 		if (nxgep->soft_lso_enable) {
4487 			if (nxge_cksum_offload <= 1) {
4488 				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4489 				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4490 					nxge_lso_max = NXGE_LSO_MAXLEN;
4491 				}
4492 				cap_lso->lso_basic_tcp_ipv4.lso_max =
4493 				    nxge_lso_max;
4494 			}
4495 			break;
4496 		} else {
4497 			return (B_FALSE);
4498 		}
4499 	}
4500 
4501 #if defined(sun4v)
4502 	case MAC_CAPAB_RINGS: {
4503 		mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;
4504 
4505 		/*
4506 		 * Only the service domain driver responds to
4507 		 * this capability request.
4508 		 */
4509 		if (isLDOMservice(nxgep)) {
4510 			mrings->mr_handle = (void *)nxgep;
4511 
4512 			/*
4513 			 * No dynamic allocation of groups and
4514 			 * rings at this time.  Shares dictate the
4515 			 * configuration.
4516 			 */
4517 			mrings->mr_gadd_ring = NULL;
4518 			mrings->mr_grem_ring = NULL;
4519 			mrings->mr_rget = NULL;
4520 			mrings->mr_gget = nxge_hio_group_get;
4521 
4522 			if (mrings->mr_type == MAC_RING_TYPE_RX) {
4523 				mrings->mr_rnum = 8; /* XXX */
4524 				mrings->mr_gnum = 6; /* XXX */
4525 			} else {
4526 				mrings->mr_rnum = 8; /* XXX */
4527 				mrings->mr_gnum = 0; /* XXX */
4528 			}
4529 		} else
4530 			return (B_FALSE);
4531 		break;
4532 	}
4533 
4534 	case MAC_CAPAB_SHARES: {
4535 		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4536 
4537 		/*
4538 		 * Only the service domain driver responds to
4539 		 * this capability request.
4540 		 */
4541 		if (isLDOMservice(nxgep)) {
4542 			mshares->ms_snum = 3;
4543 			mshares->ms_handle = (void *)nxgep;
4544 			mshares->ms_salloc = nxge_hio_share_alloc;
4545 			mshares->ms_sfree = nxge_hio_share_free;
4546 			mshares->ms_sadd = NULL;
4547 			mshares->ms_sremove = NULL;
4548 			mshares->ms_squery = nxge_hio_share_query;
4549 		} else
4550 			return (B_FALSE);
4551 		break;
4552 	}
4553 #endif
4554 	default:
4555 		return (B_FALSE);
4556 	}
4557 	return (B_TRUE);
4558 }
4559 
4560 static boolean_t
4561 nxge_param_locked(mac_prop_id_t pr_num)
4562 {
4563 	/*
4564 	 * All adv_* parameters are locked (read-only) while
4565 	 * the device is in any sort of loopback mode ...
4566 	 */
4567 	switch (pr_num) {
4568 		case MAC_PROP_ADV_1000FDX_CAP:
4569 		case MAC_PROP_EN_1000FDX_CAP:
4570 		case MAC_PROP_ADV_1000HDX_CAP:
4571 		case MAC_PROP_EN_1000HDX_CAP:
4572 		case MAC_PROP_ADV_100FDX_CAP:
4573 		case MAC_PROP_EN_100FDX_CAP:
4574 		case MAC_PROP_ADV_100HDX_CAP:
4575 		case MAC_PROP_EN_100HDX_CAP:
4576 		case MAC_PROP_ADV_10FDX_CAP:
4577 		case MAC_PROP_EN_10FDX_CAP:
4578 		case MAC_PROP_ADV_10HDX_CAP:
4579 		case MAC_PROP_EN_10HDX_CAP:
4580 		case MAC_PROP_AUTONEG:
4581 		case MAC_PROP_FLOWCTRL:
4582 			return (B_TRUE);
4583 	}
4584 	return (B_FALSE);
4585 }
4586 
4587 /*
4588  * callback functions for set/get of properties
4589  */
4590 static int
4591 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4592     uint_t pr_valsize, const void *pr_val)
4593 {
4594 	nxge_t		*nxgep = barg;
4595 	p_nxge_param_t	param_arr;
4596 	p_nxge_stats_t	statsp;
4597 	int		err = 0;
4598 	uint8_t		val;
4599 	uint32_t	cur_mtu, new_mtu, old_framesize;
4600 	link_flowctrl_t	fl;
4601 
4602 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4603 	param_arr = nxgep->param_arr;
4604 	statsp = nxgep->statsp;
4605 	mutex_enter(nxgep->genlock);
4606 	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4607 	    nxge_param_locked(pr_num)) {
4608 		/*
4609 		 * All adv_* parameters are locked (read-only)
4610 		 * while the device is in any sort of loopback mode.
4611 		 */
4612 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4613 		    "==> nxge_m_setprop: loopback mode: read only"));
4614 		mutex_exit(nxgep->genlock);
4615 		return (EBUSY);
4616 	}
4617 
4618 	val = *(uint8_t *)pr_val;
4619 	switch (pr_num) {
4620 		case MAC_PROP_EN_1000FDX_CAP:
4621 			nxgep->param_en_1000fdx = val;
4622 			param_arr[param_anar_1000fdx].value = val;
4623 
4624 			goto reprogram;
4625 
4626 		case MAC_PROP_EN_100FDX_CAP:
4627 			nxgep->param_en_100fdx = val;
4628 			param_arr[param_anar_100fdx].value = val;
4629 
4630 			goto reprogram;
4631 
4632 		case MAC_PROP_EN_10FDX_CAP:
4633 			nxgep->param_en_10fdx = val;
4634 			param_arr[param_anar_10fdx].value = val;
4635 
4636 			goto reprogram;
4637 
4638 		case MAC_PROP_EN_1000HDX_CAP:
4639 		case MAC_PROP_EN_100HDX_CAP:
4640 		case MAC_PROP_EN_10HDX_CAP:
4641 		case MAC_PROP_ADV_1000FDX_CAP:
4642 		case MAC_PROP_ADV_1000HDX_CAP:
4643 		case MAC_PROP_ADV_100FDX_CAP:
4644 		case MAC_PROP_ADV_100HDX_CAP:
4645 		case MAC_PROP_ADV_10FDX_CAP:
4646 		case MAC_PROP_ADV_10HDX_CAP:
4647 		case MAC_PROP_STATUS:
4648 		case MAC_PROP_SPEED:
4649 		case MAC_PROP_DUPLEX:
4650 			err = EINVAL; /* cannot set read-only properties */
4651 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4652 			    "==> nxge_m_setprop:  read only property %d",
4653 			    pr_num));
4654 			break;
4655 
4656 		case MAC_PROP_AUTONEG:
4657 			param_arr[param_autoneg].value = val;
4658 
4659 			goto reprogram;
4660 
4661 		case MAC_PROP_MTU:
4662 			if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4663 				err = EBUSY;
4664 				break;
4665 			}
4666 
4667 			cur_mtu = nxgep->mac.default_mtu;
4668 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4669 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4670 			    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4671 			    new_mtu, nxgep->mac.is_jumbo));
4672 
4673 			if (new_mtu == cur_mtu) {
4674 				err = 0;
4675 				break;
4676 			}
4677 			if (new_mtu < NXGE_DEFAULT_MTU ||
4678 			    new_mtu > NXGE_MAXIMUM_MTU) {
4679 				err = EINVAL;
4680 				break;
4681 			}
4682 
4683 			if ((new_mtu > NXGE_DEFAULT_MTU) &&
4684 			    !nxgep->mac.is_jumbo) {
4685 				err = EINVAL;
4686 				break;
4687 			}
4688 
4689 			old_framesize = (uint32_t)nxgep->mac.maxframesize;
4690 			nxgep->mac.maxframesize = (uint16_t)
4691 			    (new_mtu + NXGE_EHEADER_VLAN_CRC);
4692 			if (nxge_mac_set_framesize(nxgep)) {
4693 				nxgep->mac.maxframesize =
4694 				    (uint16_t)old_framesize;
4695 				err = EINVAL;
4696 				break;
4697 			}
4698 
4699 			err = mac_maxsdu_update(nxgep->mach, new_mtu);
4700 			if (err) {
4701 				nxgep->mac.maxframesize =
4702 				    (uint16_t)old_framesize;
4703 				err = EINVAL;
4704 				break;
4705 			}
4706 
4707 			nxgep->mac.default_mtu = new_mtu;
4708 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4709 			    "==> nxge_m_setprop: set MTU: %d maxframe %d",
4710 			    new_mtu, nxgep->mac.maxframesize));
4711 			break;
4712 
4713 		case MAC_PROP_FLOWCTRL:
4714 			bcopy(pr_val, &fl, sizeof (fl));
4715 			switch (fl) {
4716 			default:
4717 				err = EINVAL;
4718 				break;
4719 
4720 			case LINK_FLOWCTRL_NONE:
4721 				param_arr[param_anar_pause].value = 0;
4722 				break;
4723 
4724 			case LINK_FLOWCTRL_RX:
4725 				param_arr[param_anar_pause].value = 1;
4726 				break;
4727 
4728 			case LINK_FLOWCTRL_TX:
4729 			case LINK_FLOWCTRL_BI:
4730 				err = EINVAL;
4731 				break;
4732 			}
4733 
4734 reprogram:
4735 			if (err == 0) {
4736 				if (!nxge_param_link_update(nxgep)) {
4737 					err = EINVAL;
4738 				}
4739 			}
4740 			break;
4741 		case MAC_PROP_PRIVATE:
4742 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4743 			    "==> nxge_m_setprop: private property"));
4744 			err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
4745 			    pr_val);
4746 			break;
4747 
4748 		default:
4749 			err = ENOTSUP;
4750 			break;
4751 	}
4752 
4753 	mutex_exit(nxgep->genlock);
4754 
4755 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4756 	    "<== nxge_m_setprop (return %d)", err));
4757 	return (err);
4758 }
4759 
4760 static int
4761 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4762     uint_t pr_flags, uint_t pr_valsize, void *pr_val)
4763 {
4764 	nxge_t 		*nxgep = barg;
4765 	p_nxge_param_t	param_arr = nxgep->param_arr;
4766 	p_nxge_stats_t	statsp = nxgep->statsp;
4767 	int		err = 0;
4768 	link_flowctrl_t	fl;
4769 	uint64_t	tmp = 0;
4770 	link_state_t	ls;
4771 	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);
4772 
4773 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4774 	    "==> nxge_m_getprop: pr_num %d", pr_num));
4775 
4776 	if (pr_valsize == 0)
4777 		return (EINVAL);
4778 
4779 	if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) {
4780 		err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
4781 		return (err);
4782 	}
4783 
4784 	bzero(pr_val, pr_valsize);
4785 	switch (pr_num) {
4786 		case MAC_PROP_DUPLEX:
4787 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4788 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4789 			    "==> nxge_m_getprop: duplex mode %d",
4790 			    *(uint8_t *)pr_val));
4791 			break;
4792 
4793 		case MAC_PROP_SPEED:
4794 			if (pr_valsize < sizeof (uint64_t))
4795 				return (EINVAL);
4796 			tmp = statsp->mac_stats.link_speed * 1000000ull;
4797 			bcopy(&tmp, pr_val, sizeof (tmp));
4798 			break;
4799 
4800 		case MAC_PROP_STATUS:
4801 			if (pr_valsize < sizeof (link_state_t))
4802 				return (EINVAL);
4803 			if (!statsp->mac_stats.link_up)
4804 				ls = LINK_STATE_DOWN;
4805 			else
4806 				ls = LINK_STATE_UP;
4807 			bcopy(&ls, pr_val, sizeof (ls));
4808 			break;
4809 
4810 		case MAC_PROP_AUTONEG:
4811 			*(uint8_t *)pr_val =
4812 			    param_arr[param_autoneg].value;
4813 			break;
4814 
4815 		case MAC_PROP_FLOWCTRL:
4816 			if (pr_valsize < sizeof (link_flowctrl_t))
4817 				return (EINVAL);
4818 
4819 			fl = LINK_FLOWCTRL_NONE;
4820 			if (param_arr[param_anar_pause].value) {
4821 				fl = LINK_FLOWCTRL_RX;
4822 			}
4823 			bcopy(&fl, pr_val, sizeof (fl));
4824 			break;
4825 
4826 		case MAC_PROP_ADV_1000FDX_CAP:
4827 			*(uint8_t *)pr_val =
4828 			    param_arr[param_anar_1000fdx].value;
4829 			break;
4830 
4831 		case MAC_PROP_EN_1000FDX_CAP:
4832 			*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4833 			break;
4834 
4835 		case MAC_PROP_ADV_100FDX_CAP:
4836 			*(uint8_t *)pr_val =
4837 			    param_arr[param_anar_100fdx].value;
4838 			break;
4839 
4840 		case MAC_PROP_EN_100FDX_CAP:
4841 			*(uint8_t *)pr_val = nxgep->param_en_100fdx;
4842 			break;
4843 
4844 		case MAC_PROP_ADV_10FDX_CAP:
4845 			*(uint8_t *)pr_val =
4846 			    param_arr[param_anar_10fdx].value;
4847 			break;
4848 
4849 		case MAC_PROP_EN_10FDX_CAP:
4850 			*(uint8_t *)pr_val = nxgep->param_en_10fdx;
4851 			break;
4852 
4853 		case MAC_PROP_EN_1000HDX_CAP:
4854 		case MAC_PROP_EN_100HDX_CAP:
4855 		case MAC_PROP_EN_10HDX_CAP:
4856 		case MAC_PROP_ADV_1000HDX_CAP:
4857 		case MAC_PROP_ADV_100HDX_CAP:
4858 		case MAC_PROP_ADV_10HDX_CAP:
4859 			err = ENOTSUP;
4860 			break;
4861 
4862 		case MAC_PROP_PRIVATE:
4863 			err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
4864 			    pr_valsize, pr_val);
4865 			break;
4866 		default:
4867 			err = EINVAL;
4868 			break;
4869 	}
4870 
4871 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));
4872 
4873 	return (err);
4874 }
4875 
4876 /* ARGSUSED */
4877 static int
4878 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4879     const void *pr_val)
4880 {
4881 	p_nxge_param_t	param_arr = nxgep->param_arr;
4882 	int		err = 0;
4883 	long		result;
4884 
4885 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4886 	    "==> nxge_set_priv_prop: name %s", pr_name));
4887 
4888 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
4889 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4890 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4891 		    "<== nxge_set_priv_prop: name %s "
4892 		    "pr_val %s result %d "
4893 		    "param %d is_jumbo %d",
4894 		    pr_name, pr_val, result,
4895 		    param_arr[param_accept_jumbo].value,
4896 		    nxgep->mac.is_jumbo));
4897 
4898 		if (result > 1 || result < 0) {
4899 			err = EINVAL;
4900 		} else {
4901 			if (nxgep->mac.is_jumbo ==
4902 			    (uint32_t)result) {
4903 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4904 				    "no change (%d %d)",
4905 				    nxgep->mac.is_jumbo,
4906 				    result));
4907 				return (0);
4908 			}
4909 		}
4910 
4911 		param_arr[param_accept_jumbo].value = result;
4912 		nxgep->mac.is_jumbo = B_FALSE;
4913 		if (result) {
4914 			nxgep->mac.is_jumbo = B_TRUE;
4915 		}
4916 
4917 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4918 		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
4919 		    pr_name, result, nxgep->mac.is_jumbo));
4920 
4921 		return (err);
4922 	}
4923 
4924 	/* Blanking */
4925 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4926 		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4927 		    (char *)pr_val,
4928 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4929 		if (err) {
4930 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4931 			    "<== nxge_set_priv_prop: "
4932 			    "unable to set (%s)", pr_name));
4933 			err = EINVAL;
4934 		} else {
4935 			err = 0;
4936 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4937 			    "<== nxge_set_priv_prop: "
4938 			    "set (%s)", pr_name));
4939 		}
4940 
4941 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4942 		    "<== nxge_set_priv_prop: name %s (value %d)",
4943 		    pr_name, result));
4944 
4945 		return (err);
4946 	}
4947 
4948 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4949 		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4950 		    (char *)pr_val,
4951 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4952 		if (err) {
4953 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4954 			    "<== nxge_set_priv_prop: "
4955 			    "unable to set (%s)", pr_name));
4956 			err = EINVAL;
4957 		} else {
4958 			err = 0;
4959 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4960 			    "<== nxge_set_priv_prop: "
4961 			    "set (%s)", pr_name));
4962 		}
4963 
4964 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4965 		    "<== nxge_set_priv_prop: name %s (value %d)",
4966 		    pr_name, result));
4967 
4968 		return (err);
4969 	}
4970 
4971 	/* Classification */
4972 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4973 		if (pr_val == NULL) {
4974 			err = EINVAL;
4975 			return (err);
4976 		}
4977 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4978 
4979 		err = nxge_param_set_ip_opt(nxgep, NULL,
4980 		    NULL, (char *)pr_val,
4981 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4982 
4983 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4984 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4985 		    pr_name, result));
4986 
4987 		return (err);
4988 	}
4989 
4990 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4991 		if (pr_val == NULL) {
4992 			err = EINVAL;
4993 			return (err);
4994 		}
4995 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4996 
4997 		err = nxge_param_set_ip_opt(nxgep, NULL,
4998 		    NULL, (char *)pr_val,
4999 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5000 
5001 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5002 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5003 		    pr_name, result));
5004 
5005 		return (err);
5006 	}
5007 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5008 		if (pr_val == NULL) {
5009 			err = EINVAL;
5010 			return (err);
5011 		}
5012 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5013 
5014 		err = nxge_param_set_ip_opt(nxgep, NULL,
5015 		    NULL, (char *)pr_val,
5016 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5017 
5018 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5019 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5020 		    pr_name, result));
5021 
5022 		return (err);
5023 	}
5024 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5025 		if (pr_val == NULL) {
5026 			err = EINVAL;
5027 			return (err);
5028 		}
5029 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5030 
5031 		err = nxge_param_set_ip_opt(nxgep, NULL,
5032 		    NULL, (char *)pr_val,
5033 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5034 
5035 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5036 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5037 		    pr_name, result));
5038 
5039 		return (err);
5040 	}
5041 
5042 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5043 		if (pr_val == NULL) {
5044 			err = EINVAL;
5045 			return (err);
5046 		}
5047 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5048 
5049 		err = nxge_param_set_ip_opt(nxgep, NULL,
5050 		    NULL, (char *)pr_val,
5051 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5052 
5053 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5054 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5055 		    pr_name, result));
5056 
5057 		return (err);
5058 	}
5059 
5060 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5061 		if (pr_val == NULL) {
5062 			err = EINVAL;
5063 			return (err);
5064 		}
5065 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5066 
5067 		err = nxge_param_set_ip_opt(nxgep, NULL,
5068 		    NULL, (char *)pr_val,
5069 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5070 
5071 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5072 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5073 		    pr_name, result));
5074 
5075 		return (err);
5076 	}
5077 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5078 		if (pr_val == NULL) {
5079 			err = EINVAL;
5080 			return (err);
5081 		}
5082 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5083 
5084 		err = nxge_param_set_ip_opt(nxgep, NULL,
5085 		    NULL, (char *)pr_val,
5086 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5087 
5088 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5089 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5090 		    pr_name, result));
5091 
5092 		return (err);
5093 	}
5094 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5095 		if (pr_val == NULL) {
5096 			err = EINVAL;
5097 			return (err);
5098 		}
5099 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5100 
5101 		err = nxge_param_set_ip_opt(nxgep, NULL,
5102 		    NULL, (char *)pr_val,
5103 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5104 
5105 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5106 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5107 		    pr_name, result));
5108 
5109 		return (err);
5110 	}
5111 
5112 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5113 		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
5114 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5115 			    "==> nxge_set_priv_prop: name %s (busy)", pr_name));
5116 			err = EBUSY;
5117 			return (err);
5118 		}
5119 		if (pr_val == NULL) {
5120 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5121 			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
5122 			err = EINVAL;
5123 			return (err);
5124 		}
5125 
5126 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5127 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5128 		    "<== nxge_set_priv_prop: name %s "
5129 		    "(lso %d pr_val %s value %d)",
5130 		    pr_name, nxgep->soft_lso_enable, pr_val, result));
5131 
5132 		if (result > 1 || result < 0) {
5133 			err = EINVAL;
5134 		} else {
5135 			if (nxgep->soft_lso_enable == (uint32_t)result) {
5136 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5137 				    "no change (%d %d)",
5138 				    nxgep->soft_lso_enable, result));
5139 				return (0);
5140 			}
5141 		}
5142 
5143 		nxgep->soft_lso_enable = (int)result;
5144 
5145 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5146 		    "<== nxge_set_priv_prop: name %s (value %d)",
5147 		    pr_name, result));
5148 
5149 		return (err);
5150 	}
5151 	/*
5152 	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5153 	 * following code to be executed.
5154 	 */
5155 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5156 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5157 		    (caddr_t)&param_arr[param_anar_10gfdx]);
5158 		return (err);
5159 	}
5160 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5161 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5162 		    (caddr_t)&param_arr[param_anar_pause]);
5163 		return (err);
5164 	}
5165 
5166 	return (EINVAL);
5167 }
5168 
5169 static int
5170 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
5171     uint_t pr_valsize, void *pr_val)
5172 {
5173 	p_nxge_param_t	param_arr = nxgep->param_arr;
5174 	char		valstr[MAXNAMELEN];
5175 	int		err = EINVAL;
5176 	uint_t		strsize;
5177 	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);
5178 
5179 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5180 	    "==> nxge_get_priv_prop: property %s", pr_name));
5181 
5182 	/* function number */
5183 	if (strcmp(pr_name, "_function_number") == 0) {
5184 		if (is_default)
5185 			return (ENOTSUP);
5186 		(void) snprintf(valstr, sizeof (valstr), "%d",
5187 		    nxgep->function_num);
5188 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5189 		    "==> nxge_get_priv_prop: name %s "
5190 		    "(value %d valstr %s)",
5191 		    pr_name, nxgep->function_num, valstr));
5192 
5193 		err = 0;
5194 		goto done;
5195 	}
5196 
5197 	/* Neptune firmware version */
5198 	if (strcmp(pr_name, "_fw_version") == 0) {
5199 		if (is_default)
5200 			return (ENOTSUP);
5201 		(void) snprintf(valstr, sizeof (valstr), "%s",
5202 		    nxgep->vpd_info.ver);
5203 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5204 		    "==> nxge_get_priv_prop: name %s "
5205 		    "(value %d valstr %s)",
5206 		    pr_name, nxgep->vpd_info.ver, valstr));
5207 
5208 		err = 0;
5209 		goto done;
5210 	}
5211 
5212 	/* port PHY mode */
5213 	if (strcmp(pr_name, "_port_mode") == 0) {
5214 		if (is_default)
5215 			return (ENOTSUP);
5216 		switch (nxgep->mac.portmode) {
5217 		case PORT_1G_COPPER:
5218 			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5219 			    nxgep->hot_swappable_phy ?
5220 			    "[Hot Swappable]" : "");
5221 			break;
5222 		case PORT_1G_FIBER:
5223 			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5224 			    nxgep->hot_swappable_phy ?
5225 			    "[hot swappable]" : "");
5226 			break;
5227 		case PORT_10G_COPPER:
5228 			(void) snprintf(valstr, sizeof (valstr),
5229 			    "10G copper %s",
5230 			    nxgep->hot_swappable_phy ?
5231 			    "[hot swappable]" : "");
5232 			break;
5233 		case PORT_10G_FIBER:
5234 			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5235 			    nxgep->hot_swappable_phy ?
5236 			    "[hot swappable]" : "");
5237 			break;
5238 		case PORT_10G_SERDES:
5239 			(void) snprintf(valstr, sizeof (valstr),
5240 			    "10G serdes %s", nxgep->hot_swappable_phy ?
5241 			    "[hot swappable]" : "");
5242 			break;
5243 		case PORT_1G_SERDES:
5244 			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5245 			    nxgep->hot_swappable_phy ?
5246 			    "[hot swappable]" : "");
5247 			break;
5248 		case PORT_1G_TN1010:
5249 			(void) snprintf(valstr, sizeof (valstr),
5250 			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5251 			    "[hot swappable]" : "");
5252 			break;
5253 		case PORT_10G_TN1010:
5254 			(void) snprintf(valstr, sizeof (valstr),
5255 			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5256 			    "[hot swappable]" : "");
5257 			break;
5258 		case PORT_1G_RGMII_FIBER:
5259 			(void) snprintf(valstr, sizeof (valstr),
5260 			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5261 			    "[hot swappable]" : "");
5262 			break;
5263 		case PORT_HSP_MODE:
5264 			(void) snprintf(valstr, sizeof (valstr),
5265 			    "phy not present[hot swappable]");
5266 			break;
5267 		default:
5268 			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
5269 			    nxgep->hot_swappable_phy ?
5270 			    "[hot swappable]" : "");
5271 			break;
5272 		}
5273 
5274 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5275 		    "==> nxge_get_priv_prop: name %s (value %s)",
5276 		    pr_name, valstr));
5277 
5278 		err = 0;
5279 		goto done;
5280 	}
5281 
5282 	/* Hot swappable PHY */
5283 	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5284 		if (is_default)
5285 			return (ENOTSUP);
5286 		(void) snprintf(valstr, sizeof (valstr), "%s",
5287 		    nxgep->hot_swappable_phy ?
5288 		    "yes" : "no");
5289 
5290 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5291 		    "==> nxge_get_priv_prop: name %s "
5292 		    "(value %d valstr %s)",
5293 		    pr_name, nxgep->hot_swappable_phy, valstr));
5294 
5295 		err = 0;
5296 		goto done;
5297 	}
5298 
5299 
5300 	/* accept jumbo */
5301 	if (strcmp(pr_name, "_accept_jumbo") == 0) {
5302 		if (is_default)
5303 			(void) snprintf(valstr, sizeof (valstr),  "%d", 0);
5304 		else
5305 			(void) snprintf(valstr, sizeof (valstr),
5306 			    "%d", nxgep->mac.is_jumbo);
5307 		err = 0;
5308 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5309 		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
5310 		    pr_name,
5311 		    (uint32_t)param_arr[param_accept_jumbo].value,
5312 		    nxgep->mac.is_jumbo,
5313 		    nxge_jumbo_enable));
5314 
5315 		goto done;
5316 	}
5317 
5318 	/* Receive Interrupt Blanking Parameters */
5319 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5320 		err = 0;
5321 		if (is_default) {
5322 			(void) snprintf(valstr, sizeof (valstr),
5323 			    "%d", RXDMA_RCR_TO_DEFAULT);
5324 			goto done;
5325 		}
5326 
5327 		(void) snprintf(valstr, sizeof (valstr), "%d",
5328 		    nxgep->intr_timeout);
5329 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5330 		    "==> nxge_get_priv_prop: name %s (value %d)",
5331 		    pr_name,
5332 		    (uint32_t)nxgep->intr_timeout));
5333 		goto done;
5334 	}
5335 
5336 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5337 		err = 0;
5338 		if (is_default) {
5339 			(void) snprintf(valstr, sizeof (valstr),
5340 			    "%d", RXDMA_RCR_PTHRES_DEFAULT);
5341 			goto done;
5342 		}
5343 		(void) snprintf(valstr, sizeof (valstr), "%d",
5344 		    nxgep->intr_threshold);
5345 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5346 		    "==> nxge_get_priv_prop: name %s (value %d)",
5347 		    pr_name, (uint32_t)nxgep->intr_threshold));
5348 
5349 		goto done;
5350 	}
5351 
5352 	/* Classification and Load Distribution Configuration */
5353 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5354 		if (is_default) {
5355 			(void) snprintf(valstr, sizeof (valstr), "%x",
5356 			    NXGE_CLASS_FLOW_GEN_SERVER);
5357 			err = 0;
5358 			goto done;
5359 		}
5360 		err = nxge_dld_get_ip_opt(nxgep,
5361 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5362 
5363 		(void) snprintf(valstr, sizeof (valstr), "%x",
5364 		    (int)param_arr[param_class_opt_ipv4_tcp].value);
5365 
5366 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5367 		    "==> nxge_get_priv_prop: %s", valstr));
5368 		goto done;
5369 	}
5370 
5371 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5372 		if (is_default) {
5373 			(void) snprintf(valstr, sizeof (valstr), "%x",
5374 			    NXGE_CLASS_FLOW_GEN_SERVER);
5375 			err = 0;
5376 			goto done;
5377 		}
5378 		err = nxge_dld_get_ip_opt(nxgep,
5379 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5380 
5381 		(void) snprintf(valstr, sizeof (valstr), "%x",
5382 		    (int)param_arr[param_class_opt_ipv4_udp].value);
5383 
5384 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5385 		    "==> nxge_get_priv_prop: %s", valstr));
5386 		goto done;
5387 	}
5388 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5389 		if (is_default) {
5390 			(void) snprintf(valstr, sizeof (valstr), "%x",
5391 			    NXGE_CLASS_FLOW_GEN_SERVER);
5392 			err = 0;
5393 			goto done;
5394 		}
5395 		err = nxge_dld_get_ip_opt(nxgep,
5396 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5397 
5398 		(void) snprintf(valstr, sizeof (valstr), "%x",
5399 		    (int)param_arr[param_class_opt_ipv4_ah].value);
5400 
5401 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5402 		    "==> nxge_get_priv_prop: %s", valstr));
5403 		goto done;
5404 	}
5405 
5406 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5407 		if (is_default) {
5408 			(void) snprintf(valstr, sizeof (valstr), "%x",
5409 			    NXGE_CLASS_FLOW_GEN_SERVER);
5410 			err = 0;
5411 			goto done;
5412 		}
5413 		err = nxge_dld_get_ip_opt(nxgep,
5414 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5415 
5416 		(void) snprintf(valstr, sizeof (valstr), "%x",
5417 		    (int)param_arr[param_class_opt_ipv4_sctp].value);
5418 
5419 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5420 		    "==> nxge_get_priv_prop: %s", valstr));
5421 		goto done;
5422 	}
5423 
5424 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5425 		if (is_default) {
5426 			(void) snprintf(valstr, sizeof (valstr), "%x",
5427 			    NXGE_CLASS_FLOW_GEN_SERVER);
5428 			err = 0;
5429 			goto done;
5430 		}
5431 		err = nxge_dld_get_ip_opt(nxgep,
5432 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5433 
5434 		(void) snprintf(valstr, sizeof (valstr), "%x",
5435 		    (int)param_arr[param_class_opt_ipv6_tcp].value);
5436 
5437 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5438 		    "==> nxge_get_priv_prop: %s", valstr));
5439 		goto done;
5440 	}
5441 
5442 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5443 		if (is_default) {
5444 			(void) snprintf(valstr, sizeof (valstr), "%x",
5445 			    NXGE_CLASS_FLOW_GEN_SERVER);
5446 			err = 0;
5447 			goto done;
5448 		}
5449 		err = nxge_dld_get_ip_opt(nxgep,
5450 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5451 
5452 		(void) snprintf(valstr, sizeof (valstr), "%x",
5453 		    (int)param_arr[param_class_opt_ipv6_udp].value);
5454 
5455 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5456 		    "==> nxge_get_priv_prop: %s", valstr));
5457 		goto done;
5458 	}
5459 
5460 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5461 		if (is_default) {
5462 			(void) snprintf(valstr, sizeof (valstr), "%x",
5463 			    NXGE_CLASS_FLOW_GEN_SERVER);
5464 			err = 0;
5465 			goto done;
5466 		}
5467 		err = nxge_dld_get_ip_opt(nxgep,
5468 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5469 
5470 		(void) snprintf(valstr, sizeof (valstr), "%x",
5471 		    (int)param_arr[param_class_opt_ipv6_ah].value);
5472 
5473 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5474 		    "==> nxge_get_priv_prop: %s", valstr));
5475 		goto done;
5476 	}
5477 
5478 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5479 		if (is_default) {
5480 			(void) snprintf(valstr, sizeof (valstr), "%x",
5481 			    NXGE_CLASS_FLOW_GEN_SERVER);
5482 			err = 0;
5483 			goto done;
5484 		}
5485 		err = nxge_dld_get_ip_opt(nxgep,
5486 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5487 
5488 		(void) snprintf(valstr, sizeof (valstr), "%x",
5489 		    (int)param_arr[param_class_opt_ipv6_sctp].value);
5490 
5491 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5492 		    "==> nxge_get_priv_prop: %s", valstr));
5493 		goto done;
5494 	}
5495 
5496 	/* Software LSO */
5497 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5498 		if (is_default) {
5499 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5500 			err = 0;
5501 			goto done;
5502 		}
5503 		(void) snprintf(valstr, sizeof (valstr),
5504 		    "%d", nxgep->soft_lso_enable);
5505 		err = 0;
5506 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5507 		    "==> nxge_get_priv_prop: name %s (value %d)",
5508 		    pr_name, nxgep->soft_lso_enable));
5509 
5510 		goto done;
5511 	}
5512 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5513 		err = 0;
5514 		if (is_default ||
5515 		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
5516 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5517 			goto done;
5518 		} else {
5519 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5520 			goto done;
5521 		}
5522 	}
5523 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5524 		err = 0;
5525 		if (is_default ||
5526 		    nxgep->param_arr[param_anar_pause].value != 0) {
5527 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5528 			goto done;
5529 		} else {
5530 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5531 			goto done;
5532 		}
5533 	}
5534 
5535 done:
5536 	if (err == 0) {
5537 		strsize = (uint_t)strlen(valstr);
5538 		if (pr_valsize < strsize) {
5539 			err = ENOBUFS;
5540 		} else {
5541 			(void) strlcpy(pr_val, valstr, pr_valsize);
5542 		}
5543 	}
5544 
5545 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5546 	    "<== nxge_get_priv_prop: return %d", err));
5547 	return (err);
5548 }
5549 
5550 /*
5551  * Module loading and removing entry points.
5552  */
5553 
5554 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5555     nodev, NULL, D_MP, NULL);
5556 
5557 #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
5558 
5559 /*
5560  * Module linkage information for the kernel.
5561  */
5562 static struct modldrv 	nxge_modldrv = {
5563 	&mod_driverops,
5564 	NXGE_DESC_VER,
5565 	&nxge_dev_ops
5566 };
5567 
5568 static struct modlinkage modlinkage = {
5569 	MODREV_1, (void *) &nxge_modldrv, NULL
5570 };
5571 
5572 int
5573 _init(void)
5574 {
5575 	int		status;
5576 
5577 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5578 	mac_init_ops(&nxge_dev_ops, "nxge");
5579 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5580 	if (status != 0) {
5581 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5582 		    "failed to init device soft state"));
5583 		goto _init_exit;
5584 	}
5585 	status = mod_install(&modlinkage);
5586 	if (status != 0) {
5587 		ddi_soft_state_fini(&nxge_list);
5588 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5589 		goto _init_exit;
5590 	}
5591 
5592 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5593 
5594 _init_exit:
5595 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
5596 
5597 	return (status);
5598 }
5599 
5600 int
5601 _fini(void)
5602 {
5603 	int		status;
5604 
5605 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5606 
5607 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5608 
5609 	if (nxge_mblks_pending)
5610 		return (EBUSY);
5611 
5612 	status = mod_remove(&modlinkage);
5613 	if (status != DDI_SUCCESS) {
5614 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
5615 		    "Module removal failed 0x%08x",
5616 		    status));
5617 		goto _fini_exit;
5618 	}
5619 
5620 	mac_fini_ops(&nxge_dev_ops);
5621 
5622 	ddi_soft_state_fini(&nxge_list);
5623 
5624 	MUTEX_DESTROY(&nxge_common_lock);
5625 _fini_exit:
5626 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
5627 
5628 	return (status);
5629 }
5630 
5631 int
5632 _info(struct modinfo *modinfop)
5633 {
5634 	int		status;
5635 
5636 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5637 	status = mod_info(&modlinkage, modinfop);
5638 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5639 
5640 	return (status);
5641 }
5642 
5643 /*ARGSUSED*/
5644 static nxge_status_t
5645 nxge_add_intrs(p_nxge_t nxgep)
5646 {
5647 
5648 	int		intr_types;
5649 	int		type = 0;
5650 	int		ddi_status = DDI_SUCCESS;
5651 	nxge_status_t	status = NXGE_OK;
5652 
5653 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5654 
5655 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
5656 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5657 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
5658 	nxgep->nxge_intr_type.intr_added = 0;
5659 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5660 	nxgep->nxge_intr_type.intr_type = 0;
5661 
5662 	if (nxgep->niu_type == N2_NIU) {
5663 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5664 	} else if (nxge_msi_enable) {
5665 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5666 	}
5667 
5668 	/* Get the supported interrupt types */
5669 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5670 	    != DDI_SUCCESS) {
5671 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5672 		    "ddi_intr_get_supported_types failed: status 0x%08x",
5673 		    ddi_status));
5674 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5675 	}
5676 	nxgep->nxge_intr_type.intr_types = intr_types;
5677 
5678 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5679 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5680 
5681 	/*
5682 	 * Solaris MSIX is not supported yet. use MSI for now.
5683 	 * nxge_msi_enable (1):
5684 	 *	1 - MSI		2 - MSI-X	others - FIXED
5685 	 */
5686 	switch (nxge_msi_enable) {
5687 	default:
5688 		type = DDI_INTR_TYPE_FIXED;
5689 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5690 		    "use fixed (intx emulation) type %08x",
5691 		    type));
5692 		break;
5693 
5694 	case 2:
5695 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5696 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5697 		if (intr_types & DDI_INTR_TYPE_MSIX) {
5698 			type = DDI_INTR_TYPE_MSIX;
5699 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5700 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5701 			    type));
5702 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
5703 			type = DDI_INTR_TYPE_MSI;
5704 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5705 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5706 			    type));
5707 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5708 			type = DDI_INTR_TYPE_FIXED;
5709 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5710 			    "ddi_intr_get_supported_types: MSXED0x%08x",
5711 			    type));
5712 		}
5713 		break;
5714 
5715 	case 1:
5716 		if (intr_types & DDI_INTR_TYPE_MSI) {
5717 			type = DDI_INTR_TYPE_MSI;
5718 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5719 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5720 			    type));
5721 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
5722 			type = DDI_INTR_TYPE_MSIX;
5723 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5724 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5725 			    type));
5726 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5727 			type = DDI_INTR_TYPE_FIXED;
5728 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5729 			    "ddi_intr_get_supported_types: MSXED0x%08x",
5730 			    type));
5731 		}
5732 	}
5733 
5734 	nxgep->nxge_intr_type.intr_type = type;
5735 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5736 	    type == DDI_INTR_TYPE_FIXED) &&
5737 	    nxgep->nxge_intr_type.niu_msi_enable) {
5738 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5739 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5740 			    " nxge_add_intrs: "
5741 			    " nxge_add_intrs_adv failed: status 0x%08x",
5742 			    status));
5743 			return (status);
5744 		} else {
5745 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5746 			    "interrupts registered : type %d", type));
5747 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
5748 
5749 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5750 			    "\nAdded advanced nxge add_intr_adv "
5751 			    "intr type 0x%x\n", type));
5752 
5753 			return (status);
5754 		}
5755 	}
5756 
5757 	if (!nxgep->nxge_intr_type.intr_registered) {
5758 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
5759 		    "failed to register interrupts"));
5760 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5761 	}
5762 
5763 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
5764 	return (status);
5765 }
5766 
5767 /*ARGSUSED*/
5768 static nxge_status_t
5769 nxge_add_soft_intrs(p_nxge_t nxgep)
5770 {
5771 
5772 	int		ddi_status = DDI_SUCCESS;
5773 	nxge_status_t	status = NXGE_OK;
5774 
5775 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
5776 
5777 	nxgep->resched_id = NULL;
5778 	nxgep->resched_running = B_FALSE;
5779 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
5780 	    &nxgep->resched_id,
5781 	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
5782 	if (ddi_status != DDI_SUCCESS) {
5783 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
5784 		    "ddi_add_softintrs failed: status 0x%08x",
5785 		    ddi_status));
5786 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5787 	}
5788 
5789 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs"));
5790 
5791 	return (status);
5792 }
5793 
5794 static nxge_status_t
5795 nxge_add_intrs_adv(p_nxge_t nxgep)
5796 {
5797 	int		intr_type;
5798 	p_nxge_intr_t	intrp;
5799 
5800 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
5801 
5802 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5803 	intr_type = intrp->intr_type;
5804 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
5805 	    intr_type));
5806 
5807 	switch (intr_type) {
5808 	case DDI_INTR_TYPE_MSI: /* 0x2 */
5809 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
5810 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
5811 
5812 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
5813 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
5814 
5815 	default:
5816 		return (NXGE_ERROR);
5817 	}
5818 }
5819 
5820 
5821 /*ARGSUSED*/
5822 static nxge_status_t
5823 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
5824 {
5825 	dev_info_t		*dip = nxgep->dip;
5826 	p_nxge_ldg_t		ldgp;
5827 	p_nxge_intr_t		intrp;
5828 	uint_t			*inthandler;
5829 	void			*arg1, *arg2;
5830 	int			behavior;
5831 	int			nintrs, navail, nrequest;
5832 	int			nactual, nrequired;
5833 	int			inum = 0;
5834 	int			x, y;
5835 	int			ddi_status = DDI_SUCCESS;
5836 	nxge_status_t		status = NXGE_OK;
5837 
5838 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
5839 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5840 	intrp->start_inum = 0;
5841 
5842 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
5843 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
5844 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5845 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
5846 		    "nintrs: %d", ddi_status, nintrs));
5847 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5848 	}
5849 
5850 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
5851 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
5852 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5853 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
5854 		    "nintrs: %d", ddi_status, navail));
5855 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5856 	}
5857 
5858 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
5859 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
5860 	    nintrs, navail));
5861 
5862 	/* PSARC/2007/453 MSI-X interrupt limit override */
5863 	if (int_type == DDI_INTR_TYPE_MSIX) {
5864 		nrequest = nxge_create_msi_property(nxgep);
5865 		if (nrequest < navail) {
5866 			navail = nrequest;
5867 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5868 			    "nxge_add_intrs_adv_type: nintrs %d "
5869 			    "navail %d (nrequest %d)",
5870 			    nintrs, navail, nrequest));
5871 		}
5872 	}
5873 
5874 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
5875 		/* MSI must be power of 2 */
5876 		if ((navail & 16) == 16) {
5877 			navail = 16;
5878 		} else if ((navail & 8) == 8) {
5879 			navail = 8;
5880 		} else if ((navail & 4) == 4) {
5881 			navail = 4;
5882 		} else if ((navail & 2) == 2) {
5883 			navail = 2;
5884 		} else {
5885 			navail = 1;
5886 		}
5887 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5888 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
5889 		    "navail %d", nintrs, navail));
5890 	}
5891 
5892 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
5893 	    DDI_INTR_ALLOC_NORMAL);
5894 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
5895 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
5896 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
5897 	    navail, &nactual, behavior);
5898 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
5899 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5900 		    " ddi_intr_alloc() failed: %d",
5901 		    ddi_status));
5902 		kmem_free(intrp->htable, intrp->intr_size);
5903 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5904 	}
5905 
5906 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
5907 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
5908 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5909 		    " ddi_intr_get_pri() failed: %d",
5910 		    ddi_status));
5911 		/* Free already allocated interrupts */
5912 		for (y = 0; y < nactual; y++) {
5913 			(void) ddi_intr_free(intrp->htable[y]);
5914 		}
5915 
5916 		kmem_free(intrp->htable, intrp->intr_size);
5917 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5918 	}
5919 
5920 	nrequired = 0;
5921 	switch (nxgep->niu_type) {
5922 	default:
5923 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
5924 		break;
5925 
5926 	case N2_NIU:
5927 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
5928 		break;
5929 	}
5930 
5931 	if (status != NXGE_OK) {
5932 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5933 		    "nxge_add_intrs_adv_typ:nxge_ldgv_init "
5934 		    "failed: 0x%x", status));
5935 		/* Free already allocated interrupts */
5936 		for (y = 0; y < nactual; y++) {
5937 			(void) ddi_intr_free(intrp->htable[y]);
5938 		}
5939 
5940 		kmem_free(intrp->htable, intrp->intr_size);
5941 		return (status);
5942 	}
5943 
5944 	ldgp = nxgep->ldgvp->ldgp;
5945 	for (x = 0; x < nrequired; x++, ldgp++) {
5946 		ldgp->vector = (uint8_t)x;
5947 		ldgp->intdata = SID_DATA(ldgp->func, x);
5948 		arg1 = ldgp->ldvp;
5949 		arg2 = nxgep;
5950 		if (ldgp->nldvs == 1) {
5951 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
5952 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5953 			    "nxge_add_intrs_adv_type: "
5954 			    "arg1 0x%x arg2 0x%x: "
5955 			    "1-1 int handler (entry %d intdata 0x%x)\n",
5956 			    arg1, arg2,
5957 			    x, ldgp->intdata));
5958 		} else if (ldgp->nldvs > 1) {
5959 			inthandler = (uint_t *)ldgp->sys_intr_handler;
5960 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
5961 			    "nxge_add_intrs_adv_type: "
5962 			    "arg1 0x%x arg2 0x%x: "
5963 			    "nldevs %d int handler "
5964 			    "(entry %d intdata 0x%x)\n",
5965 			    arg1, arg2,
5966 			    ldgp->nldvs, x, ldgp->intdata));
5967 		}
5968 
5969 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
5970 		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
5971 		    "htable 0x%llx", x, intrp->htable[x]));
5972 
5973 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
5974 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
5975 		    != DDI_SUCCESS) {
5976 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5977 			    "==> nxge_add_intrs_adv_type: failed #%d "
5978 			    "status 0x%x", x, ddi_status));
5979 			for (y = 0; y < intrp->intr_added; y++) {
5980 				(void) ddi_intr_remove_handler(
5981 				    intrp->htable[y]);
5982 			}
5983 			/* Free already allocated intr */
5984 			for (y = 0; y < nactual; y++) {
5985 				(void) ddi_intr_free(intrp->htable[y]);
5986 			}
5987 			kmem_free(intrp->htable, intrp->intr_size);
5988 
5989 			(void) nxge_ldgv_uninit(nxgep);
5990 
5991 			return (NXGE_ERROR | NXGE_DDI_FAILED);
5992 		}
5993 		intrp->intr_added++;
5994 	}
5995 
5996 	intrp->msi_intx_cnt = nactual;
5997 
5998 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5999 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6000 	    navail, nactual,
6001 	    intrp->msi_intx_cnt,
6002 	    intrp->intr_added));
6003 
6004 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6005 
6006 	(void) nxge_intr_ldgv_init(nxgep);
6007 
6008 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6009 
6010 	return (status);
6011 }
6012 
6013 /*ARGSUSED*/
6014 static nxge_status_t
6015 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6016 {
6017 	dev_info_t		*dip = nxgep->dip;
6018 	p_nxge_ldg_t		ldgp;
6019 	p_nxge_intr_t		intrp;
6020 	uint_t			*inthandler;
6021 	void			*arg1, *arg2;
6022 	int			behavior;
6023 	int			nintrs, navail;
6024 	int			nactual, nrequired;
6025 	int			inum = 0;
6026 	int			x, y;
6027 	int			ddi_status = DDI_SUCCESS;
6028 	nxge_status_t		status = NXGE_OK;
6029 
6030 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6031 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6032 	intrp->start_inum = 0;
6033 
6034 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6035 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6036 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6037 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
6038 		    "nintrs: %d", status, nintrs));
6039 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6040 	}
6041 
6042 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6043 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6044 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6045 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
6046 		    "nintrs: %d", ddi_status, navail));
6047 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6048 	}
6049 
6050 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
6051 	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
6052 	    nintrs, navail));
6053 
6054 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6055 	    DDI_INTR_ALLOC_NORMAL);
6056 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6057 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6058 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6059 	    navail, &nactual, behavior);
6060 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
6061 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6062 		    " ddi_intr_alloc() failed: %d",
6063 		    ddi_status));
6064 		kmem_free(intrp->htable, intrp->intr_size);
6065 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6066 	}
6067 
6068 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6069 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6070 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6071 		    " ddi_intr_get_pri() failed: %d",
6072 		    ddi_status));
6073 		/* Free already allocated interrupts */
6074 		for (y = 0; y < nactual; y++) {
6075 			(void) ddi_intr_free(intrp->htable[y]);
6076 		}
6077 
6078 		kmem_free(intrp->htable, intrp->intr_size);
6079 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6080 	}
6081 
6082 	nrequired = 0;
6083 	switch (nxgep->niu_type) {
6084 	default:
6085 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6086 		break;
6087 
6088 	case N2_NIU:
6089 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6090 		break;
6091 	}
6092 
6093 	if (status != NXGE_OK) {
6094 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6095 		    "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6096 		    "failed: 0x%x", status));
6097 		/* Free already allocated interrupts */
6098 		for (y = 0; y < nactual; y++) {
6099 			(void) ddi_intr_free(intrp->htable[y]);
6100 		}
6101 
6102 		kmem_free(intrp->htable, intrp->intr_size);
6103 		return (status);
6104 	}
6105 
6106 	ldgp = nxgep->ldgvp->ldgp;
6107 	for (x = 0; x < nrequired; x++, ldgp++) {
6108 		ldgp->vector = (uint8_t)x;
6109 		if (nxgep->niu_type != N2_NIU) {
6110 			ldgp->intdata = SID_DATA(ldgp->func, x);
6111 		}
6112 
6113 		arg1 = ldgp->ldvp;
6114 		arg2 = nxgep;
6115 		if (ldgp->nldvs == 1) {
6116 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6117 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
6118 			    "nxge_add_intrs_adv_type_fix: "
6119 			    "1-1 int handler(%d) ldg %d ldv %d "
6120 			    "arg1 $%p arg2 $%p\n",
6121 			    x, ldgp->ldg, ldgp->ldvp->ldv,
6122 			    arg1, arg2));
6123 		} else if (ldgp->nldvs > 1) {
6124 			inthandler = (uint_t *)ldgp->sys_intr_handler;
6125 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
6126 			    "nxge_add_intrs_adv_type_fix: "
6127 			    "shared ldv %d int handler(%d) ldv %d ldg %d"
6128 			    "arg1 0x%016llx arg2 0x%016llx\n",
6129 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6130 			    arg1, arg2));
6131 		}
6132 
6133 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6134 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
6135 		    != DDI_SUCCESS) {
6136 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6137 			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
6138 			    "status 0x%x", x, ddi_status));
6139 			for (y = 0; y < intrp->intr_added; y++) {
6140 				(void) ddi_intr_remove_handler(
6141 				    intrp->htable[y]);
6142 			}
6143 			for (y = 0; y < nactual; y++) {
6144 				(void) ddi_intr_free(intrp->htable[y]);
6145 			}
6146 			/* Free already allocated intr */
6147 			kmem_free(intrp->htable, intrp->intr_size);
6148 
6149 			(void) nxge_ldgv_uninit(nxgep);
6150 
6151 			return (NXGE_ERROR | NXGE_DDI_FAILED);
6152 		}
6153 		intrp->intr_added++;
6154 	}
6155 
6156 	intrp->msi_intx_cnt = nactual;
6157 
6158 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6159 
6160 	status = nxge_intr_ldgv_init(nxgep);
6161 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6162 
6163 	return (status);
6164 }
6165 
6166 static void
6167 nxge_remove_intrs(p_nxge_t nxgep)
6168 {
6169 	int		i, inum;
6170 	p_nxge_intr_t	intrp;
6171 
6172 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6173 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6174 	if (!intrp->intr_registered) {
6175 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6176 		    "<== nxge_remove_intrs: interrupts not registered"));
6177 		return;
6178 	}
6179 
6180 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6181 
6182 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6183 		(void) ddi_intr_block_disable(intrp->htable,
6184 		    intrp->intr_added);
6185 	} else {
6186 		for (i = 0; i < intrp->intr_added; i++) {
6187 			(void) ddi_intr_disable(intrp->htable[i]);
6188 		}
6189 	}
6190 
6191 	for (inum = 0; inum < intrp->intr_added; inum++) {
6192 		if (intrp->htable[inum]) {
6193 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
6194 		}
6195 	}
6196 
6197 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6198 		if (intrp->htable[inum]) {
6199 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6200 			    "nxge_remove_intrs: ddi_intr_free inum %d "
6201 			    "msi_intx_cnt %d intr_added %d",
6202 			    inum,
6203 			    intrp->msi_intx_cnt,
6204 			    intrp->intr_added));
6205 
6206 			(void) ddi_intr_free(intrp->htable[inum]);
6207 		}
6208 	}
6209 
6210 	kmem_free(intrp->htable, intrp->intr_size);
6211 	intrp->intr_registered = B_FALSE;
6212 	intrp->intr_enabled = B_FALSE;
6213 	intrp->msi_intx_cnt = 0;
6214 	intrp->intr_added = 0;
6215 
6216 	(void) nxge_ldgv_uninit(nxgep);
6217 
6218 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6219 	    "#msix-request");
6220 
6221 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6222 }
6223 
6224 /*ARGSUSED*/
6225 static void
6226 nxge_remove_soft_intrs(p_nxge_t nxgep)
6227 {
6228 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
6229 	if (nxgep->resched_id) {
6230 		ddi_remove_softintr(nxgep->resched_id);
6231 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6232 		    "==> nxge_remove_soft_intrs: removed"));
6233 		nxgep->resched_id = NULL;
6234 	}
6235 
6236 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
6237 }
6238 
6239 /*ARGSUSED*/
6240 static void
6241 nxge_intrs_enable(p_nxge_t nxgep)
6242 {
6243 	p_nxge_intr_t	intrp;
6244 	int		i;
6245 	int		status;
6246 
6247 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6248 
6249 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6250 
6251 	if (!intrp->intr_registered) {
6252 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6253 		    "interrupts are not registered"));
6254 		return;
6255 	}
6256 
6257 	if (intrp->intr_enabled) {
6258 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
6259 		    "<== nxge_intrs_enable: already enabled"));
6260 		return;
6261 	}
6262 
6263 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6264 		status = ddi_intr_block_enable(intrp->htable,
6265 		    intrp->intr_added);
6266 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6267 		    "block enable - status 0x%x total inums #%d\n",
6268 		    status, intrp->intr_added));
6269 	} else {
6270 		for (i = 0; i < intrp->intr_added; i++) {
6271 			status = ddi_intr_enable(intrp->htable[i]);
6272 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6273 			    "ddi_intr_enable:enable - status 0x%x "
6274 			    "total inums %d enable inum #%d\n",
6275 			    status, intrp->intr_added, i));
6276 			if (status == DDI_SUCCESS) {
6277 				intrp->intr_enabled = B_TRUE;
6278 			}
6279 		}
6280 	}
6281 
6282 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6283 }
6284 
6285 /*ARGSUSED*/
6286 static void
6287 nxge_intrs_disable(p_nxge_t nxgep)
6288 {
6289 	p_nxge_intr_t	intrp;
6290 	int		i;
6291 
6292 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6293 
6294 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6295 
6296 	if (!intrp->intr_registered) {
6297 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6298 		    "interrupts are not registered"));
6299 		return;
6300 	}
6301 
6302 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6303 		(void) ddi_intr_block_disable(intrp->htable,
6304 		    intrp->intr_added);
6305 	} else {
6306 		for (i = 0; i < intrp->intr_added; i++) {
6307 			(void) ddi_intr_disable(intrp->htable[i]);
6308 		}
6309 	}
6310 
6311 	intrp->intr_enabled = B_FALSE;
6312 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6313 }
6314 
6315 static nxge_status_t
6316 nxge_mac_register(p_nxge_t nxgep)
6317 {
6318 	mac_register_t *macp;
6319 	int		status;
6320 
6321 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6322 
6323 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6324 		return (NXGE_ERROR);
6325 
6326 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6327 	macp->m_driver = nxgep;
6328 	macp->m_dip = nxgep->dip;
6329 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6330 	macp->m_callbacks = &nxge_m_callbacks;
6331 	macp->m_min_sdu = 0;
6332 	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6333 	    NXGE_EHEADER_VLAN_CRC;
6334 	macp->m_max_sdu = nxgep->mac.default_mtu;
6335 	macp->m_margin = VLAN_TAGSZ;
6336 	macp->m_priv_props = nxge_priv_props;
6337 	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
6338 
6339 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6340 	    "==> nxge_mac_register: instance %d "
6341 	    "max_sdu %d margin %d maxframe %d (header %d)",
6342 	    nxgep->instance,
6343 	    macp->m_max_sdu, macp->m_margin,
6344 	    nxgep->mac.maxframesize,
6345 	    NXGE_EHEADER_VLAN_CRC));
6346 
6347 	status = mac_register(macp, &nxgep->mach);
6348 	mac_free(macp);
6349 
6350 	if (status != 0) {
6351 		cmn_err(CE_WARN,
6352 		    "!nxge_mac_register failed (status %d instance %d)",
6353 		    status, nxgep->instance);
6354 		return (NXGE_ERROR);
6355 	}
6356 
6357 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6358 	    "(instance %d)", nxgep->instance));
6359 
6360 	return (NXGE_OK);
6361 }
6362 
6363 void
6364 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6365 {
6366 	ssize_t		size;
6367 	mblk_t		*nmp;
6368 	uint8_t		blk_id;
6369 	uint8_t		chan;
6370 	uint32_t	err_id;
6371 	err_inject_t	*eip;
6372 
6373 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6374 
6375 	size = 1024;
6376 	nmp = mp->b_cont;
6377 	eip = (err_inject_t *)nmp->b_rptr;
6378 	blk_id = eip->blk_id;
6379 	err_id = eip->err_id;
6380 	chan = eip->chan;
6381 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6382 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6383 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6384 	switch (blk_id) {
6385 	case MAC_BLK_ID:
6386 		break;
6387 	case TXMAC_BLK_ID:
6388 		break;
6389 	case RXMAC_BLK_ID:
6390 		break;
6391 	case MIF_BLK_ID:
6392 		break;
6393 	case IPP_BLK_ID:
6394 		nxge_ipp_inject_err(nxgep, err_id);
6395 		break;
6396 	case TXC_BLK_ID:
6397 		nxge_txc_inject_err(nxgep, err_id);
6398 		break;
6399 	case TXDMA_BLK_ID:
6400 		nxge_txdma_inject_err(nxgep, err_id, chan);
6401 		break;
6402 	case RXDMA_BLK_ID:
6403 		nxge_rxdma_inject_err(nxgep, err_id, chan);
6404 		break;
6405 	case ZCP_BLK_ID:
6406 		nxge_zcp_inject_err(nxgep, err_id);
6407 		break;
6408 	case ESPC_BLK_ID:
6409 		break;
6410 	case FFLP_BLK_ID:
6411 		break;
6412 	case PHY_BLK_ID:
6413 		break;
6414 	case ETHER_SERDES_BLK_ID:
6415 		break;
6416 	case PCIE_SERDES_BLK_ID:
6417 		break;
6418 	case VIR_BLK_ID:
6419 		break;
6420 	}
6421 
6422 	nmp->b_wptr = nmp->b_rptr + size;
6423 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6424 
6425 	miocack(wq, mp, (int)size, 0);
6426 }
6427 
6428 static int
6429 nxge_init_common_dev(p_nxge_t nxgep)
6430 {
6431 	p_nxge_hw_list_t	hw_p;
6432 	dev_info_t 		*p_dip;
6433 
6434 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6435 
6436 	p_dip = nxgep->p_dip;
6437 	MUTEX_ENTER(&nxge_common_lock);
6438 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6439 	    "==> nxge_init_common_dev:func # %d",
6440 	    nxgep->function_num));
6441 	/*
6442 	 * Loop through existing per neptune hardware list.
6443 	 */
6444 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6445 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6446 		    "==> nxge_init_common_device:func # %d "
6447 		    "hw_p $%p parent dip $%p",
6448 		    nxgep->function_num,
6449 		    hw_p,
6450 		    p_dip));
6451 		if (hw_p->parent_devp == p_dip) {
6452 			nxgep->nxge_hw_p = hw_p;
6453 			hw_p->ndevs++;
6454 			hw_p->nxge_p[nxgep->function_num] = nxgep;
6455 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6456 			    "==> nxge_init_common_device:func # %d "
6457 			    "hw_p $%p parent dip $%p "
6458 			    "ndevs %d (found)",
6459 			    nxgep->function_num,
6460 			    hw_p,
6461 			    p_dip,
6462 			    hw_p->ndevs));
6463 			break;
6464 		}
6465 	}
6466 
6467 	if (hw_p == NULL) {
6468 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6469 		    "==> nxge_init_common_device:func # %d "
6470 		    "parent dip $%p (new)",
6471 		    nxgep->function_num,
6472 		    p_dip));
6473 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6474 		hw_p->parent_devp = p_dip;
6475 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
6476 		nxgep->nxge_hw_p = hw_p;
6477 		hw_p->ndevs++;
6478 		hw_p->nxge_p[nxgep->function_num] = nxgep;
6479 		hw_p->next = nxge_hw_list;
6480 		if (nxgep->niu_type == N2_NIU) {
6481 			hw_p->niu_type = N2_NIU;
6482 			hw_p->platform_type = P_NEPTUNE_NIU;
6483 		} else {
6484 			hw_p->niu_type = NIU_TYPE_NONE;
6485 			hw_p->platform_type = P_NEPTUNE_NONE;
6486 		}
6487 
6488 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6489 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6490 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6491 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6492 
6493 		nxge_hw_list = hw_p;
6494 
6495 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6496 	}
6497 
6498 	MUTEX_EXIT(&nxge_common_lock);
6499 
6500 	nxgep->platform_type = hw_p->platform_type;
6501 	if (nxgep->niu_type != N2_NIU) {
6502 		nxgep->niu_type = hw_p->niu_type;
6503 	}
6504 
6505 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6506 	    "==> nxge_init_common_device (nxge_hw_list) $%p",
6507 	    nxge_hw_list));
6508 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6509 
6510 	return (NXGE_OK);
6511 }
6512 
6513 static void
6514 nxge_uninit_common_dev(p_nxge_t nxgep)
6515 {
6516 	p_nxge_hw_list_t	hw_p, h_hw_p;
6517 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
6518 	p_nxge_hw_pt_cfg_t	p_cfgp;
6519 	dev_info_t 		*p_dip;
6520 
6521 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6522 	if (nxgep->nxge_hw_p == NULL) {
6523 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6524 		    "<== nxge_uninit_common_device (no common)"));
6525 		return;
6526 	}
6527 
6528 	MUTEX_ENTER(&nxge_common_lock);
6529 	h_hw_p = nxge_hw_list;
6530 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6531 		p_dip = hw_p->parent_devp;
6532 		if (nxgep->nxge_hw_p == hw_p &&
6533 		    p_dip == nxgep->p_dip &&
6534 		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6535 		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6536 
6537 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6538 			    "==> nxge_uninit_common_device:func # %d "
6539 			    "hw_p $%p parent dip $%p "
6540 			    "ndevs %d (found)",
6541 			    nxgep->function_num,
6542 			    hw_p,
6543 			    p_dip,
6544 			    hw_p->ndevs));
6545 
6546 			/*
6547 			 * Release the RDC table, a shared resoruce
6548 			 * of the nxge hardware.  The RDC table was
6549 			 * assigned to this instance of nxge in
6550 			 * nxge_use_cfg_dma_config().
6551 			 */
6552 			p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6553 			p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6554 			(void) nxge_fzc_rdc_tbl_unbind(nxgep,
6555 			    p_cfgp->def_mac_rxdma_grpid);
6556 
6557 			if (hw_p->ndevs) {
6558 				hw_p->ndevs--;
6559 			}
6560 			hw_p->nxge_p[nxgep->function_num] = NULL;
6561 			if (!hw_p->ndevs) {
6562 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6563 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6564 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6565 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6566 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6567 				    "==> nxge_uninit_common_device: "
6568 				    "func # %d "
6569 				    "hw_p $%p parent dip $%p "
6570 				    "ndevs %d (last)",
6571 				    nxgep->function_num,
6572 				    hw_p,
6573 				    p_dip,
6574 				    hw_p->ndevs));
6575 
6576 				nxge_hio_uninit(nxgep);
6577 
6578 				if (hw_p == nxge_hw_list) {
6579 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6580 					    "==> nxge_uninit_common_device:"
6581 					    "remove head func # %d "
6582 					    "hw_p $%p parent dip $%p "
6583 					    "ndevs %d (head)",
6584 					    nxgep->function_num,
6585 					    hw_p,
6586 					    p_dip,
6587 					    hw_p->ndevs));
6588 					nxge_hw_list = hw_p->next;
6589 				} else {
6590 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6591 					    "==> nxge_uninit_common_device:"
6592 					    "remove middle func # %d "
6593 					    "hw_p $%p parent dip $%p "
6594 					    "ndevs %d (middle)",
6595 					    nxgep->function_num,
6596 					    hw_p,
6597 					    p_dip,
6598 					    hw_p->ndevs));
6599 					h_hw_p->next = hw_p->next;
6600 				}
6601 
6602 				nxgep->nxge_hw_p = NULL;
6603 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6604 			}
6605 			break;
6606 		} else {
6607 			h_hw_p = hw_p;
6608 		}
6609 	}
6610 
6611 	MUTEX_EXIT(&nxge_common_lock);
6612 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6613 	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6614 	    nxge_hw_list));
6615 
6616 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device"));
6617 }
6618 
6619 /*
6620  * Determines the number of ports from the niu_type or the platform type.
6621  * Returns the number of ports, or returns zero on failure.
6622  */
6623 
6624 int
6625 nxge_get_nports(p_nxge_t nxgep)
6626 {
6627 	int	nports = 0;
6628 
6629 	switch (nxgep->niu_type) {
6630 	case N2_NIU:
6631 	case NEPTUNE_2_10GF:
6632 		nports = 2;
6633 		break;
6634 	case NEPTUNE_4_1GC:
6635 	case NEPTUNE_2_10GF_2_1GC:
6636 	case NEPTUNE_1_10GF_3_1GC:
6637 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
6638 	case NEPTUNE_2_10GF_2_1GRF:
6639 		nports = 4;
6640 		break;
6641 	default:
6642 		switch (nxgep->platform_type) {
6643 		case P_NEPTUNE_NIU:
6644 		case P_NEPTUNE_ATLAS_2PORT:
6645 			nports = 2;
6646 			break;
6647 		case P_NEPTUNE_ATLAS_4PORT:
6648 		case P_NEPTUNE_MARAMBA_P0:
6649 		case P_NEPTUNE_MARAMBA_P1:
6650 		case P_NEPTUNE_ALONSO:
6651 			nports = 4;
6652 			break;
6653 		default:
6654 			break;
6655 		}
6656 		break;
6657 	}
6658 
6659 	return (nports);
6660 }
6661 
6662 /*
6663  * The following two functions are to support
6664  * PSARC/2007/453 MSI-X interrupt limit override.
6665  */
6666 static int
6667 nxge_create_msi_property(p_nxge_t nxgep)
6668 {
6669 	int	nmsi;
6670 	extern	int ncpus;
6671 
6672 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6673 
6674 	switch (nxgep->mac.portmode) {
6675 	case PORT_10G_COPPER:
6676 	case PORT_10G_FIBER:
6677 	case PORT_10G_TN1010:
6678 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6679 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6680 		/*
6681 		 * The maximum MSI-X requested will be 8.
6682 		 * If the # of CPUs is less than 8, we will reqeust
6683 		 * # MSI-X based on the # of CPUs.
6684 		 */
6685 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
6686 			nmsi = NXGE_MSIX_REQUEST_10G;
6687 		} else {
6688 			nmsi = ncpus;
6689 		}
6690 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6691 		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6692 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6693 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6694 		break;
6695 
6696 	default:
6697 		nmsi = NXGE_MSIX_REQUEST_1G;
6698 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6699 		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
6700 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6701 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6702 		break;
6703 	}
6704 
6705 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
6706 	return (nmsi);
6707 }
6708 
6709 /* ARGSUSED */
6710 static int
6711 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
6712     void *pr_val)
6713 {
6714 	int err = 0;
6715 	link_flowctrl_t fl;
6716 
6717 	switch (pr_num) {
6718 	case MAC_PROP_AUTONEG:
6719 		*(uint8_t *)pr_val = 1;
6720 		break;
6721 	case MAC_PROP_FLOWCTRL:
6722 		if (pr_valsize < sizeof (link_flowctrl_t))
6723 			return (EINVAL);
6724 		fl = LINK_FLOWCTRL_RX;
6725 		bcopy(&fl, pr_val, sizeof (fl));
6726 		break;
6727 	case MAC_PROP_ADV_1000FDX_CAP:
6728 	case MAC_PROP_EN_1000FDX_CAP:
6729 		*(uint8_t *)pr_val = 1;
6730 		break;
6731 	case MAC_PROP_ADV_100FDX_CAP:
6732 	case MAC_PROP_EN_100FDX_CAP:
6733 		*(uint8_t *)pr_val = 1;
6734 		break;
6735 	default:
6736 		err = ENOTSUP;
6737 		break;
6738 	}
6739 	return (err);
6740 }
6741 
6742 
6743 /*
6744  * The following is a software around for the Neptune hardware's
6745  * interrupt bugs; The Neptune hardware may generate spurious interrupts when
6746  * an interrupr handler is removed.
6747  */
6748 #define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
6749 #define	NXGE_PIM_RESET			(1ULL << 29)
6750 #define	NXGE_GLU_RESET			(1ULL << 30)
6751 #define	NXGE_NIU_RESET			(1ULL << 31)
6752 #define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
6753 					NXGE_GLU_RESET |	\
6754 					NXGE_NIU_RESET)
6755 
6756 #define	NXGE_WAIT_QUITE_TIME		200000
6757 #define	NXGE_WAIT_QUITE_RETRY		40
6758 #define	NXGE_PCI_RESET_WAIT		1000000 /* one second */
6759 
6760 static void
6761 nxge_niu_peu_reset(p_nxge_t nxgep)
6762 {
6763 	uint32_t	rvalue;
6764 	p_nxge_hw_list_t hw_p;
6765 	p_nxge_t	fnxgep;
6766 	int		i, j;
6767 
6768 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
6769 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
6770 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6771 		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
6772 		return;
6773 	}
6774 
6775 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6776 	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
6777 	    hw_p->flags, nxgep->nxge_link_poll_timerid,
6778 	    nxgep->nxge_timerid));
6779 
6780 	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
6781 	/*
6782 	 * Make sure other instances from the same hardware
6783 	 * stop sending PIO and in quiescent state.
6784 	 */
6785 	for (i = 0; i < NXGE_MAX_PORTS; i++) {
6786 		fnxgep = hw_p->nxge_p[i];
6787 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6788 		    "==> nxge_niu_peu_reset: checking entry %d "
6789 		    "nxgep $%p", i, fnxgep));
6790 #ifdef	NXGE_DEBUG
6791 		if (fnxgep) {
6792 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6793 			    "==> nxge_niu_peu_reset: entry %d (function %d) "
6794 			    "link timer id %d hw timer id %d",
6795 			    i, fnxgep->function_num,
6796 			    fnxgep->nxge_link_poll_timerid,
6797 			    fnxgep->nxge_timerid));
6798 		}
6799 #endif
6800 		if (fnxgep && fnxgep != nxgep &&
6801 		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
6802 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6803 			    "==> nxge_niu_peu_reset: checking $%p "
6804 			    "(function %d) timer ids",
6805 			    fnxgep, fnxgep->function_num));
6806 			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
6807 				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6808 				    "==> nxge_niu_peu_reset: waiting"));
6809 				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
6810 				if (!fnxgep->nxge_timerid &&
6811 				    !fnxgep->nxge_link_poll_timerid) {
6812 					break;
6813 				}
6814 			}
6815 			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
6816 			if (fnxgep->nxge_timerid ||
6817 			    fnxgep->nxge_link_poll_timerid) {
6818 				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
6819 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6820 				    "<== nxge_niu_peu_reset: cannot reset "
6821 				    "hardware (devices are still in use)"));
6822 				return;
6823 			}
6824 		}
6825 	}
6826 
6827 	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
6828 		hw_p->flags |= COMMON_RESET_NIU_PCI;
6829 		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
6830 		    NXGE_PCI_PORT_LOGIC_OFFSET);
6831 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6832 		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
6833 		    "(data 0x%x)",
6834 		    NXGE_PCI_PORT_LOGIC_OFFSET,
6835 		    NXGE_PCI_PORT_LOGIC_OFFSET,
6836 		    rvalue));
6837 
6838 		rvalue |= NXGE_PCI_RESET_ALL;
6839 		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
6840 		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
6841 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
6842 		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
6843 		    rvalue));
6844 
6845 		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
6846 	}
6847 
6848 	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
6849 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
6850 }
6851 
6852 static void
6853 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
6854 {
6855 	p_dev_regs_t 	dev_regs;
6856 	uint32_t	value;
6857 
6858 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
6859 
6860 	if (!nxge_set_replay_timer) {
6861 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6862 		    "==> nxge_set_pci_replay_timeout: will not change "
6863 		    "the timeout"));
6864 		return;
6865 	}
6866 
6867 	dev_regs = nxgep->dev_regs;
6868 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6869 	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
6870 	    dev_regs, dev_regs->nxge_pciregh));
6871 
6872 	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
6873 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6874 		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
6875 		    "no PCI handle",
6876 		    dev_regs));
6877 		return;
6878 	}
6879 	value = (pci_config_get32(dev_regs->nxge_pciregh,
6880 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
6881 	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
6882 
6883 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6884 	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
6885 	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
6886 	    pci_config_get32(dev_regs->nxge_pciregh,
6887 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
6888 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
6889 
6890 	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
6891 	    value);
6892 
6893 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6894 	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
6895 	    pci_config_get32(dev_regs->nxge_pciregh,
6896 	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
6897 
6898 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
6899 }
6900